1 /* 2 * SCSI Device emulation 3 * 4 * Copyright (c) 2006 CodeSourcery. 5 * Based on code by Fabrice Bellard 6 * 7 * Written by Paul Brook 8 * Modifications: 9 * 2009-Dec-12 Artyom Tarasenko : implemented stamdard inquiry for the case 10 * when the allocation length of CDB is smaller 11 * than 36. 12 * 2009-Oct-13 Artyom Tarasenko : implemented the block descriptor in the 13 * MODE SENSE response. 14 * 15 * This code is licensed under the LGPL. 16 * 17 * Note that this file only handles the SCSI architecture model and device 18 * commands. Emulation of interface/link layer protocols is handled by 19 * the host adapter emulator. 20 */ 21 22 #include "qemu/osdep.h" 23 #include "qemu/units.h" 24 #include "qapi/error.h" 25 #include "qemu/error-report.h" 26 #include "qemu/main-loop.h" 27 #include "qemu/module.h" 28 #include "qemu/hw-version.h" 29 #include "qemu/memalign.h" 30 #include "hw/scsi/scsi.h" 31 #include "migration/qemu-file-types.h" 32 #include "migration/vmstate.h" 33 #include "hw/scsi/emulation.h" 34 #include "scsi/constants.h" 35 #include "sysemu/block-backend.h" 36 #include "sysemu/blockdev.h" 37 #include "hw/block/block.h" 38 #include "hw/qdev-properties.h" 39 #include "hw/qdev-properties-system.h" 40 #include "sysemu/dma.h" 41 #include "sysemu/sysemu.h" 42 #include "qemu/cutils.h" 43 #include "trace.h" 44 #include "qom/object.h" 45 46 #ifdef __linux 47 #include <scsi/sg.h> 48 #endif 49 50 #define SCSI_WRITE_SAME_MAX (512 * KiB) 51 #define SCSI_DMA_BUF_SIZE (128 * KiB) 52 #define SCSI_MAX_INQUIRY_LEN 256 53 #define SCSI_MAX_MODE_LEN 256 54 55 #define DEFAULT_DISCARD_GRANULARITY (4 * KiB) 56 #define DEFAULT_MAX_UNMAP_SIZE (1 * GiB) 57 #define DEFAULT_MAX_IO_SIZE INT_MAX /* 2 GB - 1 block */ 58 59 #define TYPE_SCSI_DISK_BASE "scsi-disk-base" 60 61 OBJECT_DECLARE_TYPE(SCSIDiskState, SCSIDiskClass, SCSI_DISK_BASE) 62 63 struct SCSIDiskClass { 64 SCSIDeviceClass parent_class; 65 DMAIOFunc *dma_readv; 66 DMAIOFunc *dma_writev; 67 bool 
(*need_fua_emulation)(SCSICommand *cmd); 68 void (*update_sense)(SCSIRequest *r); 69 }; 70 71 typedef struct SCSIDiskReq { 72 SCSIRequest req; 73 /* Both sector and sector_count are in terms of BDRV_SECTOR_SIZE bytes. */ 74 uint64_t sector; 75 uint32_t sector_count; 76 uint32_t buflen; 77 bool started; 78 bool need_fua_emulation; 79 struct iovec iov; 80 QEMUIOVector qiov; 81 BlockAcctCookie acct; 82 } SCSIDiskReq; 83 84 #define SCSI_DISK_F_REMOVABLE 0 85 #define SCSI_DISK_F_DPOFUA 1 86 #define SCSI_DISK_F_NO_REMOVABLE_DEVOPS 2 87 88 struct SCSIDiskState { 89 SCSIDevice qdev; 90 uint32_t features; 91 bool media_changed; 92 bool media_event; 93 bool eject_request; 94 uint16_t port_index; 95 uint64_t max_unmap_size; 96 uint64_t max_io_size; 97 uint32_t quirks; 98 QEMUBH *bh; 99 char *version; 100 char *serial; 101 char *vendor; 102 char *product; 103 char *device_id; 104 bool tray_open; 105 bool tray_locked; 106 /* 107 * 0x0000 - rotation rate not reported 108 * 0x0001 - non-rotating medium (SSD) 109 * 0x0002-0x0400 - reserved 110 * 0x0401-0xffe - rotations per minute 111 * 0xffff - reserved 112 */ 113 uint16_t rotation_rate; 114 }; 115 116 static void scsi_free_request(SCSIRequest *req) 117 { 118 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 119 120 qemu_vfree(r->iov.iov_base); 121 } 122 123 /* Helper function for command completion with sense. 
*/
static void scsi_check_condition(SCSIDiskReq *r, SCSISense sense)
{
    trace_scsi_disk_check_condition(r->req.tag, sense.key, sense.asc,
                                    sense.ascq);
    scsi_req_build_sense(&r->req, sense);
    scsi_req_complete(&r->req, CHECK_CONDITION);
}

/*
 * (Re)initialize the request's single-element iovec.  The bounce buffer is
 * allocated lazily on first use and its size is then fixed; iov_len is
 * clamped to the smaller of the remaining transfer and the buffer size.
 */
static void scsi_init_iovec(SCSIDiskReq *r, size_t size)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    if (!r->iov.iov_base) {
        r->buflen = size;
        r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen);
    }
    r->iov.iov_len = MIN(r->sector_count * BDRV_SECTOR_SIZE, r->buflen);
    qemu_iovec_init_external(&r->qiov, &r->iov, 1);
}

/*
 * Serialize per-request state for migration.  For host-to-device transfers
 * the buffer contents are always sent; for other directions the (possibly
 * partial) buffer is sent with an explicit length, but only when the request
 * is not going to be retried from scratch anyway.
 */
static void scsi_disk_save_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    qemu_put_be64s(f, &r->sector);
    qemu_put_be32s(f, &r->sector_count);
    qemu_put_be32s(f, &r->buflen);
    if (r->buflen) {
        if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
            qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len);
        } else if (!req->retry) {
            uint32_t len = r->iov.iov_len;
            qemu_put_be32s(f, &len);
            qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len);
        }
    }
}

/*
 * Deserialize per-request state; must mirror scsi_disk_save_request
 * field-for-field.
 */
static void scsi_disk_load_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    qemu_get_be64s(f, &r->sector);
    qemu_get_be32s(f, &r->sector_count);
    qemu_get_be32s(f, &r->buflen);
    if (r->buflen) {
        scsi_init_iovec(r, r->buflen);
        if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
            qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len);
        } else if (!r->req.retry) {
            uint32_t len;
            qemu_get_be32s(f, &len);
            r->iov.iov_len = len;
            /* guard against a corrupted/hostile migration stream */
            assert(r->iov.iov_len <= r->buflen);
            qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len);
        }
    }

    qemu_iovec_init_external(&r->qiov, &r->iov, 1);
}

/*
 * scsi_handle_rw_error has two return values.
 * False means that the error must be ignored, true means that the error
 * has been processed and the caller should not do anything else for this
 * request.  Note that scsi_handle_rw_error always manages its reference
 * counts, independent of the return value.
 */
static bool scsi_handle_rw_error(SCSIDiskReq *r, int ret, bool acct_failed)
{
    bool is_read = (r->req.cmd.mode == SCSI_XFER_FROM_DEV);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
    SCSISense sense = SENSE_CODE(NO_SENSE);
    int error = 0;
    bool req_has_sense = false;
    BlockErrorAction action;
    int status;

    if (ret < 0) {
        /* Host-side I/O error: map errno to a SCSI status and sense code. */
        status = scsi_sense_from_errno(-ret, &sense);
        error = -ret;
    } else {
        /* A passthrough command has completed with nonzero status.  */
        status = ret;
        if (status == CHECK_CONDITION) {
            req_has_sense = true;
            error = scsi_sense_buf_to_errno(r->req.sense, sizeof(r->req.sense));
        } else {
            error = EINVAL;
        }
    }

    /*
     * Check whether the error has to be handled by the guest or should
     * rather follow the rerror=/werror= settings.  Guest-handled errors
     * are usually retried immediately, so do not post them to QMP and
     * do not account them as failed I/O.
     */
    if (req_has_sense &&
        scsi_sense_buf_is_guest_recoverable(r->req.sense, sizeof(r->req.sense))) {
        action = BLOCK_ERROR_ACTION_REPORT;
        acct_failed = false;
    } else {
        action = blk_get_error_action(s->qdev.conf.blk, is_read, error);
        blk_error_action(s->qdev.conf.blk, action, is_read, error);
    }

    switch (action) {
    case BLOCK_ERROR_ACTION_REPORT:
        if (acct_failed) {
            block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
        }
        if (req_has_sense) {
            /* passthrough already has valid sense; let the class refresh it */
            sdc->update_sense(&r->req);
        } else if (status == CHECK_CONDITION) {
            scsi_req_build_sense(&r->req, sense);
        }
        scsi_req_complete(&r->req, status);
        return true;

    case BLOCK_ERROR_ACTION_IGNORE:
        return false;

    case BLOCK_ERROR_ACTION_STOP:
        /* Queue the request to be re-issued when the VM resumes. */
        scsi_req_retry(&r->req);
        return true;

    default:
        g_assert_not_reached();
    }
}

/*
 * Common completion-path check: returns true when the request was cancelled
 * or an error was fully handled, i.e. the caller must not continue.
 */
static bool scsi_disk_req_check_error(SCSIDiskReq *r, int ret, bool acct_failed)
{
    if (r->req.io_canceled) {
        scsi_req_cancel_complete(&r->req);
        return true;
    }

    if (ret < 0) {
        return scsi_handle_rw_error(r, ret, acct_failed);
    }

    return false;
}

/* Generic AIO completion: account, complete with GOOD, drop the AIO ref. */
static void scsi_aio_complete(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    /* The request must only run in the BlockBackend's AioContext */
    assert(blk_get_aio_context(s->qdev.conf.blk) ==
           qemu_get_current_aio_context());

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    if (scsi_disk_req_check_error(r, ret, true)) {
        goto done;
    }

    block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    scsi_req_complete(&r->req, GOOD);

done:
    scsi_req_unref(&r->req);
}

/* Does this CDB request forced unit access (or imply it, as VERIFY does)? */
static bool scsi_is_cmd_fua(SCSICommand *cmd)
{
    switch (cmd->buf[0]) {
    case READ_10:
    case READ_12:
    case READ_16:
    case WRITE_10:
    case WRITE_12:
    case WRITE_16:
        return (cmd->buf[1] & 8) != 0;   /* FUA bit in byte 1 */

    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
    case WRITE_VERIFY_10:
    case WRITE_VERIFY_12:
    case WRITE_VERIFY_16:
        return true;

    case READ_6:
    case WRITE_6:
    default:
        /* 6-byte CDBs have no FUA bit */
        return false;
    }
}

/*
 * Finish a write: if FUA must be emulated, issue a flush (which completes
 * the request from scsi_aio_complete); otherwise complete immediately.
 */
static void scsi_write_do_fua(SCSIDiskReq *r)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb == NULL);
    assert(!r->req.io_canceled);

    if (r->need_fua_emulation) {
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
        return;
    }

    scsi_req_complete(&r->req, GOOD);
    scsi_req_unref(&r->req);
}

/* Completion of a full scatter/gather DMA transfer (no bounce buffer). */
static void scsi_dma_complete_noio(SCSIDiskReq *r, int ret)
{
    assert(r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    /* The whole transfer completed in one go. */
    r->sector += r->sector_count;
    r->sector_count = 0;
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        scsi_write_do_fua(r);
        return;
    } else {
        scsi_req_complete(&r->req, GOOD);
    }

done:
    scsi_req_unref(&r->req);
}

/* AIO callback for dma_blk_io: account, then hand off to the _noio path. */
static void scsi_dma_complete(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    }
    scsi_dma_complete_noio(r, ret);
}

/* Completion of one bounce-buffer read chunk; pushes data to the HBA. */
static void scsi_read_complete_noio(SCSIDiskReq *r, int ret)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    uint32_t n;

    /* The request must only run in the BlockBackend's AioContext */
    assert(blk_get_aio_context(s->qdev.conf.blk) ==
           qemu_get_current_aio_context());

    assert(r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    n = r->qiov.size / BDRV_SECTOR_SIZE;
    r->sector += n;
    r->sector_count -= n;
    scsi_req_data(&r->req, r->qiov.size);

done:
    scsi_req_unref(&r->req);
}

/* AIO callback for bounce-buffer reads: account, then continue. */
static void scsi_read_complete(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
        trace_scsi_disk_read_complete(r->req.tag, r->qiov.size);
    }
    scsi_read_complete_noio(r, ret);
}

/* Actually issue a read to the block device.  */
static void scsi_do_read(SCSIDiskReq *r, int ret)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));

    assert (r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    /* The request is used as the AIO opaque value, so add a ref.
*/
    scsi_req_ref(&r->req);

    if (r->req.sg) {
        /* HBA provided a scatter/gather list: DMA straight to guest memory. */
        dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_READ);
        r->req.residual -= r->req.sg->size;
        r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
                                  r->req.sg, r->sector << BDRV_SECTOR_BITS,
                                  BDRV_SECTOR_SIZE,
                                  sdc->dma_readv, r, scsi_dma_complete, r,
                                  DMA_DIRECTION_FROM_DEVICE);
    } else {
        /* No s/g list: read chunk-by-chunk through the bounce buffer. */
        scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         r->qiov.size, BLOCK_ACCT_READ);
        r->req.aiocb = sdc->dma_readv(r->sector << BDRV_SECTOR_BITS, &r->qiov,
                                      scsi_read_complete, r, r);
    }

done:
    scsi_req_unref(&r->req);
}

/* Flush-completion callback used when FUA is emulated before a read. */
static void scsi_do_read_cb(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert (r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    }
    scsi_do_read(opaque, ret);
}

/* Read more data from scsi device into buffer.  */
static void scsi_read_data(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    bool first;

    trace_scsi_disk_read_data_count(r->sector_count);
    if (r->sector_count == 0) {
        /* This also clears the sense buffer for REQUEST SENSE.  */
        scsi_req_complete(&r->req, GOOD);
        return;
    }

    /* No data transfer may already be in progress */
    assert(r->req.aiocb == NULL);

    /* The request is used as the AIO opaque value, so add a ref.
*/
    scsi_req_ref(&r->req);
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        /* A read request cannot transfer toward the device. */
        trace_scsi_disk_read_data_invalid();
        scsi_read_complete_noio(r, -EINVAL);
        return;
    }

    if (!blk_is_available(req->dev->conf.blk)) {
        scsi_read_complete_noio(r, -ENOMEDIUM);
        return;
    }

    first = !r->started;
    r->started = true;
    if (first && r->need_fua_emulation) {
        /* Emulated FUA read: flush once before the first chunk. */
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_do_read_cb, r);
    } else {
        scsi_do_read(r, 0);
    }
}

/*
 * Completion of one bounce-buffer write chunk; either finishes the request
 * (possibly with an emulated-FUA flush) or asks the HBA for more data.
 */
static void scsi_write_complete_noio(SCSIDiskReq *r, int ret)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    uint32_t n;

    /* The request must only run in the BlockBackend's AioContext */
    assert(blk_get_aio_context(s->qdev.conf.blk) ==
           qemu_get_current_aio_context());

    assert (r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    n = r->qiov.size / BDRV_SECTOR_SIZE;
    r->sector += n;
    r->sector_count -= n;
    if (r->sector_count == 0) {
        scsi_write_do_fua(r);
        return;
    } else {
        scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
        trace_scsi_disk_write_complete_noio(r->req.tag, r->qiov.size);
        scsi_req_data(&r->req, r->qiov.size);
    }

done:
    scsi_req_unref(&r->req);
}

/* AIO callback for bounce-buffer writes: account, then continue. */
static void scsi_write_complete(void * opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert (r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    }
    scsi_write_complete_noio(r, ret);
}

/* Entry point from the HBA: more write data is available for the request. */
static void scsi_write_data(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));

    /* No data transfer may already be in progress */
    assert(r->req.aiocb == NULL);

    /* The request is used as the AIO opaque value, so add a ref. */
    scsi_req_ref(&r->req);
    if (r->req.cmd.mode != SCSI_XFER_TO_DEV) {
        trace_scsi_disk_write_data_invalid();
        scsi_write_complete_noio(r, -EINVAL);
        return;
    }

    if (!r->req.sg && !r->qiov.size) {
        /* Called for the first time.  Ask the driver to send us more data.  */
        r->started = true;
        scsi_write_complete_noio(r, 0);
        return;
    }
    if (!blk_is_available(req->dev->conf.blk)) {
        scsi_write_complete_noio(r, -ENOMEDIUM);
        return;
    }

    if (r->req.cmd.buf[0] == VERIFY_10 || r->req.cmd.buf[0] == VERIFY_12 ||
        r->req.cmd.buf[0] == VERIFY_16) {
        /* VERIFY is emulated as a no-op: accept the data, write nothing. */
        if (r->req.sg) {
            scsi_dma_complete_noio(r, 0);
        } else {
            scsi_write_complete_noio(r, 0);
        }
        return;
    }

    if (r->req.sg) {
        /* HBA provided a scatter/gather list: DMA straight from guest memory. */
        dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_WRITE);
        r->req.residual -= r->req.sg->size;
        r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
                                  r->req.sg, r->sector << BDRV_SECTOR_BITS,
                                  BDRV_SECTOR_SIZE,
                                  sdc->dma_writev, r, scsi_dma_complete, r,
                                  DMA_DIRECTION_TO_DEVICE);
    } else {
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         r->qiov.size, BLOCK_ACCT_WRITE);
        r->req.aiocb = sdc->dma_writev(r->sector << BDRV_SECTOR_BITS, &r->qiov,
                                       scsi_write_complete, r, r);
    }
}

/* Return a pointer to the data buffer.
*/
static uint8_t *scsi_get_buf(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    return (uint8_t *)r->iov.iov_base;
}

/*
 * Build one INQUIRY Vital Product Data page into outbuf.
 * Returns the total page length in bytes, or -1 for an unsupported page
 * (the caller turns that into CHECK CONDITION).  The 4-byte VPD header is
 * written first; outbuf[3] (page length) is patched at the end.
 */
static int scsi_disk_emulate_vpd_page(SCSIRequest *req, uint8_t *outbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    uint8_t page_code = req->cmd.buf[2];
    int start, buflen = 0;

    /* Common 4-byte VPD page header */
    outbuf[buflen++] = s->qdev.type & 0x1f;
    outbuf[buflen++] = page_code;
    outbuf[buflen++] = 0x00;
    outbuf[buflen++] = 0x00;
    start = buflen;

    switch (page_code) {
    case 0x00: /* Supported page codes, mandatory */
    {
        trace_scsi_disk_emulate_vpd_page_00(req->cmd.xfer);
        outbuf[buflen++] = 0x00; /* list of supported pages (this page) */
        if (s->serial) {
            outbuf[buflen++] = 0x80; /* unit serial number */
        }
        outbuf[buflen++] = 0x83; /* device identification */
        if (s->qdev.type == TYPE_DISK) {
            outbuf[buflen++] = 0xb0; /* block limits */
            outbuf[buflen++] = 0xb1; /* block device characteristics */
            outbuf[buflen++] = 0xb2; /* thin provisioning */
        }
        break;
    }
    case 0x80: /* Device serial number, optional */
    {
        int l;

        if (!s->serial) {
            trace_scsi_disk_emulate_vpd_page_80_not_supported();
            return -1;
        }

        l = strlen(s->serial);
        if (l > 36) {
            l = 36;  /* truncate overly long serial numbers */
        }

        trace_scsi_disk_emulate_vpd_page_80(req->cmd.xfer);
        memcpy(outbuf + buflen, s->serial, l);
        buflen += l;
        break;
    }

    case 0x83: /* Device identification page, mandatory */
    {
        /* cap so that descriptor header + data fit in the 255-byte page */
        int id_len = s->device_id ? MIN(strlen(s->device_id), 255 - 8) : 0;

        trace_scsi_disk_emulate_vpd_page_83(req->cmd.xfer);

        if (id_len) {
            outbuf[buflen++] = 0x2;    /* ASCII */
            outbuf[buflen++] = 0;      /* not officially assigned */
            outbuf[buflen++] = 0;      /* reserved */
            outbuf[buflen++] = id_len; /* length of data following */
            memcpy(outbuf + buflen, s->device_id, id_len);
            buflen += id_len;
        }

        if (s->qdev.wwn) {
            outbuf[buflen++] = 0x1; /* Binary */
            outbuf[buflen++] = 0x3; /* NAA */
            outbuf[buflen++] = 0;   /* reserved */
            outbuf[buflen++] = 8;
            stq_be_p(&outbuf[buflen], s->qdev.wwn);
            buflen += 8;
        }

        if (s->qdev.port_wwn) {
            outbuf[buflen++] = 0x61; /* SAS / Binary */
            outbuf[buflen++] = 0x93; /* PIV / Target port / NAA */
            outbuf[buflen++] = 0;    /* reserved */
            outbuf[buflen++] = 8;
            stq_be_p(&outbuf[buflen], s->qdev.port_wwn);
            buflen += 8;
        }

        if (s->port_index) {
            outbuf[buflen++] = 0x61; /* SAS / Binary */

            /* PIV/Target port/relative target port */
            outbuf[buflen++] = 0x94;

            outbuf[buflen++] = 0; /* reserved */
            outbuf[buflen++] = 4;
            /* identifier occupies the last 2 of the 4 payload bytes */
            stw_be_p(&outbuf[buflen + 2], s->port_index);
            buflen += 4;
        }
        break;
    }
    case 0xb0: /* block limits */
    {
        SCSIBlockLimits bl = {};

        if (s->qdev.type == TYPE_ROM) {
            trace_scsi_disk_emulate_vpd_page_b0_not_supported();
            return -1;
        }
        bl.wsnz = 1;
        bl.unmap_sectors =
            s->qdev.conf.discard_granularity / s->qdev.blocksize;
        bl.min_io_size =
            s->qdev.conf.min_io_size / s->qdev.blocksize;
        bl.opt_io_size =
            s->qdev.conf.opt_io_size / s->qdev.blocksize;
        bl.max_unmap_sectors =
            s->max_unmap_size / s->qdev.blocksize;
        bl.max_io_sectors =
            s->max_io_size / s->qdev.blocksize;
        /* 255 descriptors fit in 4 KiB with an 8-byte header */
        bl.max_unmap_descr = 255;

        if (s->qdev.type == TYPE_DISK) {
            /* also honor the BlockBackend's own transfer limit */
            int max_transfer_blk = blk_get_max_transfer(s->qdev.conf.blk);
            int max_io_sectors_blk =
                max_transfer_blk / s->qdev.blocksize;

            bl.max_io_sectors =
                MIN_NON_ZERO(max_io_sectors_blk, bl.max_io_sectors);
        }
        buflen += scsi_emulate_block_limits(outbuf + buflen, &bl);
        break;
    }
    case 0xb1: /* block device characteristics */
    {
        buflen = 0x40;
        outbuf[4] = (s->rotation_rate >> 8) & 0xff;
        outbuf[5] = s->rotation_rate & 0xff;
        outbuf[6] = 0; /* PRODUCT TYPE */
        outbuf[7] = 0; /* WABEREQ | WACEREQ | NOMINAL FORM FACTOR */
        outbuf[8] = 0; /* VBULS */
        break;
    }
    case 0xb2: /* thin provisioning */
    {
        buflen = 8;
        outbuf[4] = 0;
        outbuf[5] = 0xe0; /* unmap & write_same 10/16 all supported */
        outbuf[6] = s->qdev.conf.discard_granularity ? 2 : 1;
        outbuf[7] = 0;
        break;
    }
    default:
        return -1;
    }
    /* done with EVPD */
    assert(buflen - start <= 255);
    outbuf[start - 1] = buflen - start;  /* patch PAGE LENGTH */
    return buflen;
}

/*
 * Emulate INQUIRY: dispatch to the VPD builder when EVPD is set, otherwise
 * fill in standard INQUIRY data.  Returns the response length or -1 on an
 * invalid CDB field.
 */
static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    int buflen = 0;

    if (req->cmd.buf[1] & 0x1) {
        /* Vital product data */
        return scsi_disk_emulate_vpd_page(req, outbuf);
    }

    /* Standard INQUIRY data */
    if (req->cmd.buf[2] != 0) {
        /* PAGE CODE must be zero when EVPD is clear */
        return -1;
    }

    /* PAGE CODE == 0 */
    buflen = req->cmd.xfer;
    if (buflen > SCSI_MAX_INQUIRY_LEN) {
        buflen = SCSI_MAX_INQUIRY_LEN;
    }

    outbuf[0] = s->qdev.type & 0x1f;
    outbuf[1] = (s->features & (1 << SCSI_DISK_F_REMOVABLE)) ? 0x80 : 0;

    strpadcpy((char *) &outbuf[16], 16, s->product, ' ');
    strpadcpy((char *) &outbuf[8], 8, s->vendor, ' ');

    memset(&outbuf[32], 0, 4);
    memcpy(&outbuf[32], s->version, MIN(4, strlen(s->version)));
    /*
     * We claim conformance to SPC-3, which is required for guests
     * to ask for modern features like READ CAPACITY(16) or the
     * block characteristics VPD page by default.
 * Not all of SPC-3 is actually implemented, but we're good enough.
     */
    outbuf[2] = s->qdev.default_scsi_version;
    outbuf[3] = 2 | 0x10; /* Format 2, HiSup */

    if (buflen > 36) {
        outbuf[4] = buflen - 5; /* Additional Length = (Len - 1) - 4 */
    } else {
        /* If the allocation length of CDB is too small,
           the additional length is not adjusted */
        outbuf[4] = 36 - 5;
    }

    /* Sync data transfer and TCQ.  */
    outbuf[7] = 0x10 | (req->bus->info->tcq ? 0x02 : 0);
    return buflen;
}

/* CD-ROM with a medium larger than CD_MAX_SECTORS is reported as DVD. */
static inline bool media_is_dvd(SCSIDiskState *s)
{
    uint64_t nb_sectors;
    if (s->qdev.type != TYPE_ROM) {
        return false;
    }
    if (!blk_is_available(s->qdev.conf.blk)) {
        return false;
    }
    blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
    return nb_sectors > CD_MAX_SECTORS;
}

/* Counterpart of media_is_dvd: medium fits within CD capacity. */
static inline bool media_is_cd(SCSIDiskState *s)
{
    uint64_t nb_sectors;
    if (s->qdev.type != TYPE_ROM) {
        return false;
    }
    if (!blk_is_available(s->qdev.conf.blk)) {
        return false;
    }
    blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
    return nb_sectors <= CD_MAX_SECTORS;
}

/*
 * Emulate READ DISC INFORMATION (MMC) with fixed "finalized single-session
 * disc" data.  Returns the response length (34) or -1 on error.
 */
static int scsi_read_disc_information(SCSIDiskState *s, SCSIDiskReq *r,
                                      uint8_t *outbuf)
{
    uint8_t type = r->req.cmd.buf[1] & 7;

    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }

    /* Types 1/2 are only defined for Blu-Ray.  */
    if (type != 0) {
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        return -1;
    }

    memset(outbuf, 0, 34);
    outbuf[1] = 32;
    outbuf[2] = 0xe;  /* last session complete, disc finalized */
    outbuf[3] = 1;    /* first track on disc */
    outbuf[4] = 1;    /* # of sessions */
    outbuf[5] = 1;    /* first track of last session */
    outbuf[6] = 1;    /* last track of last session */
    outbuf[7] = 0x20; /* unrestricted use */
    outbuf[8] = 0x00; /* CD-ROM or DVD-ROM */
    /* 9-10-11: most significant byte corresponding bytes 4-5-6 */
    /* 12-23: not meaningful for CD-ROM or DVD-ROM */
    /* 24-31: disc bar code */
    /* 32: disc application code */
    /* 33: number of OPC tables */

    return 34;
}

/*
 * Emulate READ DVD STRUCTURE (MMC).  Supports formats 0x00 (physical),
 * 0x01/0x04 (all-zero), and 0xff (capability list); returns the response
 * length, or -1 after optionally raising CHECK CONDITION.
 */
static int scsi_read_dvd_structure(SCSIDiskState *s, SCSIDiskReq *r,
                                   uint8_t *outbuf)
{
    /* Response size per supported format; unlisted formats are invalid. */
    static const int rds_caps_size[5] = {
        [0] = 2048 + 4,
        [1] = 4 + 4,
        [3] = 188 + 4,
        [4] = 2048 + 4,
    };

    uint8_t media = r->req.cmd.buf[1];
    uint8_t layer = r->req.cmd.buf[6];
    uint8_t format = r->req.cmd.buf[7];
    int size = -1;

    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }
    if (media != 0) {
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        return -1;
    }

    if (format != 0xff) {
        if (!blk_is_available(s->qdev.conf.blk)) {
            scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
            return -1;
        }
        if (media_is_cd(s)) {
            /* DVD structures make no sense for a CD medium */
            scsi_check_condition(r, SENSE_CODE(INCOMPATIBLE_FORMAT));
            return -1;
        }
        if (format >= ARRAY_SIZE(rds_caps_size)) {
            return -1;
        }
        size = rds_caps_size[format];
        memset(outbuf, 0, size);
    }

    switch (format) {
    case 0x00: {
        /* Physical format information */
        uint64_t nb_sectors;
        if (layer != 0) {
            goto fail;
        }
        blk_get_geometry(s->qdev.conf.blk, &nb_sectors);

        outbuf[4] = 1;   /* DVD-ROM, part version 1 */
        outbuf[5] = 0xf; /* 120mm disc, minimum rate unspecified */
        outbuf[6] = 1;   /* one layer, read-only (per MMC-2 spec) */
        outbuf[7] = 0;   /* default densities */

        /* sector counts are in DVD (2048-byte) blocks: 512-byte sectors / 4 */
        stl_be_p(&outbuf[12], (nb_sectors >> 2) - 1); /* end sector */
        stl_be_p(&outbuf[16], (nb_sectors >> 2) - 1); /* l0 end sector */
        break;
    }

    case 0x01: /* DVD copyright information, all zeros */
        break;

    case 0x03: /* BCA information - invalid field for no BCA info */
        return -1;

    case 0x04: /* DVD disc manufacturing information, all zeros */
        break;

    case 0xff: { /* List capabilities */
        int i;
        size = 4;
        for (i = 0; i < ARRAY_SIZE(rds_caps_size); i++) {
            if (!rds_caps_size[i]) {
                continue;
            }
            outbuf[size] = i;
            outbuf[size + 1] = 0x40; /* Not writable, readable */
            stw_be_p(&outbuf[size + 2], rds_caps_size[i]);
            size += 4;
        }
        break;
    }

    default:
        return -1;
    }

    /* Size of buffer, not including 2 byte size field */
    stw_be_p(outbuf, size - 2);
    return size;

fail:
    return -1;
}

/*
 * Build a GET EVENT STATUS NOTIFICATION media event descriptor.
 * Consumes (clears) any pending media-change/eject-request event.
 * Returns the descriptor length (4).
 */
static int scsi_event_status_media(SCSIDiskState *s, uint8_t *outbuf)
{
    uint8_t event_code, media_status;

    media_status = 0;
    if (s->tray_open) {
        media_status = MS_TRAY_OPEN;
    } else if (blk_is_inserted(s->qdev.conf.blk)) {
        media_status = MS_MEDIA_PRESENT;
    }

    /* Event notification descriptor */
    event_code = MEC_NO_CHANGE;
    if (media_status != MS_TRAY_OPEN) {
        if (s->media_event) {
            event_code = MEC_NEW_MEDIA;
            s->media_event = false;
        } else if (s->eject_request) {
            event_code = MEC_EJECT_REQUESTED;
            s->eject_request = false;
        }
    }

    outbuf[0] = event_code;
    outbuf[1] = media_status;

    /* These fields are reserved, just clear them.  */
    outbuf[2] = 0;
    outbuf[3] = 0;
    return 4;
}

/*
 * Emulate GET EVENT STATUS NOTIFICATION (MMC).  Only the polled form and
 * the media event class are supported.  Returns the response length or -1.
 */
static int scsi_get_event_status_notification(SCSIDiskState *s, SCSIDiskReq *r,
                                              uint8_t *outbuf)
{
    int size;
    uint8_t *buf = r->req.cmd.buf;
    uint8_t notification_class_request = buf[4];
    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }
    if ((buf[1] & 1) == 0) {
        /* asynchronous */
        return -1;
    }

    size = 4;
    outbuf[0] = outbuf[1] = 0;
    outbuf[3] = 1 << GESN_MEDIA; /* supported events */
    if (notification_class_request & (1 << GESN_MEDIA)) {
        outbuf[2] = GESN_MEDIA;
        size += scsi_event_status_media(s, &outbuf[size]);
    } else {
        outbuf[2] = 0x80; /* No Event Available */
    }
    stw_be_p(outbuf, size - 4); /* Event Data Length */
    return size;
}

/*
 * Emulate GET CONFIGURATION (MMC): report the current profile (CD/DVD/none)
 * plus the Profile List, Core and Removable Medium features.
 */
static int scsi_get_configuration(SCSIDiskState *s, uint8_t *outbuf)
{
    int current;

    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }

    if (media_is_dvd(s)) {
        current = MMC_PROFILE_DVD_ROM;
    } else if (media_is_cd(s)) {
        current = MMC_PROFILE_CD_ROM;
    } else {
        current = MMC_PROFILE_NONE;
    }

    memset(outbuf, 0, 40);
    stl_be_p(&outbuf[0], 36); /* Bytes after the data length field */
    stw_be_p(&outbuf[6], current);
    /* outbuf[8] - outbuf[19]: Feature 0 - Profile list */
    outbuf[10] = 0x03; /* persistent, current */
    outbuf[11] = 8;    /* two profiles */
    stw_be_p(&outbuf[12], MMC_PROFILE_DVD_ROM);
    outbuf[14] = (current == MMC_PROFILE_DVD_ROM);
    stw_be_p(&outbuf[16], MMC_PROFILE_CD_ROM);
    outbuf[18] = (current == MMC_PROFILE_CD_ROM);
    /* outbuf[20] - outbuf[31]: Feature 1 - Core feature */
    stw_be_p(&outbuf[20], 1);
    outbuf[22] = 0x08 | 0x03; /* version 2, persistent, current */
    outbuf[23] = 8;
    stl_be_p(&outbuf[24], 1); /* SCSI */
    outbuf[28] = 1; /* DBE = 1, mandatory */
    /* outbuf[32] - outbuf[39]: Feature 3 - Removable media feature */
    stw_be_p(&outbuf[32], 3);
1065 outbuf[34] = 0x08 | 0x03; /* version 2, persistent, current */ 1066 outbuf[35] = 4; 1067 outbuf[36] = 0x39; /* tray, load=1, eject=1, unlocked at powerup, lock=1 */ 1068 /* TODO: Random readable, CD read, DVD read, drive serial number, 1069 power management */ 1070 return 40; 1071 } 1072 1073 static int scsi_emulate_mechanism_status(SCSIDiskState *s, uint8_t *outbuf) 1074 { 1075 if (s->qdev.type != TYPE_ROM) { 1076 return -1; 1077 } 1078 memset(outbuf, 0, 8); 1079 outbuf[5] = 1; /* CD-ROM */ 1080 return 8; 1081 } 1082 1083 static int mode_sense_page(SCSIDiskState *s, int page, uint8_t **p_outbuf, 1084 int page_control) 1085 { 1086 static const int mode_sense_valid[0x3f] = { 1087 [MODE_PAGE_VENDOR_SPECIFIC] = (1 << TYPE_DISK) | (1 << TYPE_ROM), 1088 [MODE_PAGE_HD_GEOMETRY] = (1 << TYPE_DISK), 1089 [MODE_PAGE_FLEXIBLE_DISK_GEOMETRY] = (1 << TYPE_DISK), 1090 [MODE_PAGE_CACHING] = (1 << TYPE_DISK) | (1 << TYPE_ROM), 1091 [MODE_PAGE_R_W_ERROR] = (1 << TYPE_DISK) | (1 << TYPE_ROM), 1092 [MODE_PAGE_AUDIO_CTL] = (1 << TYPE_ROM), 1093 [MODE_PAGE_CAPABILITIES] = (1 << TYPE_ROM), 1094 [MODE_PAGE_APPLE_VENDOR] = (1 << TYPE_ROM), 1095 }; 1096 1097 uint8_t *p = *p_outbuf + 2; 1098 int length; 1099 1100 assert(page < ARRAY_SIZE(mode_sense_valid)); 1101 if ((mode_sense_valid[page] & (1 << s->qdev.type)) == 0) { 1102 return -1; 1103 } 1104 1105 /* 1106 * If Changeable Values are requested, a mask denoting those mode parameters 1107 * that are changeable shall be returned. As we currently don't support 1108 * parameter changes via MODE_SELECT all bits are returned set to zero. 1109 * The buffer was already menset to zero by the caller of this function. 1110 * 1111 * The offsets here are off by two compared to the descriptions in the 1112 * SCSI specs, because those include a 2-byte header. This is unfortunate, 1113 * but it is done so that offsets are consistent within our implementation 1114 * of MODE SENSE and MODE SELECT. 
MODE SELECT has to deal with both 1115 * 2-byte and 4-byte headers. 1116 */ 1117 switch (page) { 1118 case MODE_PAGE_HD_GEOMETRY: 1119 length = 0x16; 1120 if (page_control == 1) { /* Changeable Values */ 1121 break; 1122 } 1123 /* if a geometry hint is available, use it */ 1124 p[0] = (s->qdev.conf.cyls >> 16) & 0xff; 1125 p[1] = (s->qdev.conf.cyls >> 8) & 0xff; 1126 p[2] = s->qdev.conf.cyls & 0xff; 1127 p[3] = s->qdev.conf.heads & 0xff; 1128 /* Write precomp start cylinder, disabled */ 1129 p[4] = (s->qdev.conf.cyls >> 16) & 0xff; 1130 p[5] = (s->qdev.conf.cyls >> 8) & 0xff; 1131 p[6] = s->qdev.conf.cyls & 0xff; 1132 /* Reduced current start cylinder, disabled */ 1133 p[7] = (s->qdev.conf.cyls >> 16) & 0xff; 1134 p[8] = (s->qdev.conf.cyls >> 8) & 0xff; 1135 p[9] = s->qdev.conf.cyls & 0xff; 1136 /* Device step rate [ns], 200ns */ 1137 p[10] = 0; 1138 p[11] = 200; 1139 /* Landing zone cylinder */ 1140 p[12] = 0xff; 1141 p[13] = 0xff; 1142 p[14] = 0xff; 1143 /* Medium rotation rate [rpm], 5400 rpm */ 1144 p[18] = (5400 >> 8) & 0xff; 1145 p[19] = 5400 & 0xff; 1146 break; 1147 1148 case MODE_PAGE_FLEXIBLE_DISK_GEOMETRY: 1149 length = 0x1e; 1150 if (page_control == 1) { /* Changeable Values */ 1151 break; 1152 } 1153 /* Transfer rate [kbit/s], 5Mbit/s */ 1154 p[0] = 5000 >> 8; 1155 p[1] = 5000 & 0xff; 1156 /* if a geometry hint is available, use it */ 1157 p[2] = s->qdev.conf.heads & 0xff; 1158 p[3] = s->qdev.conf.secs & 0xff; 1159 p[4] = s->qdev.blocksize >> 8; 1160 p[6] = (s->qdev.conf.cyls >> 8) & 0xff; 1161 p[7] = s->qdev.conf.cyls & 0xff; 1162 /* Write precomp start cylinder, disabled */ 1163 p[8] = (s->qdev.conf.cyls >> 8) & 0xff; 1164 p[9] = s->qdev.conf.cyls & 0xff; 1165 /* Reduced current start cylinder, disabled */ 1166 p[10] = (s->qdev.conf.cyls >> 8) & 0xff; 1167 p[11] = s->qdev.conf.cyls & 0xff; 1168 /* Device step rate [100us], 100us */ 1169 p[12] = 0; 1170 p[13] = 1; 1171 /* Device step pulse width [us], 1us */ 1172 p[14] = 1; 1173 /* Device head settle 
delay [100us], 100us */ 1174 p[15] = 0; 1175 p[16] = 1; 1176 /* Motor on delay [0.1s], 0.1s */ 1177 p[17] = 1; 1178 /* Motor off delay [0.1s], 0.1s */ 1179 p[18] = 1; 1180 /* Medium rotation rate [rpm], 5400 rpm */ 1181 p[26] = (5400 >> 8) & 0xff; 1182 p[27] = 5400 & 0xff; 1183 break; 1184 1185 case MODE_PAGE_CACHING: 1186 length = 0x12; 1187 if (page_control == 1 || /* Changeable Values */ 1188 blk_enable_write_cache(s->qdev.conf.blk)) { 1189 p[0] = 4; /* WCE */ 1190 } 1191 break; 1192 1193 case MODE_PAGE_R_W_ERROR: 1194 length = 10; 1195 if (page_control == 1) { /* Changeable Values */ 1196 if (s->qdev.type == TYPE_ROM) { 1197 /* Automatic Write Reallocation Enabled */ 1198 p[0] = 0x80; 1199 } 1200 break; 1201 } 1202 p[0] = 0x80; /* Automatic Write Reallocation Enabled */ 1203 if (s->qdev.type == TYPE_ROM) { 1204 p[1] = 0x20; /* Read Retry Count */ 1205 } 1206 break; 1207 1208 case MODE_PAGE_AUDIO_CTL: 1209 length = 14; 1210 break; 1211 1212 case MODE_PAGE_CAPABILITIES: 1213 length = 0x14; 1214 if (page_control == 1) { /* Changeable Values */ 1215 break; 1216 } 1217 1218 p[0] = 0x3b; /* CD-R & CD-RW read */ 1219 p[1] = 0; /* Writing not supported */ 1220 p[2] = 0x7f; /* Audio, composite, digital out, 1221 mode 2 form 1&2, multi session */ 1222 p[3] = 0xff; /* CD DA, DA accurate, RW supported, 1223 RW corrected, C2 errors, ISRC, 1224 UPC, Bar code */ 1225 p[4] = 0x2d | (s->tray_locked ? 
2 : 0); 1226 /* Locking supported, jumper present, eject, tray */ 1227 p[5] = 0; /* no volume & mute control, no 1228 changer */ 1229 p[6] = (50 * 176) >> 8; /* 50x read speed */ 1230 p[7] = (50 * 176) & 0xff; 1231 p[8] = 2 >> 8; /* Two volume levels */ 1232 p[9] = 2 & 0xff; 1233 p[10] = 2048 >> 8; /* 2M buffer */ 1234 p[11] = 2048 & 0xff; 1235 p[12] = (16 * 176) >> 8; /* 16x read speed current */ 1236 p[13] = (16 * 176) & 0xff; 1237 p[16] = (16 * 176) >> 8; /* 16x write speed */ 1238 p[17] = (16 * 176) & 0xff; 1239 p[18] = (16 * 176) >> 8; /* 16x write speed current */ 1240 p[19] = (16 * 176) & 0xff; 1241 break; 1242 1243 case MODE_PAGE_APPLE_VENDOR: 1244 if (s->quirks & (1 << SCSI_DISK_QUIRK_MODE_PAGE_APPLE_VENDOR)) { 1245 length = 0x1e; 1246 if (page_control == 1) { /* Changeable Values */ 1247 break; 1248 } 1249 1250 memset(p, 0, length); 1251 strcpy((char *)p + 8, "APPLE COMPUTER, INC "); 1252 break; 1253 } else { 1254 return -1; 1255 } 1256 1257 case MODE_PAGE_VENDOR_SPECIFIC: 1258 if (s->qdev.type == TYPE_DISK && (s->quirks & 1259 (1 << SCSI_DISK_QUIRK_MODE_PAGE_VENDOR_SPECIFIC_APPLE))) { 1260 length = 0x2; 1261 if (page_control == 1) { /* Changeable Values */ 1262 p[0] = 0xff; 1263 p[1] = 0xff; 1264 break; 1265 } 1266 p[0] = 0; 1267 p[1] = 0; 1268 break; 1269 } else { 1270 return -1; 1271 } 1272 1273 default: 1274 return -1; 1275 } 1276 1277 assert(length < 256); 1278 (*p_outbuf)[0] = page; 1279 (*p_outbuf)[1] = length; 1280 *p_outbuf += length + 2; 1281 return length + 2; 1282 } 1283 1284 static int scsi_disk_emulate_mode_sense(SCSIDiskReq *r, uint8_t *outbuf) 1285 { 1286 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1287 uint64_t nb_sectors; 1288 bool dbd; 1289 int page, buflen, ret, page_control; 1290 uint8_t *p; 1291 uint8_t dev_specific_param; 1292 1293 dbd = (r->req.cmd.buf[1] & 0x8) != 0; 1294 page = r->req.cmd.buf[2] & 0x3f; 1295 page_control = (r->req.cmd.buf[2] & 0xc0) >> 6; 1296 1297 
trace_scsi_disk_emulate_mode_sense((r->req.cmd.buf[0] == MODE_SENSE) ? 6 : 1298 10, page, r->req.cmd.xfer, page_control); 1299 memset(outbuf, 0, r->req.cmd.xfer); 1300 p = outbuf; 1301 1302 if (s->qdev.type == TYPE_DISK) { 1303 dev_specific_param = s->features & (1 << SCSI_DISK_F_DPOFUA) ? 0x10 : 0; 1304 if (!blk_is_writable(s->qdev.conf.blk)) { 1305 dev_specific_param |= 0x80; /* Readonly. */ 1306 } 1307 } else { 1308 if (s->quirks & (1 << SCSI_DISK_QUIRK_MODE_SENSE_ROM_USE_DBD)) { 1309 /* Use DBD from the request... */ 1310 dev_specific_param = 0x00; 1311 1312 /* 1313 * ... unless we receive a request for MODE_PAGE_APPLE_VENDOR 1314 * which should never return a block descriptor even though DBD is 1315 * not set, otherwise CDROM detection fails in MacOS 1316 */ 1317 if (s->quirks & (1 << SCSI_DISK_QUIRK_MODE_PAGE_APPLE_VENDOR) && 1318 page == MODE_PAGE_APPLE_VENDOR) { 1319 dbd = true; 1320 } 1321 } else { 1322 /* 1323 * MMC prescribes that CD/DVD drives have no block descriptors, 1324 * and defines no device-specific parameter. 1325 */ 1326 dev_specific_param = 0x00; 1327 dbd = true; 1328 } 1329 } 1330 1331 if (r->req.cmd.buf[0] == MODE_SENSE) { 1332 p[1] = 0; /* Default media type. */ 1333 p[2] = dev_specific_param; 1334 p[3] = 0; /* Block descriptor length. */ 1335 p += 4; 1336 } else { /* MODE_SENSE_10 */ 1337 p[2] = 0; /* Default media type. */ 1338 p[3] = dev_specific_param; 1339 p[6] = p[7] = 0; /* Block descriptor length. 
*/ 1340 p += 8; 1341 } 1342 1343 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 1344 if (!dbd && nb_sectors) { 1345 if (r->req.cmd.buf[0] == MODE_SENSE) { 1346 outbuf[3] = 8; /* Block descriptor length */ 1347 } else { /* MODE_SENSE_10 */ 1348 outbuf[7] = 8; /* Block descriptor length */ 1349 } 1350 nb_sectors /= (s->qdev.blocksize / BDRV_SECTOR_SIZE); 1351 if (nb_sectors > 0xffffff) { 1352 nb_sectors = 0; 1353 } 1354 p[0] = 0; /* media density code */ 1355 p[1] = (nb_sectors >> 16) & 0xff; 1356 p[2] = (nb_sectors >> 8) & 0xff; 1357 p[3] = nb_sectors & 0xff; 1358 p[4] = 0; /* reserved */ 1359 p[5] = 0; /* bytes 5-7 are the sector size in bytes */ 1360 p[6] = s->qdev.blocksize >> 8; 1361 p[7] = 0; 1362 p += 8; 1363 } 1364 1365 if (page_control == 3) { 1366 /* Saved Values */ 1367 scsi_check_condition(r, SENSE_CODE(SAVING_PARAMS_NOT_SUPPORTED)); 1368 return -1; 1369 } 1370 1371 if (page == 0x3f) { 1372 for (page = 0; page <= 0x3e; page++) { 1373 mode_sense_page(s, page, &p, page_control); 1374 } 1375 } else { 1376 ret = mode_sense_page(s, page, &p, page_control); 1377 if (ret == -1) { 1378 return -1; 1379 } 1380 } 1381 1382 buflen = p - outbuf; 1383 /* 1384 * The mode data length field specifies the length in bytes of the 1385 * following data that is available to be transferred. The mode data 1386 * length does not include itself. 
1387 */ 1388 if (r->req.cmd.buf[0] == MODE_SENSE) { 1389 outbuf[0] = buflen - 1; 1390 } else { /* MODE_SENSE_10 */ 1391 outbuf[0] = ((buflen - 2) >> 8) & 0xff; 1392 outbuf[1] = (buflen - 2) & 0xff; 1393 } 1394 return buflen; 1395 } 1396 1397 static int scsi_disk_emulate_read_toc(SCSIRequest *req, uint8_t *outbuf) 1398 { 1399 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 1400 int start_track, format, msf, toclen; 1401 uint64_t nb_sectors; 1402 1403 msf = req->cmd.buf[1] & 2; 1404 format = req->cmd.buf[2] & 0xf; 1405 start_track = req->cmd.buf[6]; 1406 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 1407 trace_scsi_disk_emulate_read_toc(start_track, format, msf >> 1); 1408 nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE; 1409 switch (format) { 1410 case 0: 1411 toclen = cdrom_read_toc(nb_sectors, outbuf, msf, start_track); 1412 break; 1413 case 1: 1414 /* multi session : only a single session defined */ 1415 toclen = 12; 1416 memset(outbuf, 0, 12); 1417 outbuf[1] = 0x0a; 1418 outbuf[2] = 0x01; 1419 outbuf[3] = 0x01; 1420 break; 1421 case 2: 1422 toclen = cdrom_read_toc_raw(nb_sectors, outbuf, msf, start_track); 1423 break; 1424 default: 1425 return -1; 1426 } 1427 return toclen; 1428 } 1429 1430 static int scsi_disk_emulate_start_stop(SCSIDiskReq *r) 1431 { 1432 SCSIRequest *req = &r->req; 1433 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 1434 bool start = req->cmd.buf[4] & 1; 1435 bool loej = req->cmd.buf[4] & 2; /* load on start, eject on !start */ 1436 int pwrcnd = req->cmd.buf[4] & 0xf0; 1437 1438 if (pwrcnd) { 1439 /* eject/load only happens for power condition == 0 */ 1440 return 0; 1441 } 1442 1443 if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) && loej) { 1444 if (!start && !s->tray_open && s->tray_locked) { 1445 scsi_check_condition(r, 1446 blk_is_inserted(s->qdev.conf.blk) 1447 ? 
SENSE_CODE(ILLEGAL_REQ_REMOVAL_PREVENTED) 1448 : SENSE_CODE(NOT_READY_REMOVAL_PREVENTED)); 1449 return -1; 1450 } 1451 1452 if (s->tray_open != !start) { 1453 blk_eject(s->qdev.conf.blk, !start); 1454 s->tray_open = !start; 1455 } 1456 } 1457 return 0; 1458 } 1459 1460 static void scsi_disk_emulate_read_data(SCSIRequest *req) 1461 { 1462 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 1463 int buflen = r->iov.iov_len; 1464 1465 if (buflen) { 1466 trace_scsi_disk_emulate_read_data(buflen); 1467 r->iov.iov_len = 0; 1468 r->started = true; 1469 scsi_req_data(&r->req, buflen); 1470 return; 1471 } 1472 1473 /* This also clears the sense buffer for REQUEST SENSE. */ 1474 scsi_req_complete(&r->req, GOOD); 1475 } 1476 1477 static int scsi_disk_check_mode_select(SCSIDiskState *s, int page, 1478 uint8_t *inbuf, int inlen) 1479 { 1480 uint8_t mode_current[SCSI_MAX_MODE_LEN]; 1481 uint8_t mode_changeable[SCSI_MAX_MODE_LEN]; 1482 uint8_t *p; 1483 int len, expected_len, changeable_len, i; 1484 1485 /* The input buffer does not include the page header, so it is 1486 * off by 2 bytes. 1487 */ 1488 expected_len = inlen + 2; 1489 if (expected_len > SCSI_MAX_MODE_LEN) { 1490 return -1; 1491 } 1492 1493 /* MODE_PAGE_ALLS is only valid for MODE SENSE commands */ 1494 if (page == MODE_PAGE_ALLS) { 1495 return -1; 1496 } 1497 1498 p = mode_current; 1499 memset(mode_current, 0, inlen + 2); 1500 len = mode_sense_page(s, page, &p, 0); 1501 if (len < 0 || len != expected_len) { 1502 return -1; 1503 } 1504 1505 p = mode_changeable; 1506 memset(mode_changeable, 0, inlen + 2); 1507 changeable_len = mode_sense_page(s, page, &p, 1); 1508 assert(changeable_len == len); 1509 1510 /* Check that unchangeable bits are the same as what MODE SENSE 1511 * would return. 
1512 */ 1513 for (i = 2; i < len; i++) { 1514 if (((mode_current[i] ^ inbuf[i - 2]) & ~mode_changeable[i]) != 0) { 1515 return -1; 1516 } 1517 } 1518 return 0; 1519 } 1520 1521 static void scsi_disk_apply_mode_select(SCSIDiskState *s, int page, uint8_t *p) 1522 { 1523 switch (page) { 1524 case MODE_PAGE_CACHING: 1525 blk_set_enable_write_cache(s->qdev.conf.blk, (p[0] & 4) != 0); 1526 break; 1527 1528 default: 1529 break; 1530 } 1531 } 1532 1533 static int mode_select_pages(SCSIDiskReq *r, uint8_t *p, int len, bool change) 1534 { 1535 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1536 1537 while (len > 0) { 1538 int page, subpage, page_len; 1539 1540 /* Parse both possible formats for the mode page headers. */ 1541 page = p[0] & 0x3f; 1542 if (p[0] & 0x40) { 1543 if (len < 4) { 1544 goto invalid_param_len; 1545 } 1546 subpage = p[1]; 1547 page_len = lduw_be_p(&p[2]); 1548 p += 4; 1549 len -= 4; 1550 } else { 1551 if (len < 2) { 1552 goto invalid_param_len; 1553 } 1554 subpage = 0; 1555 page_len = p[1]; 1556 p += 2; 1557 len -= 2; 1558 } 1559 1560 if (subpage) { 1561 goto invalid_param; 1562 } 1563 if (page_len > len) { 1564 if (!(s->quirks & SCSI_DISK_QUIRK_MODE_PAGE_TRUNCATED)) { 1565 goto invalid_param_len; 1566 } 1567 trace_scsi_disk_mode_select_page_truncated(page, page_len, len); 1568 } 1569 1570 if (!change) { 1571 if (scsi_disk_check_mode_select(s, page, p, page_len) < 0) { 1572 goto invalid_param; 1573 } 1574 } else { 1575 scsi_disk_apply_mode_select(s, page, p); 1576 } 1577 1578 p += page_len; 1579 len -= page_len; 1580 } 1581 return 0; 1582 1583 invalid_param: 1584 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM)); 1585 return -1; 1586 1587 invalid_param_len: 1588 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN)); 1589 return -1; 1590 } 1591 1592 static void scsi_disk_emulate_mode_select(SCSIDiskReq *r, uint8_t *inbuf) 1593 { 1594 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1595 uint8_t *p = inbuf; 1596 int cmd = 
r->req.cmd.buf[0]; 1597 int len = r->req.cmd.xfer; 1598 int hdr_len = (cmd == MODE_SELECT ? 4 : 8); 1599 int bd_len, bs; 1600 int pass; 1601 1602 if ((r->req.cmd.buf[1] & 0x11) != 0x10) { 1603 if (!(s->quirks & 1604 (1 << SCSI_DISK_QUIRK_MODE_PAGE_VENDOR_SPECIFIC_APPLE))) { 1605 /* We only support PF=1, SP=0. */ 1606 goto invalid_field; 1607 } 1608 } 1609 1610 if (len < hdr_len) { 1611 goto invalid_param_len; 1612 } 1613 1614 bd_len = (cmd == MODE_SELECT ? p[3] : lduw_be_p(&p[6])); 1615 len -= hdr_len; 1616 p += hdr_len; 1617 if (len < bd_len) { 1618 goto invalid_param_len; 1619 } 1620 if (bd_len != 0 && bd_len != 8) { 1621 goto invalid_param; 1622 } 1623 1624 /* Allow changing the block size */ 1625 if (bd_len) { 1626 bs = p[5] << 16 | p[6] << 8 | p[7]; 1627 1628 /* 1629 * Since the existing code only checks/updates bits 8-15 of the block 1630 * size, restrict ourselves to the same requirement for now to ensure 1631 * that a block size set by a block descriptor and then read back by 1632 * a subsequent SCSI command will be the same. Also disallow a block 1633 * size of 256 since we cannot handle anything below BDRV_SECTOR_SIZE. 1634 */ 1635 if (bs && !(bs & ~0xfe00) && bs != s->qdev.blocksize) { 1636 s->qdev.blocksize = bs; 1637 trace_scsi_disk_mode_select_set_blocksize(s->qdev.blocksize); 1638 } 1639 } 1640 1641 len -= bd_len; 1642 p += bd_len; 1643 1644 /* Ensure no change is made if there is an error! */ 1645 for (pass = 0; pass < 2; pass++) { 1646 if (mode_select_pages(r, p, len, pass == 1) < 0) { 1647 assert(pass == 0); 1648 return; 1649 } 1650 } 1651 if (!blk_enable_write_cache(s->qdev.conf.blk)) { 1652 /* The request is used as the AIO opaque value, so add a ref. 
*/ 1653 scsi_req_ref(&r->req); 1654 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0, 1655 BLOCK_ACCT_FLUSH); 1656 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r); 1657 return; 1658 } 1659 1660 scsi_req_complete(&r->req, GOOD); 1661 return; 1662 1663 invalid_param: 1664 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM)); 1665 return; 1666 1667 invalid_param_len: 1668 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN)); 1669 return; 1670 1671 invalid_field: 1672 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 1673 } 1674 1675 /* sector_num and nb_sectors expected to be in qdev blocksize */ 1676 static inline bool check_lba_range(SCSIDiskState *s, 1677 uint64_t sector_num, uint32_t nb_sectors) 1678 { 1679 /* 1680 * The first line tests that no overflow happens when computing the last 1681 * sector. The second line tests that the last accessed sector is in 1682 * range. 1683 * 1684 * Careful, the computations should not underflow for nb_sectors == 0, 1685 * and a 0-block read to the first LBA beyond the end of device is 1686 * valid. 
1687 */ 1688 return (sector_num <= sector_num + nb_sectors && 1689 sector_num + nb_sectors <= s->qdev.max_lba + 1); 1690 } 1691 1692 typedef struct UnmapCBData { 1693 SCSIDiskReq *r; 1694 uint8_t *inbuf; 1695 int count; 1696 } UnmapCBData; 1697 1698 static void scsi_unmap_complete(void *opaque, int ret); 1699 1700 static void scsi_unmap_complete_noio(UnmapCBData *data, int ret) 1701 { 1702 SCSIDiskReq *r = data->r; 1703 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1704 1705 assert(r->req.aiocb == NULL); 1706 1707 if (data->count > 0) { 1708 uint64_t sector_num = ldq_be_p(&data->inbuf[0]); 1709 uint32_t nb_sectors = ldl_be_p(&data->inbuf[8]) & 0xffffffffULL; 1710 r->sector = sector_num * (s->qdev.blocksize / BDRV_SECTOR_SIZE); 1711 r->sector_count = nb_sectors * (s->qdev.blocksize / BDRV_SECTOR_SIZE); 1712 1713 if (!check_lba_range(s, sector_num, nb_sectors)) { 1714 block_acct_invalid(blk_get_stats(s->qdev.conf.blk), 1715 BLOCK_ACCT_UNMAP); 1716 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE)); 1717 goto done; 1718 } 1719 1720 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 1721 r->sector_count * BDRV_SECTOR_SIZE, 1722 BLOCK_ACCT_UNMAP); 1723 1724 r->req.aiocb = blk_aio_pdiscard(s->qdev.conf.blk, 1725 r->sector * BDRV_SECTOR_SIZE, 1726 r->sector_count * BDRV_SECTOR_SIZE, 1727 scsi_unmap_complete, data); 1728 data->count--; 1729 data->inbuf += 16; 1730 return; 1731 } 1732 1733 scsi_req_complete(&r->req, GOOD); 1734 1735 done: 1736 scsi_req_unref(&r->req); 1737 g_free(data); 1738 } 1739 1740 static void scsi_unmap_complete(void *opaque, int ret) 1741 { 1742 UnmapCBData *data = opaque; 1743 SCSIDiskReq *r = data->r; 1744 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1745 1746 assert(r->req.aiocb != NULL); 1747 r->req.aiocb = NULL; 1748 1749 if (scsi_disk_req_check_error(r, ret, true)) { 1750 scsi_req_unref(&r->req); 1751 g_free(data); 1752 } else { 1753 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); 1754 
scsi_unmap_complete_noio(data, ret); 1755 } 1756 } 1757 1758 static void scsi_disk_emulate_unmap(SCSIDiskReq *r, uint8_t *inbuf) 1759 { 1760 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1761 uint8_t *p = inbuf; 1762 int len = r->req.cmd.xfer; 1763 UnmapCBData *data; 1764 1765 /* Reject ANCHOR=1. */ 1766 if (r->req.cmd.buf[1] & 0x1) { 1767 goto invalid_field; 1768 } 1769 1770 if (len < 8) { 1771 goto invalid_param_len; 1772 } 1773 if (len < lduw_be_p(&p[0]) + 2) { 1774 goto invalid_param_len; 1775 } 1776 if (len < lduw_be_p(&p[2]) + 8) { 1777 goto invalid_param_len; 1778 } 1779 if (lduw_be_p(&p[2]) & 15) { 1780 goto invalid_param_len; 1781 } 1782 1783 if (!blk_is_writable(s->qdev.conf.blk)) { 1784 block_acct_invalid(blk_get_stats(s->qdev.conf.blk), BLOCK_ACCT_UNMAP); 1785 scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED)); 1786 return; 1787 } 1788 1789 data = g_new0(UnmapCBData, 1); 1790 data->r = r; 1791 data->inbuf = &p[8]; 1792 data->count = lduw_be_p(&p[2]) >> 4; 1793 1794 /* The matching unref is in scsi_unmap_complete, before data is freed. 
*/ 1795 scsi_req_ref(&r->req); 1796 scsi_unmap_complete_noio(data, 0); 1797 return; 1798 1799 invalid_param_len: 1800 block_acct_invalid(blk_get_stats(s->qdev.conf.blk), BLOCK_ACCT_UNMAP); 1801 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN)); 1802 return; 1803 1804 invalid_field: 1805 block_acct_invalid(blk_get_stats(s->qdev.conf.blk), BLOCK_ACCT_UNMAP); 1806 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 1807 } 1808 1809 typedef struct WriteSameCBData { 1810 SCSIDiskReq *r; 1811 int64_t sector; 1812 int nb_sectors; 1813 QEMUIOVector qiov; 1814 struct iovec iov; 1815 } WriteSameCBData; 1816 1817 static void scsi_write_same_complete(void *opaque, int ret) 1818 { 1819 WriteSameCBData *data = opaque; 1820 SCSIDiskReq *r = data->r; 1821 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1822 1823 assert(r->req.aiocb != NULL); 1824 r->req.aiocb = NULL; 1825 1826 if (scsi_disk_req_check_error(r, ret, true)) { 1827 goto done; 1828 } 1829 1830 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); 1831 1832 data->nb_sectors -= data->iov.iov_len / BDRV_SECTOR_SIZE; 1833 data->sector += data->iov.iov_len / BDRV_SECTOR_SIZE; 1834 data->iov.iov_len = MIN(data->nb_sectors * BDRV_SECTOR_SIZE, 1835 data->iov.iov_len); 1836 if (data->iov.iov_len) { 1837 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 1838 data->iov.iov_len, BLOCK_ACCT_WRITE); 1839 /* Reinitialize qiov, to handle unaligned WRITE SAME request 1840 * where final qiov may need smaller size */ 1841 qemu_iovec_init_external(&data->qiov, &data->iov, 1); 1842 r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk, 1843 data->sector << BDRV_SECTOR_BITS, 1844 &data->qiov, 0, 1845 scsi_write_same_complete, data); 1846 return; 1847 } 1848 1849 scsi_req_complete(&r->req, GOOD); 1850 1851 done: 1852 scsi_req_unref(&r->req); 1853 qemu_vfree(data->iov.iov_base); 1854 g_free(data); 1855 } 1856 1857 static void scsi_disk_emulate_write_same(SCSIDiskReq *r, uint8_t *inbuf) 1858 { 1859 SCSIRequest 
*req = &r->req; 1860 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 1861 uint32_t nb_sectors = scsi_data_cdb_xfer(r->req.cmd.buf); 1862 WriteSameCBData *data; 1863 uint8_t *buf; 1864 int i, l; 1865 1866 /* Fail if PBDATA=1 or LBDATA=1 or ANCHOR=1. */ 1867 if (nb_sectors == 0 || (req->cmd.buf[1] & 0x16)) { 1868 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 1869 return; 1870 } 1871 1872 if (!blk_is_writable(s->qdev.conf.blk)) { 1873 scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED)); 1874 return; 1875 } 1876 if (!check_lba_range(s, r->req.cmd.lba, nb_sectors)) { 1877 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE)); 1878 return; 1879 } 1880 1881 if ((req->cmd.buf[1] & 0x1) || buffer_is_zero(inbuf, s->qdev.blocksize)) { 1882 int flags = (req->cmd.buf[1] & 0x8) ? BDRV_REQ_MAY_UNMAP : 0; 1883 1884 /* The request is used as the AIO opaque value, so add a ref. */ 1885 scsi_req_ref(&r->req); 1886 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 1887 nb_sectors * s->qdev.blocksize, 1888 BLOCK_ACCT_WRITE); 1889 r->req.aiocb = blk_aio_pwrite_zeroes(s->qdev.conf.blk, 1890 r->req.cmd.lba * s->qdev.blocksize, 1891 nb_sectors * s->qdev.blocksize, 1892 flags, scsi_aio_complete, r); 1893 return; 1894 } 1895 1896 data = g_new0(WriteSameCBData, 1); 1897 data->r = r; 1898 data->sector = r->req.cmd.lba * (s->qdev.blocksize / BDRV_SECTOR_SIZE); 1899 data->nb_sectors = nb_sectors * (s->qdev.blocksize / BDRV_SECTOR_SIZE); 1900 data->iov.iov_len = MIN(data->nb_sectors * BDRV_SECTOR_SIZE, 1901 SCSI_WRITE_SAME_MAX); 1902 data->iov.iov_base = buf = blk_blockalign(s->qdev.conf.blk, 1903 data->iov.iov_len); 1904 qemu_iovec_init_external(&data->qiov, &data->iov, 1); 1905 1906 for (i = 0; i < data->iov.iov_len; i += l) { 1907 l = MIN(s->qdev.blocksize, data->iov.iov_len - i); 1908 memcpy(&buf[i], inbuf, l); 1909 } 1910 1911 scsi_req_ref(&r->req); 1912 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 1913 data->iov.iov_len, BLOCK_ACCT_WRITE); 1914 
r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk, 1915 data->sector << BDRV_SECTOR_BITS, 1916 &data->qiov, 0, 1917 scsi_write_same_complete, data); 1918 } 1919 1920 static void scsi_disk_emulate_write_data(SCSIRequest *req) 1921 { 1922 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 1923 1924 if (r->iov.iov_len) { 1925 int buflen = r->iov.iov_len; 1926 trace_scsi_disk_emulate_write_data(buflen); 1927 r->iov.iov_len = 0; 1928 scsi_req_data(&r->req, buflen); 1929 return; 1930 } 1931 1932 switch (req->cmd.buf[0]) { 1933 case MODE_SELECT: 1934 case MODE_SELECT_10: 1935 /* This also clears the sense buffer for REQUEST SENSE. */ 1936 scsi_disk_emulate_mode_select(r, r->iov.iov_base); 1937 break; 1938 1939 case UNMAP: 1940 scsi_disk_emulate_unmap(r, r->iov.iov_base); 1941 break; 1942 1943 case VERIFY_10: 1944 case VERIFY_12: 1945 case VERIFY_16: 1946 if (r->req.status == -1) { 1947 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 1948 } 1949 break; 1950 1951 case WRITE_SAME_10: 1952 case WRITE_SAME_16: 1953 scsi_disk_emulate_write_same(r, r->iov.iov_base); 1954 break; 1955 1956 case FORMAT_UNIT: 1957 scsi_req_complete(&r->req, GOOD); 1958 break; 1959 1960 default: 1961 abort(); 1962 } 1963 } 1964 1965 static int32_t scsi_disk_emulate_command(SCSIRequest *req, uint8_t *buf) 1966 { 1967 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 1968 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 1969 uint64_t nb_sectors; 1970 uint8_t *outbuf; 1971 int buflen; 1972 1973 switch (req->cmd.buf[0]) { 1974 case INQUIRY: 1975 case MODE_SENSE: 1976 case MODE_SENSE_10: 1977 case RESERVE: 1978 case RESERVE_10: 1979 case RELEASE: 1980 case RELEASE_10: 1981 case START_STOP: 1982 case ALLOW_MEDIUM_REMOVAL: 1983 case GET_CONFIGURATION: 1984 case GET_EVENT_STATUS_NOTIFICATION: 1985 case MECHANISM_STATUS: 1986 case REQUEST_SENSE: 1987 break; 1988 1989 default: 1990 if (!blk_is_available(s->qdev.conf.blk)) { 1991 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM)); 1992 return 0; 1993 
} 1994 break; 1995 } 1996 1997 /* 1998 * FIXME: we shouldn't return anything bigger than 4k, but the code 1999 * requires the buffer to be as big as req->cmd.xfer in several 2000 * places. So, do not allow CDBs with a very large ALLOCATION 2001 * LENGTH. The real fix would be to modify scsi_read_data and 2002 * dma_buf_read, so that they return data beyond the buflen 2003 * as all zeros. 2004 */ 2005 if (req->cmd.xfer > 65536) { 2006 goto illegal_request; 2007 } 2008 r->buflen = MAX(4096, req->cmd.xfer); 2009 2010 if (!r->iov.iov_base) { 2011 r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen); 2012 } 2013 2014 outbuf = r->iov.iov_base; 2015 memset(outbuf, 0, r->buflen); 2016 switch (req->cmd.buf[0]) { 2017 case TEST_UNIT_READY: 2018 assert(blk_is_available(s->qdev.conf.blk)); 2019 break; 2020 case INQUIRY: 2021 buflen = scsi_disk_emulate_inquiry(req, outbuf); 2022 if (buflen < 0) { 2023 goto illegal_request; 2024 } 2025 break; 2026 case MODE_SENSE: 2027 case MODE_SENSE_10: 2028 buflen = scsi_disk_emulate_mode_sense(r, outbuf); 2029 if (buflen < 0) { 2030 goto illegal_request; 2031 } 2032 break; 2033 case READ_TOC: 2034 buflen = scsi_disk_emulate_read_toc(req, outbuf); 2035 if (buflen < 0) { 2036 goto illegal_request; 2037 } 2038 break; 2039 case RESERVE: 2040 if (req->cmd.buf[1] & 1) { 2041 goto illegal_request; 2042 } 2043 break; 2044 case RESERVE_10: 2045 if (req->cmd.buf[1] & 3) { 2046 goto illegal_request; 2047 } 2048 break; 2049 case RELEASE: 2050 if (req->cmd.buf[1] & 1) { 2051 goto illegal_request; 2052 } 2053 break; 2054 case RELEASE_10: 2055 if (req->cmd.buf[1] & 3) { 2056 goto illegal_request; 2057 } 2058 break; 2059 case START_STOP: 2060 if (scsi_disk_emulate_start_stop(r) < 0) { 2061 return 0; 2062 } 2063 break; 2064 case ALLOW_MEDIUM_REMOVAL: 2065 s->tray_locked = req->cmd.buf[4] & 1; 2066 blk_lock_medium(s->qdev.conf.blk, req->cmd.buf[4] & 1); 2067 break; 2068 case READ_CAPACITY_10: 2069 /* The normal LEN field for this command is zero. 
*/ 2070 memset(outbuf, 0, 8); 2071 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 2072 if (!nb_sectors) { 2073 scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY)); 2074 return 0; 2075 } 2076 if ((req->cmd.buf[8] & 1) == 0 && req->cmd.lba) { 2077 goto illegal_request; 2078 } 2079 nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE; 2080 /* Returned value is the address of the last sector. */ 2081 nb_sectors--; 2082 /* Remember the new size for read/write sanity checking. */ 2083 s->qdev.max_lba = nb_sectors; 2084 /* Clip to 2TB, instead of returning capacity modulo 2TB. */ 2085 if (nb_sectors > UINT32_MAX) { 2086 nb_sectors = UINT32_MAX; 2087 } 2088 outbuf[0] = (nb_sectors >> 24) & 0xff; 2089 outbuf[1] = (nb_sectors >> 16) & 0xff; 2090 outbuf[2] = (nb_sectors >> 8) & 0xff; 2091 outbuf[3] = nb_sectors & 0xff; 2092 outbuf[4] = 0; 2093 outbuf[5] = 0; 2094 outbuf[6] = s->qdev.blocksize >> 8; 2095 outbuf[7] = 0; 2096 break; 2097 case REQUEST_SENSE: 2098 /* Just return "NO SENSE". */ 2099 buflen = scsi_convert_sense(NULL, 0, outbuf, r->buflen, 2100 (req->cmd.buf[1] & 1) == 0); 2101 if (buflen < 0) { 2102 goto illegal_request; 2103 } 2104 break; 2105 case MECHANISM_STATUS: 2106 buflen = scsi_emulate_mechanism_status(s, outbuf); 2107 if (buflen < 0) { 2108 goto illegal_request; 2109 } 2110 break; 2111 case GET_CONFIGURATION: 2112 buflen = scsi_get_configuration(s, outbuf); 2113 if (buflen < 0) { 2114 goto illegal_request; 2115 } 2116 break; 2117 case GET_EVENT_STATUS_NOTIFICATION: 2118 buflen = scsi_get_event_status_notification(s, r, outbuf); 2119 if (buflen < 0) { 2120 goto illegal_request; 2121 } 2122 break; 2123 case READ_DISC_INFORMATION: 2124 buflen = scsi_read_disc_information(s, r, outbuf); 2125 if (buflen < 0) { 2126 goto illegal_request; 2127 } 2128 break; 2129 case READ_DVD_STRUCTURE: 2130 buflen = scsi_read_dvd_structure(s, r, outbuf); 2131 if (buflen < 0) { 2132 goto illegal_request; 2133 } 2134 break; 2135 case SERVICE_ACTION_IN_16: 2136 /* Service Action In 
subcommands. */ 2137 if ((req->cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) { 2138 trace_scsi_disk_emulate_command_SAI_16(); 2139 memset(outbuf, 0, req->cmd.xfer); 2140 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 2141 if (!nb_sectors) { 2142 scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY)); 2143 return 0; 2144 } 2145 if ((req->cmd.buf[14] & 1) == 0 && req->cmd.lba) { 2146 goto illegal_request; 2147 } 2148 nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE; 2149 /* Returned value is the address of the last sector. */ 2150 nb_sectors--; 2151 /* Remember the new size for read/write sanity checking. */ 2152 s->qdev.max_lba = nb_sectors; 2153 outbuf[0] = (nb_sectors >> 56) & 0xff; 2154 outbuf[1] = (nb_sectors >> 48) & 0xff; 2155 outbuf[2] = (nb_sectors >> 40) & 0xff; 2156 outbuf[3] = (nb_sectors >> 32) & 0xff; 2157 outbuf[4] = (nb_sectors >> 24) & 0xff; 2158 outbuf[5] = (nb_sectors >> 16) & 0xff; 2159 outbuf[6] = (nb_sectors >> 8) & 0xff; 2160 outbuf[7] = nb_sectors & 0xff; 2161 outbuf[8] = 0; 2162 outbuf[9] = 0; 2163 outbuf[10] = s->qdev.blocksize >> 8; 2164 outbuf[11] = 0; 2165 outbuf[12] = 0; 2166 outbuf[13] = get_physical_block_exp(&s->qdev.conf); 2167 2168 /* set TPE bit if the format supports discard */ 2169 if (s->qdev.conf.discard_granularity) { 2170 outbuf[14] = 0x80; 2171 } 2172 2173 /* Protection, exponent and lowest lba field left blank. */ 2174 break; 2175 } 2176 trace_scsi_disk_emulate_command_SAI_unsupported(); 2177 goto illegal_request; 2178 case SYNCHRONIZE_CACHE: 2179 /* The request is used as the AIO opaque value, so add a ref. 
*/ 2180 scsi_req_ref(&r->req); 2181 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0, 2182 BLOCK_ACCT_FLUSH); 2183 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r); 2184 return 0; 2185 case SEEK_10: 2186 trace_scsi_disk_emulate_command_SEEK_10(r->req.cmd.lba); 2187 if (r->req.cmd.lba > s->qdev.max_lba) { 2188 goto illegal_lba; 2189 } 2190 break; 2191 case MODE_SELECT: 2192 trace_scsi_disk_emulate_command_MODE_SELECT(r->req.cmd.xfer); 2193 break; 2194 case MODE_SELECT_10: 2195 trace_scsi_disk_emulate_command_MODE_SELECT_10(r->req.cmd.xfer); 2196 break; 2197 case UNMAP: 2198 trace_scsi_disk_emulate_command_UNMAP(r->req.cmd.xfer); 2199 break; 2200 case VERIFY_10: 2201 case VERIFY_12: 2202 case VERIFY_16: 2203 trace_scsi_disk_emulate_command_VERIFY((req->cmd.buf[1] >> 1) & 3); 2204 if (req->cmd.buf[1] & 6) { 2205 goto illegal_request; 2206 } 2207 break; 2208 case WRITE_SAME_10: 2209 case WRITE_SAME_16: 2210 trace_scsi_disk_emulate_command_WRITE_SAME( 2211 req->cmd.buf[0] == WRITE_SAME_10 ? 10 : 16, r->req.cmd.xfer); 2212 break; 2213 case FORMAT_UNIT: 2214 trace_scsi_disk_emulate_command_FORMAT_UNIT(r->req.cmd.xfer); 2215 break; 2216 default: 2217 trace_scsi_disk_emulate_command_UNKNOWN(buf[0], 2218 scsi_command_name(buf[0])); 2219 scsi_check_condition(r, SENSE_CODE(INVALID_OPCODE)); 2220 return 0; 2221 } 2222 assert(!r->req.aiocb); 2223 r->iov.iov_len = MIN(r->buflen, req->cmd.xfer); 2224 if (r->iov.iov_len == 0) { 2225 scsi_req_complete(&r->req, GOOD); 2226 } 2227 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { 2228 assert(r->iov.iov_len == req->cmd.xfer); 2229 return -r->iov.iov_len; 2230 } else { 2231 return r->iov.iov_len; 2232 } 2233 2234 illegal_request: 2235 if (r->req.status == -1) { 2236 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 2237 } 2238 return 0; 2239 2240 illegal_lba: 2241 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE)); 2242 return 0; 2243 } 2244 2245 /* Execute a scsi command. 
   Returns the length of the data expected by the
   command.  This will be positive for data transfers from the device
   (eg. disk reads), negative for transfers to the device (eg. disk writes),
   and zero if the command does not transfer any data. */

/*
 * Set up a DMA-path command (READ/WRITE/VERIFY families): validates the CDB,
 * records the transfer window in r->sector / r->sector_count (both in
 * BDRV_SECTOR_SIZE units) and returns the signed expected transfer length.
 * Actual I/O is started later by the read_data/write_data callbacks.
 */
static int32_t scsi_disk_dma_command(SCSIRequest *req, uint8_t *buf)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
    uint32_t len;
    uint8_t command;

    command = buf[0];

    /* No medium inserted: fail immediately with NO MEDIUM sense. */
    if (!blk_is_available(s->qdev.conf.blk)) {
        scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
        return 0;
    }

    /* Transfer length in logical blocks, decoded from the CDB. */
    len = scsi_data_cdb_xfer(r->req.cmd.buf);
    switch (command) {
    case READ_6:
    case READ_10:
    case READ_12:
    case READ_16:
        trace_scsi_disk_dma_command_READ(r->req.cmd.lba, len);
        /* Protection information is not supported. For SCSI versions 2 and
         * older (as determined by snooping the guest's INQUIRY commands),
         * there is no RD/WR/VRPROTECT, so skip this check in these versions.
         */
        if (s->qdev.scsi_version > 2 && (r->req.cmd.buf[1] & 0xe0)) {
            goto illegal_request;
        }
        if (!check_lba_range(s, r->req.cmd.lba, len)) {
            goto illegal_lba;
        }
        r->sector = r->req.cmd.lba * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
        r->sector_count = len * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
        break;
    case WRITE_6:
    case WRITE_10:
    case WRITE_12:
    case WRITE_16:
    case WRITE_VERIFY_10:
    case WRITE_VERIFY_12:
    case WRITE_VERIFY_16:
        if (!blk_is_writable(s->qdev.conf.blk)) {
            scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
            return 0;
        }
        /* (command & 0xe) == 0xe matches the WRITE AND VERIFY opcodes. */
        trace_scsi_disk_dma_command_WRITE(
            (command & 0xe) == 0xe ? "And Verify " : "",
            r->req.cmd.lba, len);
        /* fall through */
    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
        /* We get here only for BYTCHK == 0x01 and only for scsi-block.
         * As far as DMA is concerned, we can treat it the same as a write;
         * scsi_block_do_sgio will send VERIFY commands.
         */
        if (s->qdev.scsi_version > 2 && (r->req.cmd.buf[1] & 0xe0)) {
            goto illegal_request;
        }
        if (!check_lba_range(s, r->req.cmd.lba, len)) {
            goto illegal_lba;
        }
        r->sector = r->req.cmd.lba * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
        r->sector_count = len * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
        break;
    default:
        /* Dispatch table guarantees only the opcodes above reach us. */
        abort();
    illegal_request:
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        return 0;
    illegal_lba:
        scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
        return 0;
    }
    r->need_fua_emulation = sdc->need_fua_emulation(&r->req.cmd);
    /* Zero-length transfer: complete immediately with GOOD status. */
    if (r->sector_count == 0) {
        scsi_req_complete(&r->req, GOOD);
    }
    assert(r->iov.iov_len == 0);
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        return -r->sector_count * BDRV_SECTOR_SIZE;
    } else {
        return r->sector_count * BDRV_SECTOR_SIZE;
    }
}

/*
 * Device reset: abort in-flight requests with RESET sense, re-read the
 * capacity, and restore the negotiated SCSI version and tray state.
 */
static void scsi_disk_reset(DeviceState *dev)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev.qdev, dev);
    uint64_t nb_sectors;

    scsi_device_purge_requests(&s->qdev, SENSE_CODE(RESET));

    blk_get_geometry(s->qdev.conf.blk, &nb_sectors);

    /* max_lba is the address of the last logical block (capacity - 1). */
    nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE;
    if (nb_sectors) {
        nb_sectors--;
    }
    s->qdev.max_lba = nb_sectors;
    /* reset tray statuses */
    s->tray_locked = 0;
    s->tray_open = 0;

    s->qdev.scsi_version = s->qdev.default_scsi_version;
}

/* BlockDevOps drained_begin hook: forward to the SCSI device layer. */
static void scsi_disk_drained_begin(void *opaque)
{
    SCSIDiskState *s = opaque;

    scsi_device_drained_begin(&s->qdev);
}

static
void scsi_disk_drained_end(void *opaque)
{
    /* BlockDevOps drained_end hook: forward to the SCSI device layer. */
    SCSIDiskState *s = opaque;

    scsi_device_drained_end(&s->qdev);
}

/* Backend resize callback: report CAPACITY CHANGED to the guest. */
static void scsi_disk_resize_cb(void *opaque)
{
    SCSIDiskState *s = opaque;

    /* SPC lists this sense code as available only for
     * direct-access devices.
     */
    if (s->qdev.type == TYPE_DISK) {
        scsi_device_report_change(&s->qdev, SENSE_CODE(CAPACITY_CHANGED));
    }
}

/* Backend media-change callback (load = true when new medium inserted). */
static void scsi_cd_change_media_cb(void *opaque, bool load, Error **errp)
{
    SCSIDiskState *s = opaque;

    /*
     * When a CD gets changed, we have to report an ejected state and
     * then a loaded state to guests so that they detect tray
     * open/close and media change events.  Guests that do not use
     * GET_EVENT_STATUS_NOTIFICATION to detect such tray open/close
     * states rely on this behavior.
     *
     * media_changed governs the state machine used for unit attention
     * report.  media_event is used by GET EVENT STATUS NOTIFICATION.
     */
    s->media_changed = load;
    s->tray_open = !load;
    scsi_device_set_ua(&s->qdev, SENSE_CODE(UNIT_ATTENTION_NO_MEDIUM));
    s->media_event = true;
    s->eject_request = false;
}

/* Backend eject-request callback; force unlocks the tray first. */
static void scsi_cd_eject_request_cb(void *opaque, bool force)
{
    SCSIDiskState *s = opaque;

    s->eject_request = true;
    if (force) {
        s->tray_locked = false;
    }
}

static bool scsi_cd_is_tray_open(void *opaque)
{
    return ((SCSIDiskState *)opaque)->tray_open;
}

static bool scsi_cd_is_medium_locked(void *opaque)
{
    return ((SCSIDiskState *)opaque)->tray_locked;
}

/* BlockDevOps for removable media (scsi-cd, removable scsi-hd). */
static const BlockDevOps scsi_disk_removable_block_ops = {
    .change_media_cb  = scsi_cd_change_media_cb,
    .drained_begin    = scsi_disk_drained_begin,
    .drained_end      = scsi_disk_drained_end,
    .eject_request_cb = scsi_cd_eject_request_cb,
    .is_medium_locked = scsi_cd_is_medium_locked,
    .is_tray_open     = scsi_cd_is_tray_open,
    .resize_cb        = scsi_disk_resize_cb,
};

/* BlockDevOps for fixed media. */
static const BlockDevOps scsi_disk_block_ops = {
    .drained_begin = scsi_disk_drained_begin,
    .drained_end   = scsi_disk_drained_end,
    .resize_cb     = scsi_disk_resize_cb,
};

/*
 * After any unit attention is delivered, queue MEDIUM CHANGED if a media
 * change is still pending (two-step UA sequence after a CD swap).
 */
static void scsi_disk_unit_attention_reported(SCSIDevice *dev)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
    if (s->media_changed) {
        s->media_changed = false;
        scsi_device_set_ua(&s->qdev, SENSE_CODE(MEDIUM_CHANGED));
    }
}

/*
 * Common realize path shared by scsi-hd, scsi-cd and scsi-block: validates
 * the backend and configuration, fills default identification strings, and
 * installs the appropriate BlockDevOps.  Errors are reported through errp.
 */
static void scsi_realize(SCSIDevice *dev, Error **errp)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
    bool read_only;

    if (!s->qdev.conf.blk) {
        error_setg(errp, "drive property not set");
        return;
    }

    /* Non-removable devices must have a medium at realize time. */
    if (!(s->features & (1 << SCSI_DISK_F_REMOVABLE)) &&
        !blk_is_inserted(s->qdev.conf.blk)) {
        error_setg(errp, "Device needs media, but drive is empty");
        return;
    }

    if
    (!blkconf_blocksizes(&s->qdev.conf, errp)) {
        return;
    }

    /* Backend in an iothread requires HBA support for it. */
    if (blk_get_aio_context(s->qdev.conf.blk) != qemu_get_aio_context() &&
        !s->qdev.hba_supports_iothread)
    {
        error_setg(errp, "HBA does not support iothreads");
        return;
    }

    if (dev->type == TYPE_DISK) {
        if (!blkconf_geometry(&dev->conf, NULL, 65535, 255, 255, errp)) {
            return;
        }
    }

    read_only = !blk_supports_write_perm(s->qdev.conf.blk);
    if (dev->type == TYPE_ROM) {
        /* CD-ROMs are always presented read-only to the guest. */
        read_only = true;
    }

    if (!blkconf_apply_backend_options(&dev->conf, read_only,
                                       dev->type == TYPE_DISK, errp)) {
        return;
    }

    /* -1 means "not configured": pick a sensible default granularity. */
    if (s->qdev.conf.discard_granularity == -1) {
        s->qdev.conf.discard_granularity =
            MAX(s->qdev.conf.logical_block_size, DEFAULT_DISCARD_GRANULARITY);
    }

    /* Default identification strings used by INQUIRY / VPD pages. */
    if (!s->version) {
        s->version = g_strdup(qemu_hw_version());
    }
    if (!s->vendor) {
        s->vendor = g_strdup("QEMU");
    }
    if (!s->device_id) {
        if (s->serial) {
            /* Truncate to the 20 characters that fit the VPD field. */
            s->device_id = g_strdup_printf("%.20s", s->serial);
        } else {
            const char *str = blk_name(s->qdev.conf.blk);
            if (str && *str) {
                s->device_id = g_strdup(str);
            }
        }
    }

    /* SG character devices belong to scsi-generic, not to this device. */
    if (blk_is_sg(s->qdev.conf.blk)) {
        error_setg(errp, "unwanted /dev/sg*");
        return;
    }

    if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) &&
        !(s->features & (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS))) {
        blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_removable_block_ops, s);
    } else {
        blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_block_ops, s);
    }

    blk_iostatus_enable(s->qdev.conf.blk);

    add_boot_device_lchs(&dev->qdev, NULL,
                         dev->conf.lcyls,
                         dev->conf.lheads,
                         dev->conf.lsecs);
}

static void scsi_unrealize(SCSIDevice *dev)
{
    del_boot_device_lchs(&dev->qdev, NULL);
}

/* Realize for scsi-hd: fixed direct-access disk. */
static void scsi_hd_realize(SCSIDevice *dev, Error **errp)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);

    /* can happen for devices without drive. The error message for missing
     * backend will be issued in scsi_realize
     */
    if (s->qdev.conf.blk) {
        if (!blkconf_blocksizes(&s->qdev.conf, errp)) {
            return;
        }
    }
    s->qdev.blocksize = s->qdev.conf.logical_block_size;
    s->qdev.type = TYPE_DISK;
    if (!s->product) {
        s->product = g_strdup("QEMU HARDDISK");
    }
    scsi_realize(&s->qdev, errp);
}

/* Realize for scsi-cd: removable CD-ROM, possibly with an empty drive. */
static void scsi_cd_realize(SCSIDevice *dev, Error **errp)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
    int ret;
    uint32_t blocksize = 2048;

    if (!dev->conf.blk) {
        /* Anonymous BlockBackend for an empty drive. As we put it into
         * dev->conf, qdev takes care of detaching on unplug. */
        dev->conf.blk = blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL);
        ret = blk_attach_dev(dev->conf.blk, &dev->qdev);
        assert(ret == 0);
    }

    if (dev->conf.physical_block_size != 0) {
        blocksize = dev->conf.physical_block_size;
    }

    s->qdev.blocksize = blocksize;
    s->qdev.type = TYPE_ROM;
    s->features |= 1 << SCSI_DISK_F_REMOVABLE;
    if (!s->product) {
        s->product = g_strdup("QEMU CD-ROM");
    }
    scsi_realize(&s->qdev, errp);
}


/* Request ops for commands emulated entirely in this file. */
static const SCSIReqOps scsi_disk_emulate_reqops = {
    .size         = sizeof(SCSIDiskReq),
    .free_req     = scsi_free_request,
    .send_command = scsi_disk_emulate_command,
    .read_data    = scsi_disk_emulate_read_data,
    .write_data   = scsi_disk_emulate_write_data,
    .get_buf      = scsi_get_buf,
};

/* Request ops for commands that move data through the DMA helpers. */
static const SCSIReqOps scsi_disk_dma_reqops = {
    .size         = sizeof(SCSIDiskReq),
    .free_req     = scsi_free_request,
    .send_command = scsi_disk_dma_command,
    .read_data    = scsi_read_data,
    .write_data   = scsi_write_data,
    .get_buf      = scsi_get_buf,
    .load_request = scsi_disk_load_request,
    .save_request = scsi_disk_save_request,
};

/* Opcode -> SCSIReqOps dispatch; NULL entries fall back to emulation. */
static const SCSIReqOps *const scsi_disk_reqops_dispatch[256] = {
    [TEST_UNIT_READY]               = &scsi_disk_emulate_reqops,
    [INQUIRY]                       = &scsi_disk_emulate_reqops,
    [MODE_SENSE]                    = &scsi_disk_emulate_reqops,
    [MODE_SENSE_10]                 = &scsi_disk_emulate_reqops,
    [START_STOP]                    = &scsi_disk_emulate_reqops,
    [ALLOW_MEDIUM_REMOVAL]          = &scsi_disk_emulate_reqops,
    [READ_CAPACITY_10]              = &scsi_disk_emulate_reqops,
    [READ_TOC]                      = &scsi_disk_emulate_reqops,
    [READ_DVD_STRUCTURE]            = &scsi_disk_emulate_reqops,
    [READ_DISC_INFORMATION]         = &scsi_disk_emulate_reqops,
    [GET_CONFIGURATION]             = &scsi_disk_emulate_reqops,
    [GET_EVENT_STATUS_NOTIFICATION] = &scsi_disk_emulate_reqops,
    [MECHANISM_STATUS]              = &scsi_disk_emulate_reqops,
    [SERVICE_ACTION_IN_16]          = &scsi_disk_emulate_reqops,
    [REQUEST_SENSE]                 = &scsi_disk_emulate_reqops,
    [SYNCHRONIZE_CACHE]             = &scsi_disk_emulate_reqops,
    [SEEK_10]                       = &scsi_disk_emulate_reqops,
    [MODE_SELECT]                   = &scsi_disk_emulate_reqops,
    [MODE_SELECT_10]                = &scsi_disk_emulate_reqops,
    [UNMAP]                         = &scsi_disk_emulate_reqops,
    [WRITE_SAME_10]                 = &scsi_disk_emulate_reqops,
    [WRITE_SAME_16]                 = &scsi_disk_emulate_reqops,
    [VERIFY_10]                     = &scsi_disk_emulate_reqops,
    [VERIFY_12]                     = &scsi_disk_emulate_reqops,
    [VERIFY_16]                     = &scsi_disk_emulate_reqops,
    [FORMAT_UNIT]                   = &scsi_disk_emulate_reqops,

    [READ_6]                        = &scsi_disk_dma_reqops,
    [READ_10]                       = &scsi_disk_dma_reqops,
    [READ_12]                       = &scsi_disk_dma_reqops,
    [READ_16]                       = &scsi_disk_dma_reqops,
    [WRITE_6]                       = &scsi_disk_dma_reqops,
    [WRITE_10]                      = &scsi_disk_dma_reqops,
    [WRITE_12]                      = &scsi_disk_dma_reqops,
    [WRITE_16]                      = &scsi_disk_dma_reqops,
    [WRITE_VERIFY_10]               = &scsi_disk_dma_reqops,
    [WRITE_VERIFY_12]               = &scsi_disk_dma_reqops,
    [WRITE_VERIFY_16]               = &scsi_disk_dma_reqops,
};

/* Trace helper: hex-dump the CDB for the scsi_disk_new_request event. */
static void scsi_disk_new_request_dump(uint32_t lun, uint32_t tag,
                                       uint8_t *buf)
{
    int i;
    int len = scsi_cdb_length(buf);
    char *line_buffer, *p;

    assert(len > 0 && len <= 16);
    /* 5 characters per byte (" 0xNN") plus the terminating NUL. */
    line_buffer = g_malloc(len * 5 + 1);

    for (i = 0, p = line_buffer; i < len; i++) {
        p += sprintf(p, " 0x%02x", buf[i]);
    }
    trace_scsi_disk_new_request(lun, tag, line_buffer);

    g_free(line_buffer);
}

/* Allocate a request, picking emulated vs. DMA ops from the opcode. */
static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun,
                                     uint8_t *buf, void *hba_private)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);
    SCSIRequest *req;
    const SCSIReqOps *ops;
    uint8_t command;

    command = buf[0];
    ops = scsi_disk_reqops_dispatch[command];
    if (!ops) {
        ops = &scsi_disk_emulate_reqops;
    }
    req = scsi_req_alloc(ops, &s->qdev, tag, lun, hba_private);

    if (trace_event_get_state_backends(TRACE_SCSI_DISK_NEW_REQUEST)) {
        scsi_disk_new_request_dump(lun, tag, buf);
    }

    return req;
}

#ifdef __linux__
/*
 * Issue a raw INQUIRY to the host device and record its peripheral type
 * and removable flag.  Returns 0 on success, -1 on failure.
 */
static int get_device_type(SCSIDiskState *s)
{
    uint8_t cmd[16];
    uint8_t buf[36];
    int ret;

    memset(cmd, 0, sizeof(cmd));
    memset(buf, 0, sizeof(buf));
    cmd[0] = INQUIRY;
    cmd[4] = sizeof(buf);

    ret = scsi_SG_IO_FROM_DEV(s->qdev.conf.blk, cmd, sizeof(cmd),
                              buf, sizeof(buf), s->qdev.io_timeout);
    if (ret < 0) {
        return -1;
    }
    /* Byte 0: peripheral device type; byte 1 bit 7: removable medium. */
    s->qdev.type = buf[0];
    if (buf[1] & 0x80) {
        s->features |= 1 << SCSI_DISK_F_REMOVABLE;
    }
    return 0;
}

/* Realize for scsi-block: SG_IO passthrough to a host SCSI device. */
static void scsi_block_realize(SCSIDevice *dev, Error **errp)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
    int sg_version;
    int rc;

    if (!s->qdev.conf.blk) {
        error_setg(errp, "drive property not set");
        return;
    }

    if (s->rotation_rate) {
        error_report_once("rotation_rate is specified for scsi-block but is "
                          "not implemented. This option is deprecated and will "
                          "be removed in a future version");
    }

    /* check we are using a driver managing SG_IO (version 3 and after) */
    rc = blk_ioctl(s->qdev.conf.blk, SG_GET_VERSION_NUM, &sg_version);
    if (rc < 0) {
        error_setg_errno(errp, -rc, "cannot get SG_IO version number");
        if (rc != -EPERM) {
            error_append_hint(errp, "Is this a SCSI device?\n");
        }
        return;
    }
    if (sg_version < 30000) {
        error_setg(errp, "scsi generic interface too old");
        return;
    }

    /* get device type from INQUIRY data */
    rc = get_device_type(s);
    if (rc < 0) {
        error_setg(errp, "INQUIRY failed");
        return;
    }

    /* Make a guess for the block size, we'll fix it when the guest sends.
     * READ CAPACITY.  If they don't, they likely would assume these sizes
     * anyway. (TODO: check in /sys).
     */
    if (s->qdev.type == TYPE_ROM || s->qdev.type == TYPE_WORM) {
        s->qdev.blocksize = 2048;
    } else {
        s->qdev.blocksize = 512;
    }

    /* Makes the scsi-block device not removable by using HMP and QMP eject
     * command.
     */
    s->features |= (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS);

    scsi_realize(&s->qdev, errp);
    scsi_generic_read_device_inquiry(&s->qdev);
}

/* Per-request state for the scsi-block SG_IO DMA path. */
typedef struct SCSIBlockReq {
    SCSIDiskReq req;
    sg_io_hdr_t io_header;

    /* Selected bytes of the original CDB, copied into our own CDB. */
    uint8_t cmd, cdb1, group_number;

    /* CDB passed to SG_IO.
     */
    uint8_t cdb[16];
    BlockCompletionFunc *cb;
    void *cb_opaque;
} SCSIBlockReq;

/*
 * SG_IO completion: translate host/driver/device status into either a
 * failed request, a retried/ignored error, or a call to the original
 * completion callback with ret == 0.
 */
static void scsi_block_sgio_complete(void *opaque, int ret)
{
    SCSIBlockReq *req = (SCSIBlockReq *)opaque;
    SCSIDiskReq *r = &req->req;
    sg_io_hdr_t *io_hdr = &req->io_header;

    if (ret == 0) {
        /* Transport-level failure takes precedence over device status. */
        if (io_hdr->host_status != SCSI_HOST_OK) {
            scsi_req_complete_failed(&r->req, io_hdr->host_status);
            scsi_req_unref(&r->req);
            return;
        }

        if (io_hdr->driver_status & SG_ERR_DRIVER_TIMEOUT) {
            ret = BUSY;
        } else {
            ret = io_hdr->status;
        }

        if (ret > 0) {
            /* Non-GOOD status: let the rerror/werror policy decide. */
            if (scsi_handle_rw_error(r, ret, true)) {
                scsi_req_unref(&r->req);
                return;
            }

            /* Ignore error.  */
            ret = 0;
        }
    }

    req->cb(req->cb_opaque, ret);
}

/*
 * Build and submit an SG_IO request for one segment of a DMA transfer,
 * re-encoding LBA and length into the smallest CDB that still fits the
 * guest's original opcode family.  Returns the in-flight AIOCB.
 */
static BlockAIOCB *scsi_block_do_sgio(SCSIBlockReq *req,
                                      int64_t offset, QEMUIOVector *iov,
                                      int direction,
                                      BlockCompletionFunc *cb, void *opaque)
{
    sg_io_hdr_t *io_header = &req->io_header;
    SCSIDiskReq *r = &req->req;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    int nb_logical_blocks;
    uint64_t lba;
    BlockAIOCB *aiocb;

    /* This is not supported yet.  It can only happen if the guest does
     * reads and writes that are not aligned to one logical sectors
     * _and_ cover multiple MemoryRegions.
     */
    assert(offset % s->qdev.blocksize == 0);
    assert(iov->size % s->qdev.blocksize == 0);

    io_header->interface_id = 'S';

    /* The data transfer comes from the QEMUIOVector.  */
    io_header->dxfer_direction = direction;
    io_header->dxfer_len = iov->size;
    io_header->dxferp = (void *)iov->iov;
    io_header->iovec_count = iov->niov;
    assert(io_header->iovec_count == iov->niov); /* no overflow! */

    /* Build a new CDB with the LBA and length patched in, in case
     * DMA helpers split the transfer in multiple segments.  Do not
     * build a CDB smaller than what the guest wanted, and only build
     * a larger one if strictly necessary.
     */
    io_header->cmdp = req->cdb;
    lba = offset / s->qdev.blocksize;
    nb_logical_blocks = io_header->dxfer_len / s->qdev.blocksize;

    if ((req->cmd >> 5) == 0 && lba <= 0x1ffff) {
        /* 6-byte CDB */
        stl_be_p(&req->cdb[0], lba | (req->cmd << 24));
        req->cdb[4] = nb_logical_blocks;
        req->cdb[5] = 0;
        io_header->cmd_len = 6;
    } else if ((req->cmd >> 5) <= 1 && lba <= 0xffffffffULL) {
        /* 10-byte CDB */
        req->cdb[0] = (req->cmd & 0x1f) | 0x20;
        req->cdb[1] = req->cdb1;
        stl_be_p(&req->cdb[2], lba);
        req->cdb[6] = req->group_number;
        stw_be_p(&req->cdb[7], nb_logical_blocks);
        req->cdb[9] = 0;
        io_header->cmd_len = 10;
    } else if ((req->cmd >> 5) != 4 && lba <= 0xffffffffULL) {
        /* 12-byte CDB */
        req->cdb[0] = (req->cmd & 0x1f) | 0xA0;
        req->cdb[1] = req->cdb1;
        stl_be_p(&req->cdb[2], lba);
        stl_be_p(&req->cdb[6], nb_logical_blocks);
        req->cdb[10] = req->group_number;
        req->cdb[11] = 0;
        io_header->cmd_len = 12;
    } else {
        /* 16-byte CDB */
        req->cdb[0] = (req->cmd & 0x1f) | 0x80;
        req->cdb[1] = req->cdb1;
        stq_be_p(&req->cdb[2], lba);
        stl_be_p(&req->cdb[10], nb_logical_blocks);
        req->cdb[14] = req->group_number;
        req->cdb[15] = 0;
        io_header->cmd_len = 16;
    }

    /* The rest is as in scsi-generic.c.  */
    io_header->mx_sb_len = sizeof(r->req.sense);
    io_header->sbp = r->req.sense;
    io_header->timeout = s->qdev.io_timeout * 1000;
    io_header->usr_ptr = r;
    io_header->flags |= SG_FLAG_DIRECT_IO;
    req->cb = cb;
    req->cb_opaque = opaque;
    trace_scsi_disk_aio_sgio_command(r->req.tag, req->cdb[0], lba,
                                     nb_logical_blocks, io_header->timeout);
    aiocb = blk_aio_ioctl(s->qdev.conf.blk, SG_IO, io_header,
                          scsi_block_sgio_complete, req);
    assert(aiocb != NULL);
    return aiocb;
}

/* scsi-block issues the guest's own CDB, so no FUA emulation is needed. */
static bool scsi_block_no_fua(SCSICommand *cmd)
{
    return false;
}

static BlockAIOCB *scsi_block_dma_readv(int64_t offset,
                                        QEMUIOVector *iov,
                                        BlockCompletionFunc *cb, void *cb_opaque,
                                        void *opaque)
{
    SCSIBlockReq *r = opaque;
    return scsi_block_do_sgio(r, offset, iov,
                              SG_DXFER_FROM_DEV, cb, cb_opaque);
}

static BlockAIOCB *scsi_block_dma_writev(int64_t offset,
                                         QEMUIOVector *iov,
                                         BlockCompletionFunc *cb, void *cb_opaque,
                                         void *opaque)
{
    SCSIBlockReq *r = opaque;
    return scsi_block_do_sgio(r, offset, iov,
                              SG_DXFER_TO_DEV, cb, cb_opaque);
}

/*
 * Decide whether a CDB must go through full scsi-generic passthrough
 * (true) or can use the scatter/gather DMA path (false).
 */
static bool scsi_block_is_passthrough(SCSIDiskState *s, uint8_t *buf)
{
    switch (buf[0]) {
    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
        /* Check if BYTCHK == 0x01 (data-out buffer contains data
         * for the number of logical blocks specified in the length
         * field).  For other modes, do not use scatter/gather operation.
         */
        if ((buf[1] & 6) == 2) {
            return false;
        }
        break;

    case READ_6:
    case READ_10:
    case READ_12:
    case READ_16:
    case WRITE_6:
    case WRITE_10:
    case WRITE_12:
    case WRITE_16:
    case WRITE_VERIFY_10:
    case WRITE_VERIFY_12:
    case WRITE_VERIFY_16:
        /* MMC writing cannot be done via DMA helpers, because it sometimes
         * involves writing beyond the maximum LBA or to negative LBA (lead-in).
         * We might use scsi_block_dma_reqops as long as no writing commands are
         * seen, but performance usually isn't paramount on optical media.  So,
         * just make scsi-block operate the same as scsi-generic for them.
         */
        if (s->qdev.type != TYPE_ROM) {
            return false;
        }
        break;

    default:
        break;
    }

    return true;
}


/*
 * DMA-path send_command for scsi-block: stash the CDB bytes that must be
 * preserved when scsi_block_do_sgio rebuilds the CDB, then defer to the
 * common scsi_disk_dma_command.
 */
static int32_t scsi_block_dma_command(SCSIRequest *req, uint8_t *buf)
{
    SCSIBlockReq *r = (SCSIBlockReq *)req;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);

    r->cmd = req->cmd.buf[0];
    /* Group code (top 3 opcode bits) selects the CDB layout. */
    switch (r->cmd >> 5) {
    case 0:
        /* 6-byte CDB.  */
        r->cdb1 = r->group_number = 0;
        break;
    case 1:
        /* 10-byte CDB.  */
        r->cdb1 = req->cmd.buf[1];
        r->group_number = req->cmd.buf[6];
        break;
    case 4:
        /* 12-byte CDB.  */
        r->cdb1 = req->cmd.buf[1];
        r->group_number = req->cmd.buf[10];
        break;
    case 5:
        /* 16-byte CDB.  */
        r->cdb1 = req->cmd.buf[1];
        r->group_number = req->cmd.buf[14];
        break;
    default:
        abort();
    }

    /* Protection information is not supported.  For SCSI versions 2 and
     * older (as determined by snooping the guest's INQUIRY commands),
     * there is no RD/WR/VRPROTECT, so skip this check in these versions.
     */
    if (s->qdev.scsi_version > 2 && (req->cmd.buf[1] & 0xe0)) {
        scsi_check_condition(&r->req, SENSE_CODE(INVALID_FIELD));
        return 0;
    }

    return scsi_disk_dma_command(req, buf);
}

/* Request ops for scsi-block commands routed through SG_IO DMA. */
static const SCSIReqOps scsi_block_dma_reqops = {
    .size         = sizeof(SCSIBlockReq),
    .free_req     = scsi_free_request,
    .send_command = scsi_block_dma_command,
    .read_data    = scsi_read_data,
    .write_data   = scsi_write_data,
    .get_buf      = scsi_get_buf,
    .load_request = scsi_disk_load_request,
    .save_request = scsi_disk_save_request,
};

/* Route each command to full passthrough or the SG_IO DMA path. */
static SCSIRequest *scsi_block_new_request(SCSIDevice *d, uint32_t tag,
                                           uint32_t lun, uint8_t *buf,
                                           void *hba_private)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);

    if (scsi_block_is_passthrough(s, buf)) {
        return scsi_req_alloc(&scsi_generic_req_ops, &s->qdev, tag, lun,
                              hba_private);
    } else {
        return scsi_req_alloc(&scsi_block_dma_reqops, &s->qdev, tag, lun,
                              hba_private);
    }
}

/* CDB parsing must match the request routing in scsi_block_new_request. */
static int scsi_block_parse_cdb(SCSIDevice *d, SCSICommand *cmd,
                                uint8_t *buf, size_t buf_len,
                                void *hba_private)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);

    if (scsi_block_is_passthrough(s, buf)) {
        return scsi_bus_parse_cdb(&s->qdev, cmd, buf, buf_len, hba_private);
    } else {
        return scsi_req_parse_cdb(&s->qdev, cmd, buf, buf_len);
    }
}

/* Copy the sense length the kernel actually wrote into the request. */
static void scsi_block_update_sense(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIBlockReq *br = DO_UPCAST(SCSIBlockReq, req, r);
    r->req.sense_len = MIN(br->io_header.sb_len_wr, sizeof(r->req.sense));
}
#endif

/* Default DMA read: plain preadv on the BlockBackend. */
static
BlockAIOCB *scsi_dma_readv(int64_t offset, QEMUIOVector *iov,
                           BlockCompletionFunc *cb, void *cb_opaque,
                           void *opaque)
{
    SCSIDiskReq *r = opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    return blk_aio_preadv(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque);
}

/* Default DMA write: plain pwritev on the BlockBackend. */
static
BlockAIOCB *scsi_dma_writev(int64_t offset, QEMUIOVector *iov,
                            BlockCompletionFunc *cb, void *cb_opaque,
                            void *opaque)
{
    SCSIDiskReq *r = opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    return blk_aio_pwritev(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque);
}

/* Base class init shared by scsi-hd, scsi-cd and scsi-block. */
static void scsi_disk_base_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass);

    dc->fw_name = "disk";
    dc->reset = scsi_disk_reset;
    sdc->dma_readv = scsi_dma_readv;
    sdc->dma_writev = scsi_dma_writev;
    sdc->need_fua_emulation = scsi_is_cmd_fua;
}

static const TypeInfo scsi_disk_base_info = {
    .name          = TYPE_SCSI_DISK_BASE,
    .parent        = TYPE_SCSI_DEVICE,
    .class_init    = scsi_disk_base_class_initfn,
    .instance_size = sizeof(SCSIDiskState),
    .class_size    = sizeof(SCSIDiskClass),
    .abstract      = true,
};

/* Properties common to scsi-hd and scsi-cd. */
#define DEFINE_SCSI_DISK_PROPERTIES()                                   \
    DEFINE_PROP_DRIVE_IOTHREAD("drive", SCSIDiskState, qdev.conf.blk),  \
    DEFINE_BLOCK_PROPERTIES_BASE(SCSIDiskState, qdev.conf),             \
    DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf),            \
    DEFINE_PROP_STRING("ver", SCSIDiskState, version),                  \
    DEFINE_PROP_STRING("serial", SCSIDiskState, serial),                \
    DEFINE_PROP_STRING("vendor", SCSIDiskState, vendor),                \
    DEFINE_PROP_STRING("product", SCSIDiskState, product),              \
    DEFINE_PROP_STRING("device_id", SCSIDiskState, device_id)


static Property scsi_hd_properties[] = {
    DEFINE_SCSI_DISK_PROPERTIES(),
    DEFINE_PROP_BIT("removable", SCSIDiskState, features,
                    SCSI_DISK_F_REMOVABLE, false),
    DEFINE_PROP_BIT("dpofua", SCSIDiskState, features,
                    SCSI_DISK_F_DPOFUA, false),
    DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
    DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
    DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
    DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size,
                       DEFAULT_MAX_UNMAP_SIZE),
    DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
                       DEFAULT_MAX_IO_SIZE),
    DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0),
    /* Default SCSI version 5 = SPC-3. */
    DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version,
                      5),
    DEFINE_PROP_BIT("quirk_mode_page_vendor_specific_apple", SCSIDiskState,
                    quirks, SCSI_DISK_QUIRK_MODE_PAGE_VENDOR_SPECIFIC_APPLE,
                    0),
    DEFINE_BLOCK_CHS_PROPERTIES(SCSIDiskState, qdev.conf),
    DEFINE_PROP_END_OF_LIST(),
};

/* Migration state shared by all scsi-disk variants. */
static const VMStateDescription vmstate_scsi_disk_state = {
    .name = "scsi-disk",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_SCSI_DEVICE(qdev, SCSIDiskState),
        VMSTATE_BOOL(media_changed, SCSIDiskState),
        VMSTATE_BOOL(media_event, SCSIDiskState),
        VMSTATE_BOOL(eject_request, SCSIDiskState),
        VMSTATE_BOOL(tray_open, SCSIDiskState),
        VMSTATE_BOOL(tray_locked, SCSIDiskState),
        VMSTATE_END_OF_LIST()
    }
};

static void scsi_hd_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);

    sc->realize      = scsi_hd_realize;
    sc->unrealize    = scsi_unrealize;
    sc->alloc_req    = scsi_new_request;
    sc->unit_attention_reported = scsi_disk_unit_attention_reported;
    dc->desc = "virtual SCSI disk";
    device_class_set_props(dc, scsi_hd_properties);
    dc->vmsd  = &vmstate_scsi_disk_state;
}

static const TypeInfo scsi_hd_info = {
    .name          = "scsi-hd",
    .parent        = TYPE_SCSI_DISK_BASE,
    .class_init    = scsi_hd_class_initfn,
};

static Property scsi_cd_properties[] = {
    DEFINE_SCSI_DISK_PROPERTIES(),
    DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
    DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
    DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
    DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
                       DEFAULT_MAX_IO_SIZE),
    /* Default SCSI version 5 = SPC-3. */
    DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version,
                      5),
    DEFINE_PROP_BIT("quirk_mode_page_apple_vendor", SCSIDiskState, quirks,
                    SCSI_DISK_QUIRK_MODE_PAGE_APPLE_VENDOR, 0),
    DEFINE_PROP_BIT("quirk_mode_sense_rom_use_dbd", SCSIDiskState, quirks,
                    SCSI_DISK_QUIRK_MODE_SENSE_ROM_USE_DBD, 0),
    DEFINE_PROP_BIT("quirk_mode_page_vendor_specific_apple", SCSIDiskState,
                    quirks, SCSI_DISK_QUIRK_MODE_PAGE_VENDOR_SPECIFIC_APPLE,
                    0),
    DEFINE_PROP_BIT("quirk_mode_page_truncated", SCSIDiskState, quirks,
                    SCSI_DISK_QUIRK_MODE_PAGE_TRUNCATED, 0),
    DEFINE_PROP_END_OF_LIST(),
};

static void scsi_cd_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);

    sc->realize      = scsi_cd_realize;
    sc->alloc_req    = scsi_new_request;
    sc->unit_attention_reported = scsi_disk_unit_attention_reported;
    dc->desc = "virtual SCSI CD-ROM";
    device_class_set_props(dc, scsi_cd_properties);
    dc->vmsd  = &vmstate_scsi_disk_state;
}

static const TypeInfo scsi_cd_info = {
    .name          = "scsi-cd",
    .parent        = TYPE_SCSI_DISK_BASE,
    .class_init    = scsi_cd_class_initfn,
};

#ifdef __linux__
static Property scsi_block_properties[] = {
    DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf),
    DEFINE_PROP_DRIVE("drive", SCSIDiskState, qdev.conf.blk),
    DEFINE_PROP_BOOL("share-rw", SCSIDiskState, qdev.conf.share_rw, false),
    DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0),
    DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size,
                       DEFAULT_MAX_UNMAP_SIZE),
    DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
                       DEFAULT_MAX_IO_SIZE),
    /* -1: snoop the version from the host device's INQUIRY data. */
    DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version,
                      -1),
    DEFINE_PROP_UINT32("io_timeout", SCSIDiskState, qdev.io_timeout,
                       DEFAULT_IO_TIMEOUT),
    DEFINE_PROP_END_OF_LIST(),
};

static void scsi_block_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);
    SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass);

    sc->realize      = scsi_block_realize;
    sc->alloc_req    = scsi_block_new_request;
    sc->parse_cdb    = scsi_block_parse_cdb;
    /* scsi-block overrides the DMA path to use SG_IO passthrough. */
    sdc->dma_readv   = scsi_block_dma_readv;
    sdc->dma_writev  = scsi_block_dma_writev;
    sdc->update_sense = scsi_block_update_sense;
    sdc->need_fua_emulation = scsi_block_no_fua;
    dc->desc = "SCSI block device passthrough";
    device_class_set_props(dc, scsi_block_properties);
    dc->vmsd  = &vmstate_scsi_disk_state;
}

static const TypeInfo scsi_block_info = {
    .name          = "scsi-block",
    .parent        = TYPE_SCSI_DISK_BASE,
    .class_init    = scsi_block_class_initfn,
};
#endif

static void scsi_disk_register_types(void)
{
    type_register_static(&scsi_disk_base_info);
    type_register_static(&scsi_hd_info);
    type_register_static(&scsi_cd_info);
#ifdef __linux__
    type_register_static(&scsi_block_info);
#endif
}

type_init(scsi_disk_register_types)