/*
 * SCSI Device emulation
 *
 * Copyright (c) 2006 CodeSourcery.
 * Based on code by Fabrice Bellard
 *
 * Written by Paul Brook
 * Modifications:
 *  2009-Dec-12 Artyom Tarasenko : implemented standard inquiry for the case
 *                                 when the allocation length of CDB is smaller
 *                                 than 36.
 *  2009-Oct-13 Artyom Tarasenko : implemented the block descriptor in the
 *                                 MODE SENSE response.
 *
 * This code is licensed under the LGPL.
 *
 * Note that this file only handles the SCSI architecture model and device
 * commands.  Emulation of interface/link layer protocols is handled by
 * the host adapter emulator.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "qemu/hw-version.h"
#include "qemu/memalign.h"
#include "hw/scsi/scsi.h"
#include "migration/qemu-file-types.h"
#include "migration/vmstate.h"
#include "hw/scsi/emulation.h"
#include "scsi/constants.h"
#include "sysemu/block-backend.h"
#include "sysemu/blockdev.h"
#include "hw/block/block.h"
#include "hw/qdev-properties.h"
#include "hw/qdev-properties-system.h"
#include "sysemu/dma.h"
#include "sysemu/sysemu.h"
#include "qemu/cutils.h"
#include "trace.h"
#include "qom/object.h"

#ifdef __linux
#include <scsi/sg.h>
#endif

/* Upper bound on the amount of data emulated per WRITE SAME operation. */
#define SCSI_WRITE_SAME_MAX (512 * KiB)
/* Size of the bounce buffer used for non-scatter/gather transfers. */
#define SCSI_DMA_BUF_SIZE (128 * KiB)
#define SCSI_MAX_INQUIRY_LEN 256
#define SCSI_MAX_MODE_LEN 256

#define DEFAULT_DISCARD_GRANULARITY (4 * KiB)
#define DEFAULT_MAX_UNMAP_SIZE (1 * GiB)
#define DEFAULT_MAX_IO_SIZE INT_MAX /* 2 GB - 1 block */

#define TYPE_SCSI_DISK_BASE "scsi-disk-base"

/* Longest serial number copied into the unit serial number VPD page. */
#define MAX_SERIAL_LEN 36
#define MAX_SERIAL_LEN_FOR_DEVID 20

OBJECT_DECLARE_TYPE(SCSIDiskState, SCSIDiskClass, SCSI_DISK_BASE)

struct SCSIDiskClass {
    SCSIDeviceClass parent_class;
    /*
     * Vectored DMA hooks used by the read and write paths; the concrete
     * implementations are provided by subclasses (not visible in this
     * chunk).
     */
    DMAIOFunc *dma_readv;
    DMAIOFunc *dma_writev;
    /* Whether FUA must be emulated with an explicit flush for this command. */
    bool (*need_fua_emulation)(SCSICommand *cmd);
    /* Refresh the request's sense data after a failed passthrough command. */
    void (*update_sense)(SCSIRequest *r);
};

typedef struct SCSIDiskReq {
    SCSIRequest req;
    /* Both sector and sector_count are in terms of BDRV_SECTOR_SIZE bytes. */
    uint64_t sector;
    uint32_t sector_count;
    uint32_t buflen;
    bool started;
    bool need_fua_emulation;
    /* Single-element bounce buffer used when no guest sg list is attached. */
    struct iovec iov;
    QEMUIOVector qiov;
    BlockAcctCookie acct;
} SCSIDiskReq;

/* Bit numbers for SCSIDiskState.features. */
#define SCSI_DISK_F_REMOVABLE 0
#define SCSI_DISK_F_DPOFUA 1
#define SCSI_DISK_F_NO_REMOVABLE_DEVOPS 2

struct SCSIDiskState {
    SCSIDevice qdev;
    uint32_t features;
    bool media_changed;
    bool media_event;
    bool eject_request;
    uint16_t port_index;
    uint64_t max_unmap_size;
    uint64_t max_io_size;
    uint32_t quirks;
    QEMUBH *bh;
    char *version;
    char *serial;
    char *vendor;
    char *product;
    char *device_id;
    bool tray_open;
    bool tray_locked;
    /*
     * 0x0000 - rotation rate not reported
     * 0x0001 - non-rotating medium (SSD)
     * 0x0002-0x0400 - reserved
     * 0x0401-0xfffe - rotations per minute
     * 0xffff - reserved
     */
    uint16_t rotation_rate;
    bool migrate_emulated_scsi_request;
};

/* SCSIReqOps::free_request hook: release the request's bounce buffer. */
static void scsi_free_request(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    qemu_vfree(r->iov.iov_base);
}

/* Helper function for command completion with sense.
 */
static void scsi_check_condition(SCSIDiskReq *r, SCSISense sense)
{
    trace_scsi_disk_check_condition(r->req.tag, sense.key, sense.asc,
                                    sense.ascq);
    scsi_req_build_sense(&r->req, sense);
    scsi_req_complete(&r->req, CHECK_CONDITION);
}

/*
 * (Re)initialize the request's single-element I/O vector.  The bounce
 * buffer is allocated lazily on first use; iov_len never exceeds the
 * remaining transfer (sector_count) nor the buffer size.
 */
static void scsi_init_iovec(SCSIDiskReq *r, size_t size)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    if (!r->iov.iov_base) {
        r->buflen = size;
        r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen);
    }
    r->iov.iov_len = MIN(r->sector_count * BDRV_SECTOR_SIZE, r->buflen);
    qemu_iovec_init_external(&r->qiov, &r->iov, 1);
}

/* Migration: serialize the state of an in-flight request. */
static void scsi_disk_save_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    qemu_put_be64s(f, &r->sector);
    qemu_put_be32s(f, &r->sector_count);
    qemu_put_be32s(f, &r->buflen);
    if (r->buflen) {
        if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
            qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len);
        } else if (!req->retry) {
            /* Partially-read data must travel with the request. */
            uint32_t len = r->iov.iov_len;
            qemu_put_be32s(f, &len);
            qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len);
        }
    }
}

/* Save hook for emulated requests; enabled by a device property. */
static void scsi_disk_emulate_save_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);

    if (s->migrate_emulated_scsi_request) {
        scsi_disk_save_request(f, req);
    }
}

/* Migration: restore a request saved by scsi_disk_save_request. */
static void scsi_disk_load_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    qemu_get_be64s(f, &r->sector);
    qemu_get_be32s(f, &r->sector_count);
    qemu_get_be32s(f, &r->buflen);
    if (r->buflen) {
        scsi_init_iovec(r, r->buflen);
        if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
            qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len);
        } else if (!r->req.retry) {
            uint32_t len;
            qemu_get_be32s(f, &len);
            r->iov.iov_len = len;
            /* Guard against a corrupt migration stream. */
            assert(r->iov.iov_len <= r->buflen);
            qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len);
        }
    }

    qemu_iovec_init_external(&r->qiov, &r->iov, 1);
}

/* Load hook for emulated requests; enabled by a device property. */
static void scsi_disk_emulate_load_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);

    if (s->migrate_emulated_scsi_request) {
        scsi_disk_load_request(f, req);
    }
}

/*
 * scsi_handle_rw_error has two return values.  False means that the error
 * must be ignored, true means that the error has been processed and the
 * caller should not do anything else for this request.  Note that
 * scsi_handle_rw_error always manages its reference counts, independent
 * of the return value.
 */
static bool scsi_handle_rw_error(SCSIDiskReq *r, int ret, bool acct_failed)
{
    bool is_read = (r->req.cmd.mode == SCSI_XFER_FROM_DEV);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
    SCSISense sense = SENSE_CODE(NO_SENSE);
    int error = 0;
    bool req_has_sense = false;
    BlockErrorAction action;
    int status;

    if (ret < 0) {
        /* Host-side error: derive SCSI status and sense from errno. */
        status = scsi_sense_from_errno(-ret, &sense);
        error = -ret;
    } else {
        /* A passthrough command has completed with nonzero status.  */
        status = ret;
        if (status == CHECK_CONDITION) {
            req_has_sense = true;
            error = scsi_sense_buf_to_errno(r->req.sense, sizeof(r->req.sense));
        } else {
            error = EINVAL;
        }
    }

    /*
     * Check whether the error has to be handled by the guest or should
     * rather follow the rerror=/werror= settings.  Guest-handled errors
     * are usually retried immediately, so do not post them to QMP and
     * do not account them as failed I/O.
     */
    if (req_has_sense &&
        scsi_sense_buf_is_guest_recoverable(r->req.sense, sizeof(r->req.sense))) {
        action = BLOCK_ERROR_ACTION_REPORT;
        acct_failed = false;
    } else {
        action = blk_get_error_action(s->qdev.conf.blk, is_read, error);
        blk_error_action(s->qdev.conf.blk, action, is_read, error);
    }

    switch (action) {
    case BLOCK_ERROR_ACTION_REPORT:
        if (acct_failed) {
            block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
        }
        if (req_has_sense) {
            /* Keep the sense the device itself produced. */
            sdc->update_sense(&r->req);
        } else if (status == CHECK_CONDITION) {
            scsi_req_build_sense(&r->req, sense);
        }
        scsi_req_complete(&r->req, status);
        return true;

    case BLOCK_ERROR_ACTION_IGNORE:
        return false;

    case BLOCK_ERROR_ACTION_STOP:
        /* The VM is being stopped; retry the request on resume. */
        scsi_req_retry(&r->req);
        return true;

    default:
        g_assert_not_reached();
    }
}

/*
 * Common error check for completion paths.  Returns true if the request
 * was cancelled or the error has been fully handled (caller must stop
 * processing); false if processing should continue.
 */
static bool scsi_disk_req_check_error(SCSIDiskReq *r, int ret, bool acct_failed)
{
    if (r->req.io_canceled) {
        scsi_req_cancel_complete(&r->req);
        return true;
    }

    if (ret < 0) {
        return scsi_handle_rw_error(r, ret, acct_failed);
    }

    return false;
}

/* Generic AIO completion: account the operation and finish the request. */
static void scsi_aio_complete(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    /* The request must only run in the BlockBackend's AioContext */
    assert(blk_get_aio_context(s->qdev.conf.blk) ==
           qemu_get_current_aio_context());

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    if (scsi_disk_req_check_error(r, ret, true)) {
        goto done;
    }

    block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    scsi_req_complete(&r->req, GOOD);

done:
    scsi_req_unref(&r->req);
}

/* Does this CDB request Force Unit Access (or imply it, as VERIFY does)? */
static bool scsi_is_cmd_fua(SCSICommand *cmd)
{
    switch (cmd->buf[0]) {
    case READ_10:
    case READ_12:
    case READ_16:
    case WRITE_10:
    case WRITE_12:
    case WRITE_16:
        /* FUA is bit 3 of CDB byte 1 for the 10/12/16-byte forms. */
        return (cmd->buf[1] & 8) != 0;

    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
    case WRITE_VERIFY_10:
    case WRITE_VERIFY_12:
    case WRITE_VERIFY_16:
        /* Verification must hit the medium, so always treat as FUA. */
        return true;

    case READ_6:
    case WRITE_6:
    default:
        /* 6-byte CDBs have no FUA bit. */
        return false;
    }
}

/*
 * Finish a write request: when FUA emulation is needed, issue a flush
 * whose completion (scsi_aio_complete) finishes the request; otherwise
 * complete immediately.
 */
static void scsi_write_do_fua(SCSIDiskReq *r)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb == NULL);
    assert(!r->req.io_canceled);

    if (r->need_fua_emulation) {
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
        return;
    }

    scsi_req_complete(&r->req, GOOD);
    scsi_req_unref(&r->req);
}

/* Completion of a scatter/gather transfer (no AIOCB outstanding). */
static void scsi_dma_complete_noio(SCSIDiskReq *r, int ret)
{
    assert(r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    /* The whole remaining transfer was submitted in one go. */
    r->sector += r->sector_count;
    r->sector_count = 0;
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        scsi_write_do_fua(r);
        return;
    } else {
        scsi_req_complete(&r->req, GOOD);
    }

done:
    scsi_req_unref(&r->req);
}

/* AIO callback for dma_blk_io: account, then finish via the noio path. */
static void scsi_dma_complete(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    }
    scsi_dma_complete_noio(r, ret);
}

/* Push one chunk of read data to the HBA and advance the sector window. */
static void scsi_read_complete_noio(SCSIDiskReq *r, int ret)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    uint32_t n;

    /* The request must only run in the BlockBackend's AioContext */
    assert(blk_get_aio_context(s->qdev.conf.blk) ==
           qemu_get_current_aio_context());

    assert(r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    n = r->qiov.size / BDRV_SECTOR_SIZE;
    r->sector += n;
    r->sector_count -= n;
    scsi_req_data(&r->req, r->qiov.size);

done:
    scsi_req_unref(&r->req);
}

/* AIO callback for bounce-buffer reads: account, then push data. */
static void scsi_read_complete(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
        trace_scsi_disk_read_complete(r->req.tag, r->qiov.size);
    }
    scsi_read_complete_noio(r, ret);
}

/* Actually issue a read to the block device.  */
static void scsi_do_read(SCSIDiskReq *r, int ret)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));

    assert (r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    /* The request is used as the AIO opaque value, so add a ref.  */
    scsi_req_ref(&r->req);

    if (r->req.sg) {
        /* Scatter/gather: let the DMA helpers drive the whole transfer. */
        dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_READ);
        r->req.residual -= r->req.sg->size;
        r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
                                  r->req.sg, r->sector << BDRV_SECTOR_BITS,
                                  BDRV_SECTOR_SIZE,
                                  sdc->dma_readv, r, scsi_dma_complete, r,
                                  DMA_DIRECTION_FROM_DEVICE);
    } else {
        /* Bounce buffer: transfer one SCSI_DMA_BUF_SIZE chunk at a time. */
        scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         r->qiov.size, BLOCK_ACCT_READ);
        r->req.aiocb = sdc->dma_readv(r->sector << BDRV_SECTOR_BITS, &r->qiov,
                                      scsi_read_complete, r, r);
    }

done:
    scsi_req_unref(&r->req);
}

/* Flush-before-read completion (FUA emulation), then do the real read. */
static void scsi_do_read_cb(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert (r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    }
    scsi_do_read(opaque, ret);
}

/* Read more data from scsi device into buffer.  */
static void scsi_read_data(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    bool first;

    trace_scsi_disk_read_data_count(r->sector_count);
    if (r->sector_count == 0) {
        /* This also clears the sense buffer for REQUEST SENSE.  */
        scsi_req_complete(&r->req, GOOD);
        return;
    }

    /* No data transfer may already be in progress */
    assert(r->req.aiocb == NULL);

    /* The request is used as the AIO opaque value, so add a ref.  */
    scsi_req_ref(&r->req);
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        trace_scsi_disk_read_data_invalid();
        scsi_read_complete_noio(r, -EINVAL);
        return;
    }

    if (!blk_is_available(req->dev->conf.blk)) {
        scsi_read_complete_noio(r, -ENOMEDIUM);
        return;
    }

    first = !r->started;
    r->started = true;
    if (first && r->need_fua_emulation) {
        /* Emulate FUA by flushing the write cache before reading. */
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_do_read_cb, r);
    } else {
        scsi_do_read(r, 0);
    }
}

/* One chunk of write data has been stored; ask for more or finish. */
static void scsi_write_complete_noio(SCSIDiskReq *r, int ret)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    uint32_t n;

    /* The request must only run in the BlockBackend's AioContext */
    assert(blk_get_aio_context(s->qdev.conf.blk) ==
           qemu_get_current_aio_context());

    assert (r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    n = r->qiov.size / BDRV_SECTOR_SIZE;
    r->sector += n;
    r->sector_count -= n;
    if (r->sector_count == 0) {
        scsi_write_do_fua(r);
        return;
    } else {
        scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
        trace_scsi_disk_write_complete_noio(r->req.tag, r->qiov.size);
        scsi_req_data(&r->req, r->qiov.size);
    }

done:
    scsi_req_unref(&r->req);
}

/* AIO callback for bounce-buffer writes: account, then continue. */
static void scsi_write_complete(void * opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert (r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    }
    scsi_write_complete_noio(r, ret);
}

/* SCSIReqOps::write_data hook: submit guest data to the block backend. */
static void scsi_write_data(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));

    /* No data transfer may already be in progress */
    assert(r->req.aiocb == NULL);

    /* The request is used as the AIO opaque value, so add a ref.  */
    scsi_req_ref(&r->req);
    if (r->req.cmd.mode != SCSI_XFER_TO_DEV) {
        trace_scsi_disk_write_data_invalid();
        scsi_write_complete_noio(r, -EINVAL);
        return;
    }

    if (!r->req.sg && !r->qiov.size) {
        /* Called for the first time.  Ask the driver to send us more data.  */
        r->started = true;
        scsi_write_complete_noio(r, 0);
        return;
    }
    if (!blk_is_available(req->dev->conf.blk)) {
        scsi_write_complete_noio(r, -ENOMEDIUM);
        return;
    }

    if (r->req.cmd.buf[0] == VERIFY_10 || r->req.cmd.buf[0] == VERIFY_12 ||
        r->req.cmd.buf[0] == VERIFY_16) {
        /* VERIFY is a no-op here: accept the data but write nothing. */
        if (r->req.sg) {
            scsi_dma_complete_noio(r, 0);
        } else {
            scsi_write_complete_noio(r, 0);
        }
        return;
    }

    if (r->req.sg) {
        /* Scatter/gather: let the DMA helpers drive the whole transfer. */
        dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_WRITE);
        r->req.residual -= r->req.sg->size;
        r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
                                  r->req.sg, r->sector << BDRV_SECTOR_BITS,
                                  BDRV_SECTOR_SIZE,
                                  sdc->dma_writev, r, scsi_dma_complete, r,
                                  DMA_DIRECTION_TO_DEVICE);
    } else {
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         r->qiov.size, BLOCK_ACCT_WRITE);
        r->req.aiocb = sdc->dma_writev(r->sector << BDRV_SECTOR_BITS, &r->qiov,
                                       scsi_write_complete, r, r);
    }
}

/* Return a pointer to the data buffer.  */
static uint8_t *scsi_get_buf(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    return (uint8_t *)r->iov.iov_base;
}

/*
 * Build an INQUIRY EVPD (vital product data) page into outbuf.
 * Returns the number of bytes written, or -1 if the requested page is
 * not supported.
 */
static int scsi_disk_emulate_vpd_page(SCSIRequest *req, uint8_t *outbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    uint8_t page_code = req->cmd.buf[2];
    int start, buflen = 0;

    /* 4-byte page header; the length byte is patched in at the end. */
    outbuf[buflen++] = s->qdev.type & 0x1f;
    outbuf[buflen++] = page_code;
    outbuf[buflen++] = 0x00;
    outbuf[buflen++] = 0x00;
    start = buflen;

    switch (page_code) {
    case 0x00: /* Supported page codes, mandatory */
    {
        trace_scsi_disk_emulate_vpd_page_00(req->cmd.xfer);
        outbuf[buflen++] = 0x00; /* list of supported pages (this page) */
        if (s->serial) {
            outbuf[buflen++] = 0x80; /* unit serial number */
        }
        outbuf[buflen++] = 0x83; /* device identification */
        if (s->qdev.type == TYPE_DISK) {
            outbuf[buflen++] = 0xb0; /* block limits */
            outbuf[buflen++] = 0xb1; /* block device characteristics */
            outbuf[buflen++] = 0xb2; /* thin provisioning */
        }
        break;
    }
    case 0x80: /* Device serial number, optional */
    {
        int l;

        if (!s->serial) {
            trace_scsi_disk_emulate_vpd_page_80_not_supported();
            return -1;
        }

        l = strlen(s->serial);
        if (l > MAX_SERIAL_LEN) {
            l = MAX_SERIAL_LEN;
        }

        trace_scsi_disk_emulate_vpd_page_80(req->cmd.xfer);
        memcpy(outbuf + buflen, s->serial, l);
        buflen += l;
        break;
    }

    case 0x83: /* Device identification page, mandatory */
    {
        /* Cap at 255 - 8 so the descriptor length fits in one byte. */
        int id_len = s->device_id ? MIN(strlen(s->device_id), 255 - 8) : 0;

        trace_scsi_disk_emulate_vpd_page_83(req->cmd.xfer);

        if (id_len) {
            outbuf[buflen++] = 0x2; /* ASCII */
            outbuf[buflen++] = 0;   /* not officially assigned */
            outbuf[buflen++] = 0;   /* reserved */
            outbuf[buflen++] = id_len; /* length of data following */
            memcpy(outbuf + buflen, s->device_id, id_len);
            buflen += id_len;
        }

        if (s->qdev.wwn) {
            outbuf[buflen++] = 0x1; /* Binary */
            outbuf[buflen++] = 0x3; /* NAA */
            outbuf[buflen++] = 0;   /* reserved */
            outbuf[buflen++] = 8;
            stq_be_p(&outbuf[buflen], s->qdev.wwn);
            buflen += 8;
        }

        if (s->qdev.port_wwn) {
            outbuf[buflen++] = 0x61; /* SAS / Binary */
            outbuf[buflen++] = 0x93; /* PIV / Target port / NAA */
            outbuf[buflen++] = 0;    /* reserved */
            outbuf[buflen++] = 8;
            stq_be_p(&outbuf[buflen], s->qdev.port_wwn);
            buflen += 8;
        }

        if (s->port_index) {
            outbuf[buflen++] = 0x61; /* SAS / Binary */

            /* PIV/Target port/relative target port */
            outbuf[buflen++] = 0x94;

            outbuf[buflen++] = 0; /* reserved */
            outbuf[buflen++] = 4;
            stw_be_p(&outbuf[buflen + 2], s->port_index);
            buflen += 4;
        }
        break;
    }
    case 0xb0: /* block limits */
    {
        SCSIBlockLimits bl = {};

        if (s->qdev.type == TYPE_ROM) {
            trace_scsi_disk_emulate_vpd_page_b0_not_supported();
            return -1;
        }
        bl.wsnz = 1;
        bl.unmap_sectors =
            s->qdev.conf.discard_granularity / s->qdev.blocksize;
        bl.min_io_size =
            s->qdev.conf.min_io_size / s->qdev.blocksize;
        bl.opt_io_size =
            s->qdev.conf.opt_io_size / s->qdev.blocksize;
        bl.max_unmap_sectors =
            s->max_unmap_size / s->qdev.blocksize;
        bl.max_io_sectors =
            s->max_io_size / s->qdev.blocksize;
        /* 255 descriptors fit in 4 KiB with an 8-byte header */
        bl.max_unmap_descr = 255;

        if (s->qdev.type == TYPE_DISK) {
            int max_transfer_blk = blk_get_max_transfer(s->qdev.conf.blk);
            int max_io_sectors_blk =
                max_transfer_blk / s->qdev.blocksize;

            bl.max_io_sectors =
                MIN_NON_ZERO(max_io_sectors_blk, bl.max_io_sectors);
        }
        buflen += scsi_emulate_block_limits(outbuf + buflen, &bl);
        break;
    }
    case 0xb1: /* block device characteristics */
    {
        buflen = 0x40;
        outbuf[4] = (s->rotation_rate >> 8) & 0xff;
        outbuf[5] = s->rotation_rate & 0xff;
        outbuf[6] = 0; /* PRODUCT TYPE */
        outbuf[7] = 0; /* WABEREQ | WACEREQ | NOMINAL FORM FACTOR */
        outbuf[8] = 0; /* VBULS */
        break;
    }
    case 0xb2: /* thin provisioning */
    {
        buflen = 8;
        outbuf[4] = 0;
        outbuf[5] = 0xe0; /* unmap & write_same 10/16 all supported */
        outbuf[6] = s->qdev.conf.discard_granularity ? 2 : 1;
        outbuf[7] = 0;
        break;
    }
    default:
        return -1;
    }
    /* done with EVPD */
    assert(buflen - start <= 255);
    outbuf[start - 1] = buflen - start;
    return buflen;
}

/*
 * Build the response to an INQUIRY command into outbuf.  Returns the
 * response length in bytes, or -1 for an unsupported request.
 */
static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    int buflen = 0;

    if (req->cmd.buf[1] & 0x1) {
        /* Vital product data */
        return scsi_disk_emulate_vpd_page(req, outbuf);
    }

    /* Standard INQUIRY data */
    if (req->cmd.buf[2] != 0) {
        return -1;
    }

    /* PAGE CODE == 0 */
    buflen = req->cmd.xfer;
    if (buflen > SCSI_MAX_INQUIRY_LEN) {
        buflen = SCSI_MAX_INQUIRY_LEN;
    }

    outbuf[0] = s->qdev.type & 0x1f;
    outbuf[1] = (s->features & (1 << SCSI_DISK_F_REMOVABLE)) ? 0x80 : 0;

    strpadcpy((char *) &outbuf[16], 16, s->product, ' ');
    strpadcpy((char *) &outbuf[8], 8, s->vendor, ' ');

    memset(&outbuf[32], 0, 4);
    memcpy(&outbuf[32], s->version, MIN(4, strlen(s->version)));
    /*
     * We claim conformance to SPC-3, which is required for guests
     * to ask for modern features like READ CAPACITY(16) or the
     * block characteristics VPD page by default.  Not all of SPC-3
     * is actually implemented, but we're good enough.
     */
    outbuf[2] = s->qdev.default_scsi_version;
    outbuf[3] = 2 | 0x10; /* Format 2, HiSup */

    if (buflen > 36) {
        outbuf[4] = buflen - 5; /* Additional Length = (Len - 1) - 4 */
    } else {
        /* If the allocation length of CDB is too small,
           the additional length is not adjusted */
        outbuf[4] = 36 - 5;
    }

    /* Sync data transfer and TCQ.  */
    outbuf[7] = 0x10 | (req->bus->info->tcq ? 0x02 : 0);
    return buflen;
}

/* True if the inserted medium is larger than a CD (used for MMC replies). */
static inline bool media_is_dvd(SCSIDiskState *s)
{
    uint64_t nb_sectors;
    if (s->qdev.type != TYPE_ROM) {
        return false;
    }
    if (!blk_is_available(s->qdev.conf.blk)) {
        return false;
    }
    blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
    return nb_sectors > CD_MAX_SECTORS;
}

/* True if the inserted medium fits within CD capacity. */
static inline bool media_is_cd(SCSIDiskState *s)
{
    uint64_t nb_sectors;
    if (s->qdev.type != TYPE_ROM) {
        return false;
    }
    if (!blk_is_available(s->qdev.conf.blk)) {
        return false;
    }
    blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
    return nb_sectors <= CD_MAX_SECTORS;
}

/*
 * READ DISC INFORMATION (MMC): report a finalized, single-session,
 * read-only disc.  Returns the response size or -1 on error.
 */
static int scsi_read_disc_information(SCSIDiskState *s, SCSIDiskReq *r,
                                      uint8_t *outbuf)
{
    uint8_t type = r->req.cmd.buf[1] & 7;

    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }

    /* Types 1/2 are only defined for Blu-Ray.
     */
    if (type != 0) {
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        return -1;
    }

    memset(outbuf, 0, 34);
    outbuf[1] = 32;
    outbuf[2] = 0xe; /* last session complete, disc finalized */
    outbuf[3] = 1;   /* first track on disc */
    outbuf[4] = 1;   /* # of sessions */
    outbuf[5] = 1;   /* first track of last session */
    outbuf[6] = 1;   /* last track of last session */
    outbuf[7] = 0x20; /* unrestricted use */
    outbuf[8] = 0x00; /* CD-ROM or DVD-ROM */
    /* 9-10-11: most significant byte corresponding bytes 4-5-6 */
    /* 12-23: not meaningful for CD-ROM or DVD-ROM */
    /* 24-31: disc bar code */
    /* 32: disc application code */
    /* 33: number of OPC tables */

    return 34;
}

/*
 * READ DVD STRUCTURE (MMC).  Returns the structure size, or -1 on
 * failure; some failure modes additionally raise sense data via
 * scsi_check_condition.
 */
static int scsi_read_dvd_structure(SCSIDiskState *s, SCSIDiskReq *r,
                                   uint8_t *outbuf)
{
    /* Response sizes per format code; 0 means the format is unsupported. */
    static const int rds_caps_size[5] = {
        [0] = 2048 + 4,
        [1] = 4 + 4,
        [3] = 188 + 4,
        [4] = 2048 + 4,
    };

    uint8_t media = r->req.cmd.buf[1];
    uint8_t layer = r->req.cmd.buf[6];
    uint8_t format = r->req.cmd.buf[7];
    int size = -1;

    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }
    if (media != 0) {
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        return -1;
    }

    if (format != 0xff) {
        if (!blk_is_available(s->qdev.conf.blk)) {
            scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
            return -1;
        }
        if (media_is_cd(s)) {
            scsi_check_condition(r, SENSE_CODE(INCOMPATIBLE_FORMAT));
            return -1;
        }
        if (format >= ARRAY_SIZE(rds_caps_size)) {
            return -1;
        }
        size = rds_caps_size[format];
        memset(outbuf, 0, size);
    }

    switch (format) {
    case 0x00: {
        /* Physical format information */
        uint64_t nb_sectors;
        if (layer != 0) {
            goto fail;
        }
        blk_get_geometry(s->qdev.conf.blk, &nb_sectors);

        outbuf[4] = 1;   /* DVD-ROM, part version 1 */
        outbuf[5] = 0xf; /* 120mm disc, minimum rate unspecified */
        outbuf[6] = 1;   /* one layer, read-only (per MMC-2 spec) */
        outbuf[7] = 0;   /* default densities */

        /* Sector numbers are in 2048-byte DVD sectors, hence >> 2. */
        stl_be_p(&outbuf[12], (nb_sectors >> 2) - 1); /* end sector */
        stl_be_p(&outbuf[16], (nb_sectors >> 2) - 1); /* l0 end sector */
        break;
    }

    case 0x01: /* DVD copyright information, all zeros */
        break;

    case 0x03: /* BCA information - invalid field for no BCA info */
        return -1;

    case 0x04: /* DVD disc manufacturing information, all zeros */
        break;

    case 0xff: { /* List capabilities */
        int i;
        size = 4;
        for (i = 0; i < ARRAY_SIZE(rds_caps_size); i++) {
            if (!rds_caps_size[i]) {
                continue;
            }
            outbuf[size] = i;
            outbuf[size + 1] = 0x40; /* Not writable, readable */
            stw_be_p(&outbuf[size + 2], rds_caps_size[i]);
            size += 4;
        }
        break;
    }

    default:
        return -1;
    }

    /* Size of buffer, not including 2 byte size field */
    stw_be_p(outbuf, size - 2);
    return size;

fail:
    return -1;
}

/*
 * Fill in a GET EVENT STATUS media event descriptor (4 bytes).
 * Consumes pending new-media / eject-request events as a side effect.
 */
static int scsi_event_status_media(SCSIDiskState *s, uint8_t *outbuf)
{
    uint8_t event_code, media_status;

    media_status = 0;
    if (s->tray_open) {
        media_status = MS_TRAY_OPEN;
    } else if (blk_is_inserted(s->qdev.conf.blk)) {
        media_status = MS_MEDIA_PRESENT;
    }

    /* Event notification descriptor */
    event_code = MEC_NO_CHANGE;
    if (media_status != MS_TRAY_OPEN) {
        if (s->media_event) {
            event_code = MEC_NEW_MEDIA;
            s->media_event = false;
        } else if (s->eject_request) {
            event_code = MEC_EJECT_REQUESTED;
            s->eject_request = false;
        }
    }

    outbuf[0] = event_code;
    outbuf[1] = media_status;

    /* These fields are reserved, just clear them.  */
    outbuf[2] = 0;
    outbuf[3] = 0;
    return 4;
}

/* GET EVENT STATUS NOTIFICATION (MMC); only the polled form is handled. */
static int scsi_get_event_status_notification(SCSIDiskState *s, SCSIDiskReq *r,
                                              uint8_t *outbuf)
{
    int size;
    uint8_t *buf = r->req.cmd.buf;
    uint8_t notification_class_request = buf[4];
    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }
    if ((buf[1] & 1) == 0) {
        /* asynchronous */
        return -1;
    }

    size = 4;
    outbuf[0] = outbuf[1] = 0;
    outbuf[3] = 1 << GESN_MEDIA; /* supported events */
    if (notification_class_request & (1 << GESN_MEDIA)) {
        outbuf[2] = GESN_MEDIA;
        size += scsi_event_status_media(s, &outbuf[size]);
    } else {
        outbuf[2] = 0x80;
    }
    stw_be_p(outbuf, size - 4);
    return size;
}

/* GET CONFIGURATION (MMC): report CD/DVD profiles and basic features. */
static int scsi_get_configuration(SCSIDiskState *s, uint8_t *outbuf)
{
    int current;

    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }

    if (media_is_dvd(s)) {
        current = MMC_PROFILE_DVD_ROM;
    } else if (media_is_cd(s)) {
        current = MMC_PROFILE_CD_ROM;
    } else {
        current = MMC_PROFILE_NONE;
    }

    memset(outbuf, 0, 40);
    stl_be_p(&outbuf[0], 36); /* Bytes after the data length field */
    stw_be_p(&outbuf[6], current);
    /* outbuf[8] - outbuf[19]: Feature 0 - Profile list */
    outbuf[10] = 0x03; /* persistent, current */
    outbuf[11] = 8;    /* two profiles */
    stw_be_p(&outbuf[12], MMC_PROFILE_DVD_ROM);
    outbuf[14] = (current == MMC_PROFILE_DVD_ROM);
    stw_be_p(&outbuf[16], MMC_PROFILE_CD_ROM);
    outbuf[18] = (current == MMC_PROFILE_CD_ROM);
    /* outbuf[20] - outbuf[31]: Feature 1 - Core feature */
    stw_be_p(&outbuf[20], 1);
    outbuf[22] = 0x08 | 0x03; /* version 2, persistent, current */
    outbuf[23] = 8;
    stl_be_p(&outbuf[24], 1); /* SCSI */
    outbuf[28] = 1; /* DBE = 1, mandatory */
    /* outbuf[32] - outbuf[39]: Feature 3 - Removable media feature */
    stw_be_p(&outbuf[32], 3);
1087 outbuf[34] = 0x08 | 0x03; /* version 2, persistent, current */ 1088 outbuf[35] = 4; 1089 outbuf[36] = 0x39; /* tray, load=1, eject=1, unlocked at powerup, lock=1 */ 1090 /* TODO: Random readable, CD read, DVD read, drive serial number, 1091 power management */ 1092 return 40; 1093 } 1094 1095 static int scsi_emulate_mechanism_status(SCSIDiskState *s, uint8_t *outbuf) 1096 { 1097 if (s->qdev.type != TYPE_ROM) { 1098 return -1; 1099 } 1100 memset(outbuf, 0, 8); 1101 outbuf[5] = 1; /* CD-ROM */ 1102 return 8; 1103 } 1104 1105 static int mode_sense_page(SCSIDiskState *s, int page, uint8_t **p_outbuf, 1106 int page_control) 1107 { 1108 static const int mode_sense_valid[0x3f] = { 1109 [MODE_PAGE_VENDOR_SPECIFIC] = (1 << TYPE_DISK) | (1 << TYPE_ROM), 1110 [MODE_PAGE_HD_GEOMETRY] = (1 << TYPE_DISK), 1111 [MODE_PAGE_FLEXIBLE_DISK_GEOMETRY] = (1 << TYPE_DISK), 1112 [MODE_PAGE_CACHING] = (1 << TYPE_DISK) | (1 << TYPE_ROM), 1113 [MODE_PAGE_R_W_ERROR] = (1 << TYPE_DISK) | (1 << TYPE_ROM), 1114 [MODE_PAGE_AUDIO_CTL] = (1 << TYPE_ROM), 1115 [MODE_PAGE_CAPABILITIES] = (1 << TYPE_ROM), 1116 [MODE_PAGE_APPLE_VENDOR] = (1 << TYPE_ROM), 1117 }; 1118 1119 uint8_t *p = *p_outbuf + 2; 1120 int length; 1121 1122 assert(page < ARRAY_SIZE(mode_sense_valid)); 1123 if ((mode_sense_valid[page] & (1 << s->qdev.type)) == 0) { 1124 return -1; 1125 } 1126 1127 /* 1128 * If Changeable Values are requested, a mask denoting those mode parameters 1129 * that are changeable shall be returned. As we currently don't support 1130 * parameter changes via MODE_SELECT all bits are returned set to zero. 1131 * The buffer was already menset to zero by the caller of this function. 1132 * 1133 * The offsets here are off by two compared to the descriptions in the 1134 * SCSI specs, because those include a 2-byte header. This is unfortunate, 1135 * but it is done so that offsets are consistent within our implementation 1136 * of MODE SENSE and MODE SELECT. 
MODE SELECT has to deal with both
     * 2-byte and 4-byte headers.
     */
    switch (page) {
    case MODE_PAGE_HD_GEOMETRY:
        length = 0x16;
        if (page_control == 1) { /* Changeable Values */
            break;
        }
        /* if a geometry hint is available, use it */
        p[0] = (s->qdev.conf.cyls >> 16) & 0xff;
        p[1] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[2] = s->qdev.conf.cyls & 0xff;
        p[3] = s->qdev.conf.heads & 0xff;
        /* Write precomp start cylinder, disabled */
        p[4] = (s->qdev.conf.cyls >> 16) & 0xff;
        p[5] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[6] = s->qdev.conf.cyls & 0xff;
        /* Reduced current start cylinder, disabled */
        p[7] = (s->qdev.conf.cyls >> 16) & 0xff;
        p[8] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[9] = s->qdev.conf.cyls & 0xff;
        /* Device step rate [ns], 200ns */
        p[10] = 0;
        p[11] = 200;
        /* Landing zone cylinder */
        p[12] = 0xff;
        p[13] = 0xff;
        p[14] = 0xff;
        /* Medium rotation rate [rpm], 5400 rpm */
        p[18] = (5400 >> 8) & 0xff;
        p[19] = 5400 & 0xff;
        break;

    case MODE_PAGE_FLEXIBLE_DISK_GEOMETRY:
        length = 0x1e;
        if (page_control == 1) { /* Changeable Values */
            break;
        }
        /* Transfer rate [kbit/s], 5Mbit/s */
        p[0] = 5000 >> 8;
        p[1] = 5000 & 0xff;
        /* if a geometry hint is available, use it */
        p[2] = s->qdev.conf.heads & 0xff;
        p[3] = s->qdev.conf.secs & 0xff;
        p[4] = s->qdev.blocksize >> 8;
        p[6] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[7] = s->qdev.conf.cyls & 0xff;
        /* Write precomp start cylinder, disabled */
        p[8] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[9] = s->qdev.conf.cyls & 0xff;
        /* Reduced current start cylinder, disabled */
        p[10] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[11] = s->qdev.conf.cyls & 0xff;
        /* Device step rate [100us], 100us */
        p[12] = 0;
        p[13] = 1;
        /* Device step pulse width [us], 1us */
        p[14] = 1;
        /* Device head settle delay [100us], 100us */
        p[15] = 0;
        p[16] = 1;
        /* Motor on delay [0.1s], 0.1s */
        p[17] = 1;
        /* Motor off delay [0.1s], 0.1s */
        p[18] = 1;
        /* Medium rotation rate [rpm], 5400 rpm */
        p[26] = (5400 >> 8) & 0xff;
        p[27] = 5400 & 0xff;
        break;

    case MODE_PAGE_CACHING:
        length = 0x12;
        /* WCE is the only bit we report as changeable. */
        if (page_control == 1 || /* Changeable Values */
            blk_enable_write_cache(s->qdev.conf.blk)) {
            p[0] = 4; /* WCE */
        }
        break;

    case MODE_PAGE_R_W_ERROR:
        length = 10;
        if (page_control == 1) { /* Changeable Values */
            if (s->qdev.type == TYPE_ROM) {
                /* Automatic Write Reallocation Enabled */
                p[0] = 0x80;
            }
            break;
        }
        p[0] = 0x80; /* Automatic Write Reallocation Enabled */
        if (s->qdev.type == TYPE_ROM) {
            p[1] = 0x20; /* Read Retry Count */
        }
        break;

    case MODE_PAGE_AUDIO_CTL:
        /* All-zero page body; length only. */
        length = 14;
        break;

    case MODE_PAGE_CAPABILITIES:
        length = 0x14;
        if (page_control == 1) { /* Changeable Values */
            break;
        }

        p[0] = 0x3b; /* CD-R & CD-RW read */
        p[1] = 0; /* Writing not supported */
        p[2] = 0x7f; /* Audio, composite, digital out,
                        mode 2 form 1&2, multi session */
        p[3] = 0xff; /* CD DA, DA accurate, RW supported,
                        RW corrected, C2 errors, ISRC,
                        UPC, Bar code */
        p[4] = 0x2d | (s->tray_locked ? 2 : 0);
        /* Locking supported, jumper present, eject, tray */
        p[5] = 0; /* no volume & mute control, no
                     changer */
        p[6] = (50 * 176) >> 8; /* 50x read speed */
        p[7] = (50 * 176) & 0xff;
        p[8] = 2 >> 8; /* Two volume levels */
        p[9] = 2 & 0xff;
        p[10] = 2048 >> 8; /* 2M buffer */
        p[11] = 2048 & 0xff;
        p[12] = (16 * 176) >> 8; /* 16x read speed current */
        p[13] = (16 * 176) & 0xff;
        p[16] = (16 * 176) >> 8; /* 16x write speed */
        p[17] = (16 * 176) & 0xff;
        p[18] = (16 * 176) >> 8; /* 16x write speed current */
        p[19] = (16 * 176) & 0xff;
        break;

    case MODE_PAGE_APPLE_VENDOR:
        /* Only exposed when the Apple-vendor quirk is enabled. */
        if (s->quirks & (1 << SCSI_DISK_QUIRK_MODE_PAGE_APPLE_VENDOR)) {
            length = 0x1e;
            if (page_control == 1) { /* Changeable Values */
                break;
            }

            memset(p, 0, length);
            strcpy((char *)p + 8, "APPLE COMPUTER, INC ");
            break;
        } else {
            return -1;
        }

    case MODE_PAGE_VENDOR_SPECIFIC:
        /* Only exposed for disks with the Apple vendor-specific quirk. */
        if (s->qdev.type == TYPE_DISK && (s->quirks &
            (1 << SCSI_DISK_QUIRK_MODE_PAGE_VENDOR_SPECIFIC_APPLE))) {
            length = 0x2;
            if (page_control == 1) { /* Changeable Values */
                p[0] = 0xff;
                p[1] = 0xff;
                break;
            }
            p[0] = 0;
            p[1] = 0;
            break;
        } else {
            return -1;
        }

    default:
        return -1;
    }

    assert(length < 256);
    (*p_outbuf)[0] = page;
    (*p_outbuf)[1] = length;
    /* Advance past the 2-byte page header plus the page body. */
    *p_outbuf += length + 2;
    return length + 2;
}

/*
 * MODE SENSE(6) / MODE SENSE(10).  Builds the mode parameter header, the
 * optional block descriptor and the requested page(s) into outbuf.
 * Returns the total length, or -1 after queuing a check condition.
 */
static int scsi_disk_emulate_mode_sense(SCSIDiskReq *r, uint8_t *outbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    uint64_t nb_sectors;
    bool dbd;
    int page, buflen, ret, page_control;
    uint8_t *p;
    uint8_t dev_specific_param;

    dbd = (r->req.cmd.buf[1] & 0x8) != 0; /* DBD: disable block descriptors */
    page = r->req.cmd.buf[2] & 0x3f;
    page_control = (r->req.cmd.buf[2] & 0xc0) >> 6;

    trace_scsi_disk_emulate_mode_sense((r->req.cmd.buf[0] == MODE_SENSE) ? 6 :
                                       10, page, r->req.cmd.xfer, page_control);
    memset(outbuf, 0, r->req.cmd.xfer);
    p = outbuf;

    if (s->qdev.type == TYPE_DISK) {
        dev_specific_param = s->features & (1 << SCSI_DISK_F_DPOFUA) ? 0x10 : 0;
        if (!blk_is_writable(s->qdev.conf.blk)) {
            dev_specific_param |= 0x80; /* Readonly. */
        }
    } else {
        if (s->quirks & (1 << SCSI_DISK_QUIRK_MODE_SENSE_ROM_USE_DBD)) {
            /* Use DBD from the request... */
            dev_specific_param = 0x00;

            /*
             * ... unless we receive a request for MODE_PAGE_APPLE_VENDOR
             * which should never return a block descriptor even though DBD is
             * not set, otherwise CDROM detection fails in MacOS
             */
            if (s->quirks & (1 << SCSI_DISK_QUIRK_MODE_PAGE_APPLE_VENDOR) &&
                page == MODE_PAGE_APPLE_VENDOR) {
                dbd = true;
            }
        } else {
            /*
             * MMC prescribes that CD/DVD drives have no block descriptors,
             * and defines no device-specific parameter.
             */
            dev_specific_param = 0x00;
            dbd = true;
        }
    }

    /* Mode parameter header: 4 bytes for MODE SENSE(6), 8 for (10). */
    if (r->req.cmd.buf[0] == MODE_SENSE) {
        p[1] = 0; /* Default media type. */
        p[2] = dev_specific_param;
        p[3] = 0; /* Block descriptor length. */
        p += 4;
    } else { /* MODE_SENSE_10 */
        p[2] = 0; /* Default media type. */
        p[3] = dev_specific_param;
        p[6] = p[7] = 0; /* Block descriptor length. */
        p += 8;
    }

    blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
    if (!dbd && nb_sectors) {
        if (r->req.cmd.buf[0] == MODE_SENSE) {
            outbuf[3] = 8; /* Block descriptor length */
        } else { /* MODE_SENSE_10 */
            outbuf[7] = 8; /* Block descriptor length */
        }
        nb_sectors /= (s->qdev.blocksize / BDRV_SECTOR_SIZE);
        /* Block count field is 24 bits; 0 means "too large to report". */
        if (nb_sectors > 0xffffff) {
            nb_sectors = 0;
        }
        p[0] = 0; /* media density code */
        p[1] = (nb_sectors >> 16) & 0xff;
        p[2] = (nb_sectors >> 8) & 0xff;
        p[3] = nb_sectors & 0xff;
        p[4] = 0; /* reserved */
        p[5] = 0; /* bytes 5-7 are the sector size in bytes */
        p[6] = s->qdev.blocksize >> 8;
        p[7] = 0;
        p += 8;
    }

    if (page_control == 3) {
        /* Saved Values */
        scsi_check_condition(r, SENSE_CODE(SAVING_PARAMS_NOT_SUPPORTED));
        return -1;
    }

    if (page == 0x3f) {
        /* "All pages": append every supported page; failures are skipped. */
        for (page = 0; page <= 0x3e; page++) {
            mode_sense_page(s, page, &p, page_control);
        }
    } else {
        ret = mode_sense_page(s, page, &p, page_control);
        if (ret == -1) {
            return -1;
        }
    }

    buflen = p - outbuf;
    /*
     * The mode data length field specifies the length in bytes of the
     * following data that is available to be transferred. The mode data
     * length does not include itself.
     */
    if (r->req.cmd.buf[0] == MODE_SENSE) {
        outbuf[0] = buflen - 1;
    } else { /* MODE_SENSE_10 */
        outbuf[0] = ((buflen - 2) >> 8) & 0xff;
        outbuf[1] = (buflen - 2) & 0xff;
    }
    return buflen;
}

/*
 * READ TOC/PMA/ATIP for CD-ROMs.  Format 0 and 2 are delegated to the
 * cdrom_read_toc helpers; format 1 reports a single session.  Returns the
 * TOC length, or -1 for an unsupported format.
 */
static int scsi_disk_emulate_read_toc(SCSIRequest *req, uint8_t *outbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    int start_track, format, msf, toclen;
    uint64_t nb_sectors;

    msf = req->cmd.buf[1] & 2;
    format = req->cmd.buf[2] & 0xf;
    start_track = req->cmd.buf[6];
    blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
    trace_scsi_disk_emulate_read_toc(start_track, format, msf >> 1);
    nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE;
    switch (format) {
    case 0:
        toclen = cdrom_read_toc(nb_sectors, outbuf, msf, start_track);
        break;
    case 1:
        /* multi session : only a single session defined */
        toclen = 12;
        memset(outbuf, 0, 12);
        outbuf[1] = 0x0a;
        outbuf[2] = 0x01;
        outbuf[3] = 0x01;
        break;
    case 2:
        toclen = cdrom_read_toc_raw(nb_sectors, outbuf, msf, start_track);
        break;
    default:
        return -1;
    }
    return toclen;
}

/*
 * START STOP UNIT.  Handles tray load/eject for removable devices.
 * Returns 0 on success, -1 after queuing a check condition (e.g. when
 * the tray is locked).
 */
static int scsi_disk_emulate_start_stop(SCSIDiskReq *r)
{
    SCSIRequest *req = &r->req;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    bool start = req->cmd.buf[4] & 1;
    bool loej = req->cmd.buf[4] & 2; /* load on start, eject on !start */
    int pwrcnd = req->cmd.buf[4] & 0xf0;

    if (pwrcnd) {
        /* eject/load only happens for power condition == 0 */
        return 0;
    }

    if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) && loej) {
        if (!start && !s->tray_open && s->tray_locked) {
            scsi_check_condition(r,
                                 blk_is_inserted(s->qdev.conf.blk)
                                 ?
                                 SENSE_CODE(ILLEGAL_REQ_REMOVAL_PREVENTED)
                                 : SENSE_CODE(NOT_READY_REMOVAL_PREVENTED));
            return -1;
        }

        /* Only touch the tray if the requested state differs. */
        if (s->tray_open != !start) {
            blk_eject(s->qdev.conf.blk, !start);
            s->tray_open = !start;
        }
    }
    return 0;
}

/*
 * Deliver the buffered response of an emulated command to the HBA.
 * The first call transfers the whole prepared iovec; the second call
 * (iov_len already zeroed) completes the request.
 */
static void scsi_disk_emulate_read_data(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    int buflen = r->iov.iov_len;

    if (buflen) {
        trace_scsi_disk_emulate_read_data(buflen);
        r->iov.iov_len = 0;
        r->started = true;
        scsi_req_data(&r->req, buflen);
        return;
    }

    /* This also clears the sense buffer for REQUEST SENSE. */
    scsi_req_complete(&r->req, GOOD);
}

/*
 * Validate a MODE SELECT page image against what MODE SENSE would
 * report: any bit that differs from the current value and is not marked
 * changeable makes the request invalid.  Returns 0 if acceptable, -1
 * otherwise.
 */
static int scsi_disk_check_mode_select(SCSIDiskState *s, int page,
                                       uint8_t *inbuf, int inlen)
{
    uint8_t mode_current[SCSI_MAX_MODE_LEN];
    uint8_t mode_changeable[SCSI_MAX_MODE_LEN];
    uint8_t *p;
    int len, expected_len, changeable_len, i;

    /* The input buffer does not include the page header, so it is
     * off by 2 bytes.
     */
    expected_len = inlen + 2;
    if (expected_len > SCSI_MAX_MODE_LEN) {
        return -1;
    }

    /* MODE_PAGE_ALLS is only valid for MODE SENSE commands */
    if (page == MODE_PAGE_ALLS) {
        return -1;
    }

    p = mode_current;
    memset(mode_current, 0, inlen + 2);
    len = mode_sense_page(s, page, &p, 0);
    if (len < 0 || len != expected_len) {
        return -1;
    }

    p = mode_changeable;
    memset(mode_changeable, 0, inlen + 2);
    changeable_len = mode_sense_page(s, page, &p, 1);
    assert(changeable_len == len);

    /* Check that unchangeable bits are the same as what MODE SENSE
     * would return.
1534 */ 1535 for (i = 2; i < len; i++) { 1536 if (((mode_current[i] ^ inbuf[i - 2]) & ~mode_changeable[i]) != 0) { 1537 return -1; 1538 } 1539 } 1540 return 0; 1541 } 1542 1543 static void scsi_disk_apply_mode_select(SCSIDiskState *s, int page, uint8_t *p) 1544 { 1545 switch (page) { 1546 case MODE_PAGE_CACHING: 1547 blk_set_enable_write_cache(s->qdev.conf.blk, (p[0] & 4) != 0); 1548 break; 1549 1550 default: 1551 break; 1552 } 1553 } 1554 1555 static int mode_select_pages(SCSIDiskReq *r, uint8_t *p, int len, bool change) 1556 { 1557 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1558 1559 while (len > 0) { 1560 int page, subpage, page_len; 1561 1562 /* Parse both possible formats for the mode page headers. */ 1563 page = p[0] & 0x3f; 1564 if (p[0] & 0x40) { 1565 if (len < 4) { 1566 goto invalid_param_len; 1567 } 1568 subpage = p[1]; 1569 page_len = lduw_be_p(&p[2]); 1570 p += 4; 1571 len -= 4; 1572 } else { 1573 if (len < 2) { 1574 goto invalid_param_len; 1575 } 1576 subpage = 0; 1577 page_len = p[1]; 1578 p += 2; 1579 len -= 2; 1580 } 1581 1582 if (subpage) { 1583 goto invalid_param; 1584 } 1585 if (page_len > len) { 1586 if (!(s->quirks & SCSI_DISK_QUIRK_MODE_PAGE_TRUNCATED)) { 1587 goto invalid_param_len; 1588 } 1589 trace_scsi_disk_mode_select_page_truncated(page, page_len, len); 1590 } 1591 1592 if (!change) { 1593 if (scsi_disk_check_mode_select(s, page, p, page_len) < 0) { 1594 goto invalid_param; 1595 } 1596 } else { 1597 scsi_disk_apply_mode_select(s, page, p); 1598 } 1599 1600 p += page_len; 1601 len -= page_len; 1602 } 1603 return 0; 1604 1605 invalid_param: 1606 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM)); 1607 return -1; 1608 1609 invalid_param_len: 1610 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN)); 1611 return -1; 1612 } 1613 1614 static void scsi_disk_emulate_mode_select(SCSIDiskReq *r, uint8_t *inbuf) 1615 { 1616 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1617 uint8_t *p = inbuf; 1618 int cmd = 
r->req.cmd.buf[0];
    int len = r->req.cmd.xfer;
    int hdr_len = (cmd == MODE_SELECT ? 4 : 8);
    int bd_len, bs;
    int pass;

    if ((r->req.cmd.buf[1] & 0x11) != 0x10) {
        if (!(s->quirks &
            (1 << SCSI_DISK_QUIRK_MODE_PAGE_VENDOR_SPECIFIC_APPLE))) {
            /* We only support PF=1, SP=0. */
            goto invalid_field;
        }
    }

    if (len < hdr_len) {
        goto invalid_param_len;
    }

    /* Block descriptor length: byte 3 for (6), bytes 6-7 for (10). */
    bd_len = (cmd == MODE_SELECT ? p[3] : lduw_be_p(&p[6]));
    len -= hdr_len;
    p += hdr_len;
    if (len < bd_len) {
        goto invalid_param_len;
    }
    if (bd_len != 0 && bd_len != 8) {
        goto invalid_param;
    }

    /* Allow changing the block size */
    if (bd_len) {
        bs = p[5] << 16 | p[6] << 8 | p[7];

        /*
         * Since the existing code only checks/updates bits 8-15 of the block
         * size, restrict ourselves to the same requirement for now to ensure
         * that a block size set by a block descriptor and then read back by
         * a subsequent SCSI command will be the same. Also disallow a block
         * size of 256 since we cannot handle anything below BDRV_SECTOR_SIZE.
         */
        if (bs && !(bs & ~0xfe00) && bs != s->qdev.blocksize) {
            s->qdev.blocksize = bs;
            trace_scsi_disk_mode_select_set_blocksize(s->qdev.blocksize);
        }
    }

    len -= bd_len;
    p += bd_len;

    /* Ensure no change is made if there is an error! */
    for (pass = 0; pass < 2; pass++) {
        if (mode_select_pages(r, p, len, pass == 1) < 0) {
            assert(pass == 0);
            return;
        }
    }
    if (!blk_enable_write_cache(s->qdev.conf.blk)) {
        /* The request is used as the AIO opaque value, so add a ref. */
        scsi_req_ref(&r->req);
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
        return;
    }

    scsi_req_complete(&r->req, GOOD);
    return;

invalid_param:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM));
    return;

invalid_param_len:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
    return;

invalid_field:
    scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
}

/* sector_num and nb_sectors expected to be in qdev blocksize */
static inline bool check_lba_range(SCSIDiskState *s,
                                   uint64_t sector_num, uint32_t nb_sectors)
{
    /*
     * The first line tests that no overflow happens when computing the last
     * sector. The second line tests that the last accessed sector is in
     * range.
     *
     * Careful, the computations should not underflow for nb_sectors == 0,
     * and a 0-block read to the first LBA beyond the end of device is
     * valid.
1709 */ 1710 return (sector_num <= sector_num + nb_sectors && 1711 sector_num + nb_sectors <= s->qdev.max_lba + 1); 1712 } 1713 1714 typedef struct UnmapCBData { 1715 SCSIDiskReq *r; 1716 uint8_t *inbuf; 1717 int count; 1718 } UnmapCBData; 1719 1720 static void scsi_unmap_complete(void *opaque, int ret); 1721 1722 static void scsi_unmap_complete_noio(UnmapCBData *data, int ret) 1723 { 1724 SCSIDiskReq *r = data->r; 1725 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1726 1727 assert(r->req.aiocb == NULL); 1728 1729 if (data->count > 0) { 1730 uint64_t sector_num = ldq_be_p(&data->inbuf[0]); 1731 uint32_t nb_sectors = ldl_be_p(&data->inbuf[8]) & 0xffffffffULL; 1732 r->sector = sector_num * (s->qdev.blocksize / BDRV_SECTOR_SIZE); 1733 r->sector_count = nb_sectors * (s->qdev.blocksize / BDRV_SECTOR_SIZE); 1734 1735 if (!check_lba_range(s, sector_num, nb_sectors)) { 1736 block_acct_invalid(blk_get_stats(s->qdev.conf.blk), 1737 BLOCK_ACCT_UNMAP); 1738 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE)); 1739 goto done; 1740 } 1741 1742 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 1743 r->sector_count * BDRV_SECTOR_SIZE, 1744 BLOCK_ACCT_UNMAP); 1745 1746 r->req.aiocb = blk_aio_pdiscard(s->qdev.conf.blk, 1747 r->sector * BDRV_SECTOR_SIZE, 1748 r->sector_count * BDRV_SECTOR_SIZE, 1749 scsi_unmap_complete, data); 1750 data->count--; 1751 data->inbuf += 16; 1752 return; 1753 } 1754 1755 scsi_req_complete(&r->req, GOOD); 1756 1757 done: 1758 scsi_req_unref(&r->req); 1759 g_free(data); 1760 } 1761 1762 static void scsi_unmap_complete(void *opaque, int ret) 1763 { 1764 UnmapCBData *data = opaque; 1765 SCSIDiskReq *r = data->r; 1766 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1767 1768 assert(r->req.aiocb != NULL); 1769 r->req.aiocb = NULL; 1770 1771 if (scsi_disk_req_check_error(r, ret, true)) { 1772 scsi_req_unref(&r->req); 1773 g_free(data); 1774 } else { 1775 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); 1776 
scsi_unmap_complete_noio(data, ret); 1777 } 1778 } 1779 1780 static void scsi_disk_emulate_unmap(SCSIDiskReq *r, uint8_t *inbuf) 1781 { 1782 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1783 uint8_t *p = inbuf; 1784 int len = r->req.cmd.xfer; 1785 UnmapCBData *data; 1786 1787 /* Reject ANCHOR=1. */ 1788 if (r->req.cmd.buf[1] & 0x1) { 1789 goto invalid_field; 1790 } 1791 1792 if (len < 8) { 1793 goto invalid_param_len; 1794 } 1795 if (len < lduw_be_p(&p[0]) + 2) { 1796 goto invalid_param_len; 1797 } 1798 if (len < lduw_be_p(&p[2]) + 8) { 1799 goto invalid_param_len; 1800 } 1801 if (lduw_be_p(&p[2]) & 15) { 1802 goto invalid_param_len; 1803 } 1804 1805 if (!blk_is_writable(s->qdev.conf.blk)) { 1806 block_acct_invalid(blk_get_stats(s->qdev.conf.blk), BLOCK_ACCT_UNMAP); 1807 scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED)); 1808 return; 1809 } 1810 1811 data = g_new0(UnmapCBData, 1); 1812 data->r = r; 1813 data->inbuf = &p[8]; 1814 data->count = lduw_be_p(&p[2]) >> 4; 1815 1816 /* The matching unref is in scsi_unmap_complete, before data is freed. 
 */
    scsi_req_ref(&r->req);
    scsi_unmap_complete_noio(data, 0);
    return;

invalid_param_len:
    block_acct_invalid(blk_get_stats(s->qdev.conf.blk), BLOCK_ACCT_UNMAP);
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
    return;

invalid_field:
    block_acct_invalid(blk_get_stats(s->qdev.conf.blk), BLOCK_ACCT_UNMAP);
    scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
}

/* State carried across the chunked WRITE SAME AIO callbacks. */
typedef struct WriteSameCBData {
    SCSIDiskReq *r;
    int64_t sector;       /* next sector to write, in BDRV_SECTOR_SIZE units */
    int nb_sectors;       /* sectors remaining */
    QEMUIOVector qiov;
    struct iovec iov;
} WriteSameCBData;

/*
 * AIO completion for one WRITE SAME chunk: account it, then either issue
 * the next (possibly shorter) chunk or finish the request and release
 * the reference and bounce buffer.
 */
static void scsi_write_same_complete(void *opaque, int ret)
{
    WriteSameCBData *data = opaque;
    SCSIDiskReq *r = data->r;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    if (scsi_disk_req_check_error(r, ret, true)) {
        goto done;
    }

    block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);

    data->nb_sectors -= data->iov.iov_len / BDRV_SECTOR_SIZE;
    data->sector += data->iov.iov_len / BDRV_SECTOR_SIZE;
    data->iov.iov_len = MIN(data->nb_sectors * BDRV_SECTOR_SIZE,
                            data->iov.iov_len);
    if (data->iov.iov_len) {
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         data->iov.iov_len, BLOCK_ACCT_WRITE);
        /* Reinitialize qiov, to handle unaligned WRITE SAME request
         * where final qiov may need smaller size */
        qemu_iovec_init_external(&data->qiov, &data->iov, 1);
        r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk,
                                       data->sector << BDRV_SECTOR_BITS,
                                       &data->qiov, 0,
                                       scsi_write_same_complete, data);
        return;
    }

    scsi_req_complete(&r->req, GOOD);

done:
    scsi_req_unref(&r->req);
    qemu_vfree(data->iov.iov_base);
    g_free(data);
}

/*
 * WRITE SAME(10)/(16).  A zero pattern (or UNMAP=1) is turned into an
 * efficient write-zeroes; any other pattern is replicated into a bounce
 * buffer and written out in SCSI_WRITE_SAME_MAX-sized chunks.
 */
static void scsi_disk_emulate_write_same(SCSIDiskReq *r, uint8_t *inbuf)
{
    SCSIRequest *req = &r->req;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    uint32_t nb_sectors = scsi_data_cdb_xfer(r->req.cmd.buf);
    WriteSameCBData *data;
    uint8_t *buf;
    int i, l;

    /* Fail if PBDATA=1 or LBDATA=1 or ANCHOR=1. */
    if (nb_sectors == 0 || (req->cmd.buf[1] & 0x16)) {
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        return;
    }

    if (!blk_is_writable(s->qdev.conf.blk)) {
        scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
        return;
    }
    if (!check_lba_range(s, r->req.cmd.lba, nb_sectors)) {
        scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
        return;
    }

    if ((req->cmd.buf[1] & 0x1) || buffer_is_zero(inbuf, s->qdev.blocksize)) {
        /* UNMAP bit set or all-zero pattern: use write-zeroes. */
        int flags = (req->cmd.buf[1] & 0x8) ? BDRV_REQ_MAY_UNMAP : 0;

        /* The request is used as the AIO opaque value, so add a ref. */
        scsi_req_ref(&r->req);
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         nb_sectors * s->qdev.blocksize,
                         BLOCK_ACCT_WRITE);
        r->req.aiocb = blk_aio_pwrite_zeroes(s->qdev.conf.blk,
                                             r->req.cmd.lba * s->qdev.blocksize,
                                             nb_sectors * s->qdev.blocksize,
                                             flags, scsi_aio_complete, r);
        return;
    }

    data = g_new0(WriteSameCBData, 1);
    data->r = r;
    data->sector = r->req.cmd.lba * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
    data->nb_sectors = nb_sectors * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
    data->iov.iov_len = MIN(data->nb_sectors * BDRV_SECTOR_SIZE,
                            SCSI_WRITE_SAME_MAX);
    data->iov.iov_base = buf = blk_blockalign(s->qdev.conf.blk,
                                              data->iov.iov_len);
    qemu_iovec_init_external(&data->qiov, &data->iov, 1);

    /* Replicate the one-block pattern across the bounce buffer. */
    for (i = 0; i < data->iov.iov_len; i += l) {
        l = MIN(s->qdev.blocksize, data->iov.iov_len - i);
        memcpy(&buf[i], inbuf, l);
    }

    scsi_req_ref(&r->req);
    block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                     data->iov.iov_len, BLOCK_ACCT_WRITE);
    r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk,
                                   data->sector << BDRV_SECTOR_BITS,
                                   &data->qiov, 0,
                                   scsi_write_same_complete, data);
}

/*
 * Receive data for emulated commands that transfer to the device; once
 * the buffer has arrived, dispatch to the per-command handler.
 */
static void scsi_disk_emulate_write_data(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    if (r->iov.iov_len) {
        int buflen = r->iov.iov_len;
        trace_scsi_disk_emulate_write_data(buflen);
        r->iov.iov_len = 0;
        scsi_req_data(&r->req, buflen);
        return;
    }

    switch (req->cmd.buf[0]) {
    case MODE_SELECT:
    case MODE_SELECT_10:
        /* This also clears the sense buffer for REQUEST SENSE. */
        scsi_disk_emulate_mode_select(r, r->iov.iov_base);
        break;

    case UNMAP:
        scsi_disk_emulate_unmap(r, r->iov.iov_base);
        break;

    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
        if (r->req.status == -1) {
            scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        }
        break;

    case WRITE_SAME_10:
    case WRITE_SAME_16:
        scsi_disk_emulate_write_same(r, r->iov.iov_base);
        break;

    case FORMAT_UNIT:
        scsi_req_complete(&r->req, GOOD);
        break;

    default:
        abort();
    }
}

/*
 * Entry point for all emulated (non-DMA) commands.  Returns the number
 * of bytes to transfer (positive: to the HBA, negative: from the HBA),
 * or 0 when the command completed or failed immediately.
 */
static int32_t scsi_disk_emulate_command(SCSIRequest *req, uint8_t *buf)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    uint64_t nb_sectors;
    uint8_t *outbuf;
    int buflen;

    switch (req->cmd.buf[0]) {
    case INQUIRY:
    case MODE_SENSE:
    case MODE_SENSE_10:
    case RESERVE:
    case RESERVE_10:
    case RELEASE:
    case RELEASE_10:
    case START_STOP:
    case ALLOW_MEDIUM_REMOVAL:
    case GET_CONFIGURATION:
    case GET_EVENT_STATUS_NOTIFICATION:
    case MECHANISM_STATUS:
    case REQUEST_SENSE:
        /* These commands are allowed without a medium present. */
        break;

    default:
        if (!blk_is_available(s->qdev.conf.blk)) {
            scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
            return 0;
        }
        break;
    }

    /*
     * FIXME: we shouldn't return anything bigger than 4k, but the code
     * requires the buffer to be as big as req->cmd.xfer in several
     * places. So, do not allow CDBs with a very large ALLOCATION
     * LENGTH. The real fix would be to modify scsi_read_data and
     * dma_buf_read, so that they return data beyond the buflen
     * as all zeros.
     */
    if (req->cmd.xfer > 65536) {
        goto illegal_request;
    }
    r->buflen = MAX(4096, req->cmd.xfer);

    if (!r->iov.iov_base) {
        r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen);
    }

    outbuf = r->iov.iov_base;
    memset(outbuf, 0, r->buflen);
    switch (req->cmd.buf[0]) {
    case TEST_UNIT_READY:
        /* Medium availability was already checked above. */
        assert(blk_is_available(s->qdev.conf.blk));
        break;
    case INQUIRY:
        buflen = scsi_disk_emulate_inquiry(req, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case MODE_SENSE:
    case MODE_SENSE_10:
        buflen = scsi_disk_emulate_mode_sense(r, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case READ_TOC:
        buflen = scsi_disk_emulate_read_toc(req, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case RESERVE:
        if (req->cmd.buf[1] & 1) {
            goto illegal_request;
        }
        break;
    case RESERVE_10:
        if (req->cmd.buf[1] & 3) {
            goto illegal_request;
        }
        break;
    case RELEASE:
        if (req->cmd.buf[1] & 1) {
            goto illegal_request;
        }
        break;
    case RELEASE_10:
        if (req->cmd.buf[1] & 3) {
            goto illegal_request;
        }
        break;
    case START_STOP:
        if (scsi_disk_emulate_start_stop(r) < 0) {
            return 0;
        }
        break;
    case ALLOW_MEDIUM_REMOVAL:
        s->tray_locked = req->cmd.buf[4] & 1;
        blk_lock_medium(s->qdev.conf.blk, req->cmd.buf[4] & 1);
        break;
    case READ_CAPACITY_10:
        /* The normal LEN field for this command is zero. */
        memset(outbuf, 0, 8);
        blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
        if (!nb_sectors) {
            scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY));
            return 0;
        }
        if ((req->cmd.buf[8] & 1) == 0 && req->cmd.lba) {
            goto illegal_request;
        }
        nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE;
        /* Returned value is the address of the last sector. */
        nb_sectors--;
        /* Remember the new size for read/write sanity checking. */
        s->qdev.max_lba = nb_sectors;
        /* Clip to 2TB, instead of returning capacity modulo 2TB. */
        if (nb_sectors > UINT32_MAX) {
            nb_sectors = UINT32_MAX;
        }
        outbuf[0] = (nb_sectors >> 24) & 0xff;
        outbuf[1] = (nb_sectors >> 16) & 0xff;
        outbuf[2] = (nb_sectors >> 8) & 0xff;
        outbuf[3] = nb_sectors & 0xff;
        outbuf[4] = 0;
        outbuf[5] = 0;
        outbuf[6] = s->qdev.blocksize >> 8;
        outbuf[7] = 0;
        break;
    case REQUEST_SENSE:
        /* Just return "NO SENSE". */
        buflen = scsi_convert_sense(NULL, 0, outbuf, r->buflen,
                                    (req->cmd.buf[1] & 1) == 0);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case MECHANISM_STATUS:
        buflen = scsi_emulate_mechanism_status(s, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case GET_CONFIGURATION:
        buflen = scsi_get_configuration(s, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case GET_EVENT_STATUS_NOTIFICATION:
        buflen = scsi_get_event_status_notification(s, r, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case READ_DISC_INFORMATION:
        buflen = scsi_read_disc_information(s, r, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case READ_DVD_STRUCTURE:
        buflen = scsi_read_dvd_structure(s, r, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case SERVICE_ACTION_IN_16:
        /* Service Action In subcommands. */
        if ((req->cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) {
            trace_scsi_disk_emulate_command_SAI_16();
            memset(outbuf, 0, req->cmd.xfer);
            blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
            if (!nb_sectors) {
                scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY));
                return 0;
            }
            if ((req->cmd.buf[14] & 1) == 0 && req->cmd.lba) {
                goto illegal_request;
            }
            nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE;
            /* Returned value is the address of the last sector. */
            nb_sectors--;
            /* Remember the new size for read/write sanity checking. */
            s->qdev.max_lba = nb_sectors;
            outbuf[0] = (nb_sectors >> 56) & 0xff;
            outbuf[1] = (nb_sectors >> 48) & 0xff;
            outbuf[2] = (nb_sectors >> 40) & 0xff;
            outbuf[3] = (nb_sectors >> 32) & 0xff;
            outbuf[4] = (nb_sectors >> 24) & 0xff;
            outbuf[5] = (nb_sectors >> 16) & 0xff;
            outbuf[6] = (nb_sectors >> 8) & 0xff;
            outbuf[7] = nb_sectors & 0xff;
            outbuf[8] = 0;
            outbuf[9] = 0;
            outbuf[10] = s->qdev.blocksize >> 8;
            outbuf[11] = 0;
            outbuf[12] = 0;
            outbuf[13] = get_physical_block_exp(&s->qdev.conf);

            /* set TPE bit if the format supports discard */
            if (s->qdev.conf.discard_granularity) {
                outbuf[14] = 0x80;
            }

            /* Protection, exponent and lowest lba field left blank. */
            break;
        }
        trace_scsi_disk_emulate_command_SAI_unsupported();
        goto illegal_request;
    case SYNCHRONIZE_CACHE:
        /* The request is used as the AIO opaque value, so add a ref. */
        scsi_req_ref(&r->req);
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
        return 0;
    case SEEK_10:
        trace_scsi_disk_emulate_command_SEEK_10(r->req.cmd.lba);
        if (r->req.cmd.lba > s->qdev.max_lba) {
            goto illegal_lba;
        }
        break;
    case MODE_SELECT:
        trace_scsi_disk_emulate_command_MODE_SELECT(r->req.cmd.xfer);
        break;
    case MODE_SELECT_10:
        trace_scsi_disk_emulate_command_MODE_SELECT_10(r->req.cmd.xfer);
        break;
    case UNMAP:
        trace_scsi_disk_emulate_command_UNMAP(r->req.cmd.xfer);
        break;
    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
        trace_scsi_disk_emulate_command_VERIFY((req->cmd.buf[1] >> 1) & 3);
        if (req->cmd.buf[1] & 6) {
            goto illegal_request;
        }
        break;
    case WRITE_SAME_10:
    case WRITE_SAME_16:
        trace_scsi_disk_emulate_command_WRITE_SAME(
            req->cmd.buf[0] == WRITE_SAME_10 ? 10 : 16, r->req.cmd.xfer);
        break;
    case FORMAT_UNIT:
        trace_scsi_disk_emulate_command_FORMAT_UNIT(r->req.cmd.xfer);
        break;
    default:
        trace_scsi_disk_emulate_command_UNKNOWN(buf[0],
                                                scsi_command_name(buf[0]));
        scsi_check_condition(r, SENSE_CODE(INVALID_OPCODE));
        return 0;
    }
    assert(!r->req.aiocb);
    r->iov.iov_len = MIN(r->buflen, req->cmd.xfer);
    if (r->iov.iov_len == 0) {
        /* Zero-length transfer: complete immediately. */
        scsi_req_complete(&r->req, GOOD);
    }
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        assert(r->iov.iov_len == req->cmd.xfer);
        return -r->iov.iov_len;
    } else {
        return r->iov.iov_len;
    }

illegal_request:
    if (r->req.status == -1) {
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
    }
    return 0;

illegal_lba:
    scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
    return 0;
}

/* Execute a scsi command.
Returns the length of the data expected by the 2268 command. This will be Positive for data transfers from the device 2269 (eg. disk reads), negative for transfers to the device (eg. disk writes), 2270 and zero if the command does not transfer any data. */ 2271 2272 static int32_t scsi_disk_dma_command(SCSIRequest *req, uint8_t *buf) 2273 { 2274 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 2275 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 2276 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s)); 2277 uint32_t len; 2278 uint8_t command; 2279 2280 command = buf[0]; 2281 2282 if (!blk_is_available(s->qdev.conf.blk)) { 2283 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM)); 2284 return 0; 2285 } 2286 2287 len = scsi_data_cdb_xfer(r->req.cmd.buf); 2288 switch (command) { 2289 case READ_6: 2290 case READ_10: 2291 case READ_12: 2292 case READ_16: 2293 trace_scsi_disk_dma_command_READ(r->req.cmd.lba, len); 2294 /* Protection information is not supported. For SCSI versions 2 and 2295 * older (as determined by snooping the guest's INQUIRY commands), 2296 * there is no RD/WR/VRPROTECT, so skip this check in these versions. 2297 */ 2298 if (s->qdev.scsi_version > 2 && (r->req.cmd.buf[1] & 0xe0)) { 2299 goto illegal_request; 2300 } 2301 if (!check_lba_range(s, r->req.cmd.lba, len)) { 2302 goto illegal_lba; 2303 } 2304 r->sector = r->req.cmd.lba * (s->qdev.blocksize / BDRV_SECTOR_SIZE); 2305 r->sector_count = len * (s->qdev.blocksize / BDRV_SECTOR_SIZE); 2306 break; 2307 case WRITE_6: 2308 case WRITE_10: 2309 case WRITE_12: 2310 case WRITE_16: 2311 case WRITE_VERIFY_10: 2312 case WRITE_VERIFY_12: 2313 case WRITE_VERIFY_16: 2314 if (!blk_is_writable(s->qdev.conf.blk)) { 2315 scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED)); 2316 return 0; 2317 } 2318 trace_scsi_disk_dma_command_WRITE( 2319 (command & 0xe) == 0xe ? 
"And Verify " : "", 2320 r->req.cmd.lba, len); 2321 /* fall through */ 2322 case VERIFY_10: 2323 case VERIFY_12: 2324 case VERIFY_16: 2325 /* We get here only for BYTCHK == 0x01 and only for scsi-block. 2326 * As far as DMA is concerned, we can treat it the same as a write; 2327 * scsi_block_do_sgio will send VERIFY commands. 2328 */ 2329 if (s->qdev.scsi_version > 2 && (r->req.cmd.buf[1] & 0xe0)) { 2330 goto illegal_request; 2331 } 2332 if (!check_lba_range(s, r->req.cmd.lba, len)) { 2333 goto illegal_lba; 2334 } 2335 r->sector = r->req.cmd.lba * (s->qdev.blocksize / BDRV_SECTOR_SIZE); 2336 r->sector_count = len * (s->qdev.blocksize / BDRV_SECTOR_SIZE); 2337 break; 2338 default: 2339 abort(); 2340 illegal_request: 2341 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 2342 return 0; 2343 illegal_lba: 2344 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE)); 2345 return 0; 2346 } 2347 r->need_fua_emulation = sdc->need_fua_emulation(&r->req.cmd); 2348 if (r->sector_count == 0) { 2349 scsi_req_complete(&r->req, GOOD); 2350 } 2351 assert(r->iov.iov_len == 0); 2352 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { 2353 return -r->sector_count * BDRV_SECTOR_SIZE; 2354 } else { 2355 return r->sector_count * BDRV_SECTOR_SIZE; 2356 } 2357 } 2358 2359 static void scsi_disk_reset(DeviceState *dev) 2360 { 2361 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev.qdev, dev); 2362 uint64_t nb_sectors; 2363 2364 scsi_device_purge_requests(&s->qdev, SENSE_CODE(RESET)); 2365 2366 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 2367 2368 nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE; 2369 if (nb_sectors) { 2370 nb_sectors--; 2371 } 2372 s->qdev.max_lba = nb_sectors; 2373 /* reset tray statuses */ 2374 s->tray_locked = 0; 2375 s->tray_open = 0; 2376 2377 s->qdev.scsi_version = s->qdev.default_scsi_version; 2378 } 2379 2380 static void scsi_disk_drained_begin(void *opaque) 2381 { 2382 SCSIDiskState *s = opaque; 2383 2384 scsi_device_drained_begin(&s->qdev); 2385 } 2386 2387 static 
void scsi_disk_drained_end(void *opaque) 2388 { 2389 SCSIDiskState *s = opaque; 2390 2391 scsi_device_drained_end(&s->qdev); 2392 } 2393 2394 static void scsi_disk_resize_cb(void *opaque) 2395 { 2396 SCSIDiskState *s = opaque; 2397 2398 /* SPC lists this sense code as available only for 2399 * direct-access devices. 2400 */ 2401 if (s->qdev.type == TYPE_DISK) { 2402 scsi_device_report_change(&s->qdev, SENSE_CODE(CAPACITY_CHANGED)); 2403 } 2404 } 2405 2406 static void scsi_cd_change_media_cb(void *opaque, bool load, Error **errp) 2407 { 2408 SCSIDiskState *s = opaque; 2409 2410 /* 2411 * When a CD gets changed, we have to report an ejected state and 2412 * then a loaded state to guests so that they detect tray 2413 * open/close and media change events. Guests that do not use 2414 * GET_EVENT_STATUS_NOTIFICATION to detect such tray open/close 2415 * states rely on this behavior. 2416 * 2417 * media_changed governs the state machine used for unit attention 2418 * report. media_event is used by GET EVENT STATUS NOTIFICATION. 
2419 */ 2420 s->media_changed = load; 2421 s->tray_open = !load; 2422 scsi_device_set_ua(&s->qdev, SENSE_CODE(UNIT_ATTENTION_NO_MEDIUM)); 2423 s->media_event = true; 2424 s->eject_request = false; 2425 } 2426 2427 static void scsi_cd_eject_request_cb(void *opaque, bool force) 2428 { 2429 SCSIDiskState *s = opaque; 2430 2431 s->eject_request = true; 2432 if (force) { 2433 s->tray_locked = false; 2434 } 2435 } 2436 2437 static bool scsi_cd_is_tray_open(void *opaque) 2438 { 2439 return ((SCSIDiskState *)opaque)->tray_open; 2440 } 2441 2442 static bool scsi_cd_is_medium_locked(void *opaque) 2443 { 2444 return ((SCSIDiskState *)opaque)->tray_locked; 2445 } 2446 2447 static const BlockDevOps scsi_disk_removable_block_ops = { 2448 .change_media_cb = scsi_cd_change_media_cb, 2449 .drained_begin = scsi_disk_drained_begin, 2450 .drained_end = scsi_disk_drained_end, 2451 .eject_request_cb = scsi_cd_eject_request_cb, 2452 .is_medium_locked = scsi_cd_is_medium_locked, 2453 .is_tray_open = scsi_cd_is_tray_open, 2454 .resize_cb = scsi_disk_resize_cb, 2455 }; 2456 2457 static const BlockDevOps scsi_disk_block_ops = { 2458 .drained_begin = scsi_disk_drained_begin, 2459 .drained_end = scsi_disk_drained_end, 2460 .resize_cb = scsi_disk_resize_cb, 2461 }; 2462 2463 static void scsi_disk_unit_attention_reported(SCSIDevice *dev) 2464 { 2465 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2466 if (s->media_changed) { 2467 s->media_changed = false; 2468 scsi_device_set_ua(&s->qdev, SENSE_CODE(MEDIUM_CHANGED)); 2469 } 2470 } 2471 2472 static void scsi_realize(SCSIDevice *dev, Error **errp) 2473 { 2474 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2475 bool read_only; 2476 2477 if (!s->qdev.conf.blk) { 2478 error_setg(errp, "drive property not set"); 2479 return; 2480 } 2481 2482 if (!(s->features & (1 << SCSI_DISK_F_REMOVABLE)) && 2483 !blk_is_inserted(s->qdev.conf.blk)) { 2484 error_setg(errp, "Device needs media, but drive is empty"); 2485 return; 2486 } 2487 2488 if 
(!blkconf_blocksizes(&s->qdev.conf, errp)) { 2489 return; 2490 } 2491 2492 if (blk_get_aio_context(s->qdev.conf.blk) != qemu_get_aio_context() && 2493 !s->qdev.hba_supports_iothread) 2494 { 2495 error_setg(errp, "HBA does not support iothreads"); 2496 return; 2497 } 2498 2499 if (dev->type == TYPE_DISK) { 2500 if (!blkconf_geometry(&dev->conf, NULL, 65535, 255, 255, errp)) { 2501 return; 2502 } 2503 } 2504 2505 read_only = !blk_supports_write_perm(s->qdev.conf.blk); 2506 if (dev->type == TYPE_ROM) { 2507 read_only = true; 2508 } 2509 2510 if (!blkconf_apply_backend_options(&dev->conf, read_only, 2511 dev->type == TYPE_DISK, errp)) { 2512 return; 2513 } 2514 2515 if (s->qdev.conf.discard_granularity == -1) { 2516 s->qdev.conf.discard_granularity = 2517 MAX(s->qdev.conf.logical_block_size, DEFAULT_DISCARD_GRANULARITY); 2518 } 2519 2520 if (!s->version) { 2521 s->version = g_strdup(qemu_hw_version()); 2522 } 2523 if (!s->vendor) { 2524 s->vendor = g_strdup("QEMU"); 2525 } 2526 if (s->serial && strlen(s->serial) > MAX_SERIAL_LEN) { 2527 error_setg(errp, "The serial number can't be longer than %d characters", 2528 MAX_SERIAL_LEN); 2529 return; 2530 } 2531 if (!s->device_id) { 2532 if (s->serial) { 2533 if (strlen(s->serial) > MAX_SERIAL_LEN_FOR_DEVID) { 2534 error_setg(errp, "The serial number can't be longer than %d " 2535 "characters when it is also used as the default for " 2536 "device_id", MAX_SERIAL_LEN_FOR_DEVID); 2537 return; 2538 } 2539 s->device_id = g_strdup(s->serial); 2540 } else { 2541 const char *str = blk_name(s->qdev.conf.blk); 2542 if (str && *str) { 2543 s->device_id = g_strdup(str); 2544 } 2545 } 2546 } 2547 2548 if (blk_is_sg(s->qdev.conf.blk)) { 2549 error_setg(errp, "unwanted /dev/sg*"); 2550 return; 2551 } 2552 2553 if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) && 2554 !(s->features & (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS))) { 2555 blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_removable_block_ops, s); 2556 } else { 2557 
blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_block_ops, s); 2558 } 2559 2560 blk_iostatus_enable(s->qdev.conf.blk); 2561 2562 add_boot_device_lchs(&dev->qdev, NULL, 2563 dev->conf.lcyls, 2564 dev->conf.lheads, 2565 dev->conf.lsecs); 2566 } 2567 2568 static void scsi_unrealize(SCSIDevice *dev) 2569 { 2570 del_boot_device_lchs(&dev->qdev, NULL); 2571 } 2572 2573 static void scsi_hd_realize(SCSIDevice *dev, Error **errp) 2574 { 2575 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2576 2577 /* can happen for devices without drive. The error message for missing 2578 * backend will be issued in scsi_realize 2579 */ 2580 if (s->qdev.conf.blk) { 2581 if (!blkconf_blocksizes(&s->qdev.conf, errp)) { 2582 return; 2583 } 2584 } 2585 s->qdev.blocksize = s->qdev.conf.logical_block_size; 2586 s->qdev.type = TYPE_DISK; 2587 if (!s->product) { 2588 s->product = g_strdup("QEMU HARDDISK"); 2589 } 2590 scsi_realize(&s->qdev, errp); 2591 } 2592 2593 static void scsi_cd_realize(SCSIDevice *dev, Error **errp) 2594 { 2595 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2596 int ret; 2597 uint32_t blocksize = 2048; 2598 2599 if (!dev->conf.blk) { 2600 /* Anonymous BlockBackend for an empty drive. As we put it into 2601 * dev->conf, qdev takes care of detaching on unplug. 
*/ 2602 dev->conf.blk = blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL); 2603 ret = blk_attach_dev(dev->conf.blk, &dev->qdev); 2604 assert(ret == 0); 2605 } 2606 2607 if (dev->conf.physical_block_size != 0) { 2608 blocksize = dev->conf.physical_block_size; 2609 } 2610 2611 s->qdev.blocksize = blocksize; 2612 s->qdev.type = TYPE_ROM; 2613 s->features |= 1 << SCSI_DISK_F_REMOVABLE; 2614 if (!s->product) { 2615 s->product = g_strdup("QEMU CD-ROM"); 2616 } 2617 scsi_realize(&s->qdev, errp); 2618 } 2619 2620 2621 static const SCSIReqOps scsi_disk_emulate_reqops = { 2622 .size = sizeof(SCSIDiskReq), 2623 .free_req = scsi_free_request, 2624 .send_command = scsi_disk_emulate_command, 2625 .read_data = scsi_disk_emulate_read_data, 2626 .write_data = scsi_disk_emulate_write_data, 2627 .get_buf = scsi_get_buf, 2628 .load_request = scsi_disk_emulate_load_request, 2629 .save_request = scsi_disk_emulate_save_request, 2630 }; 2631 2632 static const SCSIReqOps scsi_disk_dma_reqops = { 2633 .size = sizeof(SCSIDiskReq), 2634 .free_req = scsi_free_request, 2635 .send_command = scsi_disk_dma_command, 2636 .read_data = scsi_read_data, 2637 .write_data = scsi_write_data, 2638 .get_buf = scsi_get_buf, 2639 .load_request = scsi_disk_load_request, 2640 .save_request = scsi_disk_save_request, 2641 }; 2642 2643 static const SCSIReqOps *const scsi_disk_reqops_dispatch[256] = { 2644 [TEST_UNIT_READY] = &scsi_disk_emulate_reqops, 2645 [INQUIRY] = &scsi_disk_emulate_reqops, 2646 [MODE_SENSE] = &scsi_disk_emulate_reqops, 2647 [MODE_SENSE_10] = &scsi_disk_emulate_reqops, 2648 [START_STOP] = &scsi_disk_emulate_reqops, 2649 [ALLOW_MEDIUM_REMOVAL] = &scsi_disk_emulate_reqops, 2650 [READ_CAPACITY_10] = &scsi_disk_emulate_reqops, 2651 [READ_TOC] = &scsi_disk_emulate_reqops, 2652 [READ_DVD_STRUCTURE] = &scsi_disk_emulate_reqops, 2653 [READ_DISC_INFORMATION] = &scsi_disk_emulate_reqops, 2654 [GET_CONFIGURATION] = &scsi_disk_emulate_reqops, 2655 [GET_EVENT_STATUS_NOTIFICATION] = 
&scsi_disk_emulate_reqops, 2656 [MECHANISM_STATUS] = &scsi_disk_emulate_reqops, 2657 [SERVICE_ACTION_IN_16] = &scsi_disk_emulate_reqops, 2658 [REQUEST_SENSE] = &scsi_disk_emulate_reqops, 2659 [SYNCHRONIZE_CACHE] = &scsi_disk_emulate_reqops, 2660 [SEEK_10] = &scsi_disk_emulate_reqops, 2661 [MODE_SELECT] = &scsi_disk_emulate_reqops, 2662 [MODE_SELECT_10] = &scsi_disk_emulate_reqops, 2663 [UNMAP] = &scsi_disk_emulate_reqops, 2664 [WRITE_SAME_10] = &scsi_disk_emulate_reqops, 2665 [WRITE_SAME_16] = &scsi_disk_emulate_reqops, 2666 [VERIFY_10] = &scsi_disk_emulate_reqops, 2667 [VERIFY_12] = &scsi_disk_emulate_reqops, 2668 [VERIFY_16] = &scsi_disk_emulate_reqops, 2669 [FORMAT_UNIT] = &scsi_disk_emulate_reqops, 2670 2671 [READ_6] = &scsi_disk_dma_reqops, 2672 [READ_10] = &scsi_disk_dma_reqops, 2673 [READ_12] = &scsi_disk_dma_reqops, 2674 [READ_16] = &scsi_disk_dma_reqops, 2675 [WRITE_6] = &scsi_disk_dma_reqops, 2676 [WRITE_10] = &scsi_disk_dma_reqops, 2677 [WRITE_12] = &scsi_disk_dma_reqops, 2678 [WRITE_16] = &scsi_disk_dma_reqops, 2679 [WRITE_VERIFY_10] = &scsi_disk_dma_reqops, 2680 [WRITE_VERIFY_12] = &scsi_disk_dma_reqops, 2681 [WRITE_VERIFY_16] = &scsi_disk_dma_reqops, 2682 }; 2683 2684 static void scsi_disk_new_request_dump(uint32_t lun, uint32_t tag, uint8_t *buf) 2685 { 2686 int len = scsi_cdb_length(buf); 2687 g_autoptr(GString) str = NULL; 2688 2689 assert(len > 0 && len <= 16); 2690 str = qemu_hexdump_line(NULL, buf, len, 1, 0); 2691 trace_scsi_disk_new_request(lun, tag, str->str); 2692 } 2693 2694 static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun, 2695 uint8_t *buf, void *hba_private) 2696 { 2697 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d); 2698 SCSIRequest *req; 2699 const SCSIReqOps *ops; 2700 uint8_t command; 2701 2702 command = buf[0]; 2703 ops = scsi_disk_reqops_dispatch[command]; 2704 if (!ops) { 2705 ops = &scsi_disk_emulate_reqops; 2706 } 2707 req = scsi_req_alloc(ops, &s->qdev, tag, lun, hba_private); 2708 2709 if 
(trace_event_get_state_backends(TRACE_SCSI_DISK_NEW_REQUEST)) { 2710 scsi_disk_new_request_dump(lun, tag, buf); 2711 } 2712 2713 return req; 2714 } 2715 2716 #ifdef __linux__ 2717 static int get_device_type(SCSIDiskState *s) 2718 { 2719 uint8_t cmd[16]; 2720 uint8_t buf[36]; 2721 int ret; 2722 2723 memset(cmd, 0, sizeof(cmd)); 2724 memset(buf, 0, sizeof(buf)); 2725 cmd[0] = INQUIRY; 2726 cmd[4] = sizeof(buf); 2727 2728 ret = scsi_SG_IO_FROM_DEV(s->qdev.conf.blk, cmd, sizeof(cmd), 2729 buf, sizeof(buf), s->qdev.io_timeout); 2730 if (ret < 0) { 2731 return -1; 2732 } 2733 s->qdev.type = buf[0]; 2734 if (buf[1] & 0x80) { 2735 s->features |= 1 << SCSI_DISK_F_REMOVABLE; 2736 } 2737 return 0; 2738 } 2739 2740 static void scsi_block_realize(SCSIDevice *dev, Error **errp) 2741 { 2742 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2743 int sg_version; 2744 int rc; 2745 2746 if (!s->qdev.conf.blk) { 2747 error_setg(errp, "drive property not set"); 2748 return; 2749 } 2750 2751 if (s->rotation_rate) { 2752 error_report_once("rotation_rate is specified for scsi-block but is " 2753 "not implemented. This option is deprecated and will " 2754 "be removed in a future version"); 2755 } 2756 2757 /* check we are using a driver managing SG_IO (version 3 and after) */ 2758 rc = blk_ioctl(s->qdev.conf.blk, SG_GET_VERSION_NUM, &sg_version); 2759 if (rc < 0) { 2760 error_setg_errno(errp, -rc, "cannot get SG_IO version number"); 2761 if (rc != -EPERM) { 2762 error_append_hint(errp, "Is this a SCSI device?\n"); 2763 } 2764 return; 2765 } 2766 if (sg_version < 30000) { 2767 error_setg(errp, "scsi generic interface too old"); 2768 return; 2769 } 2770 2771 /* get device type from INQUIRY data */ 2772 rc = get_device_type(s); 2773 if (rc < 0) { 2774 error_setg(errp, "INQUIRY failed"); 2775 return; 2776 } 2777 2778 /* Make a guess for the block size, we'll fix it when the guest sends. 2779 * READ CAPACITY. If they don't, they likely would assume these sizes 2780 * anyway. 
(TODO: check in /sys). 2781 */ 2782 if (s->qdev.type == TYPE_ROM || s->qdev.type == TYPE_WORM) { 2783 s->qdev.blocksize = 2048; 2784 } else { 2785 s->qdev.blocksize = 512; 2786 } 2787 2788 /* Makes the scsi-block device not removable by using HMP and QMP eject 2789 * command. 2790 */ 2791 s->features |= (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS); 2792 2793 scsi_realize(&s->qdev, errp); 2794 scsi_generic_read_device_inquiry(&s->qdev); 2795 } 2796 2797 typedef struct SCSIBlockReq { 2798 SCSIDiskReq req; 2799 sg_io_hdr_t io_header; 2800 2801 /* Selected bytes of the original CDB, copied into our own CDB. */ 2802 uint8_t cmd, cdb1, group_number; 2803 2804 /* CDB passed to SG_IO. */ 2805 uint8_t cdb[16]; 2806 BlockCompletionFunc *cb; 2807 void *cb_opaque; 2808 } SCSIBlockReq; 2809 2810 static void scsi_block_sgio_complete(void *opaque, int ret) 2811 { 2812 SCSIBlockReq *req = (SCSIBlockReq *)opaque; 2813 SCSIDiskReq *r = &req->req; 2814 sg_io_hdr_t *io_hdr = &req->io_header; 2815 2816 if (ret == 0) { 2817 if (io_hdr->host_status != SCSI_HOST_OK) { 2818 scsi_req_complete_failed(&r->req, io_hdr->host_status); 2819 scsi_req_unref(&r->req); 2820 return; 2821 } 2822 2823 if (io_hdr->driver_status & SG_ERR_DRIVER_TIMEOUT) { 2824 ret = BUSY; 2825 } else { 2826 ret = io_hdr->status; 2827 } 2828 2829 if (ret > 0) { 2830 if (scsi_handle_rw_error(r, ret, true)) { 2831 scsi_req_unref(&r->req); 2832 return; 2833 } 2834 2835 /* Ignore error. */ 2836 ret = 0; 2837 } 2838 } 2839 2840 req->cb(req->cb_opaque, ret); 2841 } 2842 2843 static BlockAIOCB *scsi_block_do_sgio(SCSIBlockReq *req, 2844 int64_t offset, QEMUIOVector *iov, 2845 int direction, 2846 BlockCompletionFunc *cb, void *opaque) 2847 { 2848 sg_io_hdr_t *io_header = &req->io_header; 2849 SCSIDiskReq *r = &req->req; 2850 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 2851 int nb_logical_blocks; 2852 uint64_t lba; 2853 BlockAIOCB *aiocb; 2854 2855 /* This is not supported yet. 
It can only happen if the guest does 2856 * reads and writes that are not aligned to one logical sectors 2857 * _and_ cover multiple MemoryRegions. 2858 */ 2859 assert(offset % s->qdev.blocksize == 0); 2860 assert(iov->size % s->qdev.blocksize == 0); 2861 2862 io_header->interface_id = 'S'; 2863 2864 /* The data transfer comes from the QEMUIOVector. */ 2865 io_header->dxfer_direction = direction; 2866 io_header->dxfer_len = iov->size; 2867 io_header->dxferp = (void *)iov->iov; 2868 io_header->iovec_count = iov->niov; 2869 assert(io_header->iovec_count == iov->niov); /* no overflow! */ 2870 2871 /* Build a new CDB with the LBA and length patched in, in case 2872 * DMA helpers split the transfer in multiple segments. Do not 2873 * build a CDB smaller than what the guest wanted, and only build 2874 * a larger one if strictly necessary. 2875 */ 2876 io_header->cmdp = req->cdb; 2877 lba = offset / s->qdev.blocksize; 2878 nb_logical_blocks = io_header->dxfer_len / s->qdev.blocksize; 2879 2880 if ((req->cmd >> 5) == 0 && lba <= 0x1ffff) { 2881 /* 6-byte CDB */ 2882 stl_be_p(&req->cdb[0], lba | (req->cmd << 24)); 2883 req->cdb[4] = nb_logical_blocks; 2884 req->cdb[5] = 0; 2885 io_header->cmd_len = 6; 2886 } else if ((req->cmd >> 5) <= 1 && lba <= 0xffffffffULL) { 2887 /* 10-byte CDB */ 2888 req->cdb[0] = (req->cmd & 0x1f) | 0x20; 2889 req->cdb[1] = req->cdb1; 2890 stl_be_p(&req->cdb[2], lba); 2891 req->cdb[6] = req->group_number; 2892 stw_be_p(&req->cdb[7], nb_logical_blocks); 2893 req->cdb[9] = 0; 2894 io_header->cmd_len = 10; 2895 } else if ((req->cmd >> 5) != 4 && lba <= 0xffffffffULL) { 2896 /* 12-byte CDB */ 2897 req->cdb[0] = (req->cmd & 0x1f) | 0xA0; 2898 req->cdb[1] = req->cdb1; 2899 stl_be_p(&req->cdb[2], lba); 2900 stl_be_p(&req->cdb[6], nb_logical_blocks); 2901 req->cdb[10] = req->group_number; 2902 req->cdb[11] = 0; 2903 io_header->cmd_len = 12; 2904 } else { 2905 /* 16-byte CDB */ 2906 req->cdb[0] = (req->cmd & 0x1f) | 0x80; 2907 req->cdb[1] = req->cdb1; 2908 
stq_be_p(&req->cdb[2], lba); 2909 stl_be_p(&req->cdb[10], nb_logical_blocks); 2910 req->cdb[14] = req->group_number; 2911 req->cdb[15] = 0; 2912 io_header->cmd_len = 16; 2913 } 2914 2915 /* The rest is as in scsi-generic.c. */ 2916 io_header->mx_sb_len = sizeof(r->req.sense); 2917 io_header->sbp = r->req.sense; 2918 io_header->timeout = s->qdev.io_timeout * 1000; 2919 io_header->usr_ptr = r; 2920 io_header->flags |= SG_FLAG_DIRECT_IO; 2921 req->cb = cb; 2922 req->cb_opaque = opaque; 2923 trace_scsi_disk_aio_sgio_command(r->req.tag, req->cdb[0], lba, 2924 nb_logical_blocks, io_header->timeout); 2925 aiocb = blk_aio_ioctl(s->qdev.conf.blk, SG_IO, io_header, scsi_block_sgio_complete, req); 2926 assert(aiocb != NULL); 2927 return aiocb; 2928 } 2929 2930 static bool scsi_block_no_fua(SCSICommand *cmd) 2931 { 2932 return false; 2933 } 2934 2935 static BlockAIOCB *scsi_block_dma_readv(int64_t offset, 2936 QEMUIOVector *iov, 2937 BlockCompletionFunc *cb, void *cb_opaque, 2938 void *opaque) 2939 { 2940 SCSIBlockReq *r = opaque; 2941 return scsi_block_do_sgio(r, offset, iov, 2942 SG_DXFER_FROM_DEV, cb, cb_opaque); 2943 } 2944 2945 static BlockAIOCB *scsi_block_dma_writev(int64_t offset, 2946 QEMUIOVector *iov, 2947 BlockCompletionFunc *cb, void *cb_opaque, 2948 void *opaque) 2949 { 2950 SCSIBlockReq *r = opaque; 2951 return scsi_block_do_sgio(r, offset, iov, 2952 SG_DXFER_TO_DEV, cb, cb_opaque); 2953 } 2954 2955 static bool scsi_block_is_passthrough(SCSIDiskState *s, uint8_t *buf) 2956 { 2957 switch (buf[0]) { 2958 case VERIFY_10: 2959 case VERIFY_12: 2960 case VERIFY_16: 2961 /* Check if BYTCHK == 0x01 (data-out buffer contains data 2962 * for the number of logical blocks specified in the length 2963 * field). For other modes, do not use scatter/gather operation. 
2964 */ 2965 if ((buf[1] & 6) == 2) { 2966 return false; 2967 } 2968 break; 2969 2970 case READ_6: 2971 case READ_10: 2972 case READ_12: 2973 case READ_16: 2974 case WRITE_6: 2975 case WRITE_10: 2976 case WRITE_12: 2977 case WRITE_16: 2978 case WRITE_VERIFY_10: 2979 case WRITE_VERIFY_12: 2980 case WRITE_VERIFY_16: 2981 /* MMC writing cannot be done via DMA helpers, because it sometimes 2982 * involves writing beyond the maximum LBA or to negative LBA (lead-in). 2983 * We might use scsi_block_dma_reqops as long as no writing commands are 2984 * seen, but performance usually isn't paramount on optical media. So, 2985 * just make scsi-block operate the same as scsi-generic for them. 2986 */ 2987 if (s->qdev.type != TYPE_ROM) { 2988 return false; 2989 } 2990 break; 2991 2992 default: 2993 break; 2994 } 2995 2996 return true; 2997 } 2998 2999 3000 static int32_t scsi_block_dma_command(SCSIRequest *req, uint8_t *buf) 3001 { 3002 SCSIBlockReq *r = (SCSIBlockReq *)req; 3003 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 3004 3005 r->cmd = req->cmd.buf[0]; 3006 switch (r->cmd >> 5) { 3007 case 0: 3008 /* 6-byte CDB. */ 3009 r->cdb1 = r->group_number = 0; 3010 break; 3011 case 1: 3012 /* 10-byte CDB. */ 3013 r->cdb1 = req->cmd.buf[1]; 3014 r->group_number = req->cmd.buf[6]; 3015 break; 3016 case 4: 3017 /* 12-byte CDB. */ 3018 r->cdb1 = req->cmd.buf[1]; 3019 r->group_number = req->cmd.buf[10]; 3020 break; 3021 case 5: 3022 /* 16-byte CDB. */ 3023 r->cdb1 = req->cmd.buf[1]; 3024 r->group_number = req->cmd.buf[14]; 3025 break; 3026 default: 3027 abort(); 3028 } 3029 3030 /* Protection information is not supported. For SCSI versions 2 and 3031 * older (as determined by snooping the guest's INQUIRY commands), 3032 * there is no RD/WR/VRPROTECT, so skip this check in these versions. 
3033 */ 3034 if (s->qdev.scsi_version > 2 && (req->cmd.buf[1] & 0xe0)) { 3035 scsi_check_condition(&r->req, SENSE_CODE(INVALID_FIELD)); 3036 return 0; 3037 } 3038 3039 return scsi_disk_dma_command(req, buf); 3040 } 3041 3042 static const SCSIReqOps scsi_block_dma_reqops = { 3043 .size = sizeof(SCSIBlockReq), 3044 .free_req = scsi_free_request, 3045 .send_command = scsi_block_dma_command, 3046 .read_data = scsi_read_data, 3047 .write_data = scsi_write_data, 3048 .get_buf = scsi_get_buf, 3049 .load_request = scsi_disk_load_request, 3050 .save_request = scsi_disk_save_request, 3051 }; 3052 3053 static SCSIRequest *scsi_block_new_request(SCSIDevice *d, uint32_t tag, 3054 uint32_t lun, uint8_t *buf, 3055 void *hba_private) 3056 { 3057 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d); 3058 3059 if (scsi_block_is_passthrough(s, buf)) { 3060 return scsi_req_alloc(&scsi_generic_req_ops, &s->qdev, tag, lun, 3061 hba_private); 3062 } else { 3063 return scsi_req_alloc(&scsi_block_dma_reqops, &s->qdev, tag, lun, 3064 hba_private); 3065 } 3066 } 3067 3068 static int scsi_block_parse_cdb(SCSIDevice *d, SCSICommand *cmd, 3069 uint8_t *buf, size_t buf_len, 3070 void *hba_private) 3071 { 3072 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d); 3073 3074 if (scsi_block_is_passthrough(s, buf)) { 3075 return scsi_bus_parse_cdb(&s->qdev, cmd, buf, buf_len, hba_private); 3076 } else { 3077 return scsi_req_parse_cdb(&s->qdev, cmd, buf, buf_len); 3078 } 3079 } 3080 3081 static void scsi_block_update_sense(SCSIRequest *req) 3082 { 3083 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 3084 SCSIBlockReq *br = DO_UPCAST(SCSIBlockReq, req, r); 3085 r->req.sense_len = MIN(br->io_header.sb_len_wr, sizeof(r->req.sense)); 3086 } 3087 #endif 3088 3089 static 3090 BlockAIOCB *scsi_dma_readv(int64_t offset, QEMUIOVector *iov, 3091 BlockCompletionFunc *cb, void *cb_opaque, 3092 void *opaque) 3093 { 3094 SCSIDiskReq *r = opaque; 3095 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 
3096 return blk_aio_preadv(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque); 3097 } 3098 3099 static 3100 BlockAIOCB *scsi_dma_writev(int64_t offset, QEMUIOVector *iov, 3101 BlockCompletionFunc *cb, void *cb_opaque, 3102 void *opaque) 3103 { 3104 SCSIDiskReq *r = opaque; 3105 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 3106 return blk_aio_pwritev(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque); 3107 } 3108 3109 static void scsi_disk_base_class_initfn(ObjectClass *klass, void *data) 3110 { 3111 DeviceClass *dc = DEVICE_CLASS(klass); 3112 SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass); 3113 3114 dc->fw_name = "disk"; 3115 dc->reset = scsi_disk_reset; 3116 sdc->dma_readv = scsi_dma_readv; 3117 sdc->dma_writev = scsi_dma_writev; 3118 sdc->need_fua_emulation = scsi_is_cmd_fua; 3119 } 3120 3121 static const TypeInfo scsi_disk_base_info = { 3122 .name = TYPE_SCSI_DISK_BASE, 3123 .parent = TYPE_SCSI_DEVICE, 3124 .class_init = scsi_disk_base_class_initfn, 3125 .instance_size = sizeof(SCSIDiskState), 3126 .class_size = sizeof(SCSIDiskClass), 3127 .abstract = true, 3128 }; 3129 3130 #define DEFINE_SCSI_DISK_PROPERTIES() \ 3131 DEFINE_PROP_DRIVE_IOTHREAD("drive", SCSIDiskState, qdev.conf.blk), \ 3132 DEFINE_BLOCK_PROPERTIES_BASE(SCSIDiskState, qdev.conf), \ 3133 DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf), \ 3134 DEFINE_PROP_STRING("ver", SCSIDiskState, version), \ 3135 DEFINE_PROP_STRING("serial", SCSIDiskState, serial), \ 3136 DEFINE_PROP_STRING("vendor", SCSIDiskState, vendor), \ 3137 DEFINE_PROP_STRING("product", SCSIDiskState, product), \ 3138 DEFINE_PROP_STRING("device_id", SCSIDiskState, device_id), \ 3139 DEFINE_PROP_BOOL("migrate-emulated-scsi-request", SCSIDiskState, migrate_emulated_scsi_request, true) 3140 3141 3142 static Property scsi_hd_properties[] = { 3143 DEFINE_SCSI_DISK_PROPERTIES(), 3144 DEFINE_PROP_BIT("removable", SCSIDiskState, features, 3145 SCSI_DISK_F_REMOVABLE, false), 3146 DEFINE_PROP_BIT("dpofua", SCSIDiskState, 
features, 3147 SCSI_DISK_F_DPOFUA, false), 3148 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0), 3149 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0), 3150 DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0), 3151 DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size, 3152 DEFAULT_MAX_UNMAP_SIZE), 3153 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size, 3154 DEFAULT_MAX_IO_SIZE), 3155 DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0), 3156 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version, 3157 5), 3158 DEFINE_PROP_BIT("quirk_mode_page_vendor_specific_apple", SCSIDiskState, 3159 quirks, SCSI_DISK_QUIRK_MODE_PAGE_VENDOR_SPECIFIC_APPLE, 3160 0), 3161 DEFINE_BLOCK_CHS_PROPERTIES(SCSIDiskState, qdev.conf), 3162 DEFINE_PROP_END_OF_LIST(), 3163 }; 3164 3165 static const VMStateDescription vmstate_scsi_disk_state = { 3166 .name = "scsi-disk", 3167 .version_id = 1, 3168 .minimum_version_id = 1, 3169 .fields = (const VMStateField[]) { 3170 VMSTATE_SCSI_DEVICE(qdev, SCSIDiskState), 3171 VMSTATE_BOOL(media_changed, SCSIDiskState), 3172 VMSTATE_BOOL(media_event, SCSIDiskState), 3173 VMSTATE_BOOL(eject_request, SCSIDiskState), 3174 VMSTATE_BOOL(tray_open, SCSIDiskState), 3175 VMSTATE_BOOL(tray_locked, SCSIDiskState), 3176 VMSTATE_END_OF_LIST() 3177 } 3178 }; 3179 3180 static void scsi_hd_class_initfn(ObjectClass *klass, void *data) 3181 { 3182 DeviceClass *dc = DEVICE_CLASS(klass); 3183 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); 3184 3185 sc->realize = scsi_hd_realize; 3186 sc->unrealize = scsi_unrealize; 3187 sc->alloc_req = scsi_new_request; 3188 sc->unit_attention_reported = scsi_disk_unit_attention_reported; 3189 dc->desc = "virtual SCSI disk"; 3190 device_class_set_props(dc, scsi_hd_properties); 3191 dc->vmsd = &vmstate_scsi_disk_state; 3192 } 3193 3194 static const TypeInfo scsi_hd_info = { 3195 .name = "scsi-hd", 3196 .parent = TYPE_SCSI_DISK_BASE, 3197 .class_init = 
scsi_hd_class_initfn, 3198 }; 3199 3200 static Property scsi_cd_properties[] = { 3201 DEFINE_SCSI_DISK_PROPERTIES(), 3202 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0), 3203 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0), 3204 DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0), 3205 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size, 3206 DEFAULT_MAX_IO_SIZE), 3207 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version, 3208 5), 3209 DEFINE_PROP_BIT("quirk_mode_page_apple_vendor", SCSIDiskState, quirks, 3210 SCSI_DISK_QUIRK_MODE_PAGE_APPLE_VENDOR, 0), 3211 DEFINE_PROP_BIT("quirk_mode_sense_rom_use_dbd", SCSIDiskState, quirks, 3212 SCSI_DISK_QUIRK_MODE_SENSE_ROM_USE_DBD, 0), 3213 DEFINE_PROP_BIT("quirk_mode_page_vendor_specific_apple", SCSIDiskState, 3214 quirks, SCSI_DISK_QUIRK_MODE_PAGE_VENDOR_SPECIFIC_APPLE, 3215 0), 3216 DEFINE_PROP_BIT("quirk_mode_page_truncated", SCSIDiskState, quirks, 3217 SCSI_DISK_QUIRK_MODE_PAGE_TRUNCATED, 0), 3218 DEFINE_PROP_END_OF_LIST(), 3219 }; 3220 3221 static void scsi_cd_class_initfn(ObjectClass *klass, void *data) 3222 { 3223 DeviceClass *dc = DEVICE_CLASS(klass); 3224 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); 3225 3226 sc->realize = scsi_cd_realize; 3227 sc->alloc_req = scsi_new_request; 3228 sc->unit_attention_reported = scsi_disk_unit_attention_reported; 3229 dc->desc = "virtual SCSI CD-ROM"; 3230 device_class_set_props(dc, scsi_cd_properties); 3231 dc->vmsd = &vmstate_scsi_disk_state; 3232 } 3233 3234 static const TypeInfo scsi_cd_info = { 3235 .name = "scsi-cd", 3236 .parent = TYPE_SCSI_DISK_BASE, 3237 .class_init = scsi_cd_class_initfn, 3238 }; 3239 3240 #ifdef __linux__ 3241 static Property scsi_block_properties[] = { 3242 DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf), 3243 DEFINE_PROP_DRIVE("drive", SCSIDiskState, qdev.conf.blk), 3244 DEFINE_PROP_BOOL("share-rw", SCSIDiskState, qdev.conf.share_rw, false), 3245 
DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0), 3246 DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size, 3247 DEFAULT_MAX_UNMAP_SIZE), 3248 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size, 3249 DEFAULT_MAX_IO_SIZE), 3250 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version, 3251 -1), 3252 DEFINE_PROP_UINT32("io_timeout", SCSIDiskState, qdev.io_timeout, 3253 DEFAULT_IO_TIMEOUT), 3254 DEFINE_PROP_END_OF_LIST(), 3255 }; 3256 3257 static void scsi_block_class_initfn(ObjectClass *klass, void *data) 3258 { 3259 DeviceClass *dc = DEVICE_CLASS(klass); 3260 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); 3261 SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass); 3262 3263 sc->realize = scsi_block_realize; 3264 sc->alloc_req = scsi_block_new_request; 3265 sc->parse_cdb = scsi_block_parse_cdb; 3266 sdc->dma_readv = scsi_block_dma_readv; 3267 sdc->dma_writev = scsi_block_dma_writev; 3268 sdc->update_sense = scsi_block_update_sense; 3269 sdc->need_fua_emulation = scsi_block_no_fua; 3270 dc->desc = "SCSI block device passthrough"; 3271 device_class_set_props(dc, scsi_block_properties); 3272 dc->vmsd = &vmstate_scsi_disk_state; 3273 } 3274 3275 static const TypeInfo scsi_block_info = { 3276 .name = "scsi-block", 3277 .parent = TYPE_SCSI_DISK_BASE, 3278 .class_init = scsi_block_class_initfn, 3279 }; 3280 #endif 3281 3282 static void scsi_disk_register_types(void) 3283 { 3284 type_register_static(&scsi_disk_base_info); 3285 type_register_static(&scsi_hd_info); 3286 type_register_static(&scsi_cd_info); 3287 #ifdef __linux__ 3288 type_register_static(&scsi_block_info); 3289 #endif 3290 } 3291 3292 type_init(scsi_disk_register_types) 3293