1 /* 2 * SCSI Device emulation 3 * 4 * Copyright (c) 2006 CodeSourcery. 5 * Based on code by Fabrice Bellard 6 * 7 * Written by Paul Brook 8 * Modifications: 9 * 2009-Dec-12 Artyom Tarasenko : implemented stamdard inquiry for the case 10 * when the allocation length of CDB is smaller 11 * than 36. 12 * 2009-Oct-13 Artyom Tarasenko : implemented the block descriptor in the 13 * MODE SENSE response. 14 * 15 * This code is licensed under the LGPL. 16 * 17 * Note that this file only handles the SCSI architecture model and device 18 * commands. Emulation of interface/link layer protocols is handled by 19 * the host adapter emulator. 20 */ 21 22 #include "qemu/osdep.h" 23 #include "qemu/units.h" 24 #include "qapi/error.h" 25 #include "qemu/error-report.h" 26 #include "qemu/main-loop.h" 27 #include "qemu/module.h" 28 #include "hw/scsi/scsi.h" 29 #include "migration/qemu-file-types.h" 30 #include "migration/vmstate.h" 31 #include "hw/scsi/emulation.h" 32 #include "scsi/constants.h" 33 #include "sysemu/block-backend.h" 34 #include "sysemu/blockdev.h" 35 #include "hw/block/block.h" 36 #include "hw/qdev-properties.h" 37 #include "hw/qdev-properties-system.h" 38 #include "sysemu/dma.h" 39 #include "sysemu/sysemu.h" 40 #include "qemu/cutils.h" 41 #include "trace.h" 42 #include "qom/object.h" 43 44 #ifdef __linux 45 #include <scsi/sg.h> 46 #endif 47 48 #define SCSI_WRITE_SAME_MAX (512 * KiB) 49 #define SCSI_DMA_BUF_SIZE (128 * KiB) 50 #define SCSI_MAX_INQUIRY_LEN 256 51 #define SCSI_MAX_MODE_LEN 256 52 53 #define DEFAULT_DISCARD_GRANULARITY (4 * KiB) 54 #define DEFAULT_MAX_UNMAP_SIZE (1 * GiB) 55 #define DEFAULT_MAX_IO_SIZE INT_MAX /* 2 GB - 1 block */ 56 57 #define TYPE_SCSI_DISK_BASE "scsi-disk-base" 58 59 OBJECT_DECLARE_TYPE(SCSIDiskState, SCSIDiskClass, SCSI_DISK_BASE) 60 61 struct SCSIDiskClass { 62 SCSIDeviceClass parent_class; 63 DMAIOFunc *dma_readv; 64 DMAIOFunc *dma_writev; 65 bool (*need_fua_emulation)(SCSICommand *cmd); 66 void (*update_sense)(SCSIRequest *r); 67 }; 

/*
 * Per-request state.  Wraps the generic SCSIRequest and tracks transfer
 * progress plus the bounce buffer used when the HBA does not provide a
 * scatter/gather list.
 */
typedef struct SCSIDiskReq {
    SCSIRequest req;
    /* Both sector and sector_count are in terms of BDRV_SECTOR_SIZE bytes. */
    uint64_t sector;
    uint32_t sector_count;
    uint32_t buflen;            /* allocated size of iov.iov_base */
    bool started;               /* first data chunk already requested */
    bool need_fua_emulation;    /* emulate FUA with an explicit flush */
    struct iovec iov;           /* bounce buffer, lazily allocated */
    QEMUIOVector qiov;          /* single-element vector wrapping iov */
    BlockAcctCookie acct;       /* block-layer accounting cookie */
} SCSIDiskReq;

/* Bit numbers for SCSIDiskState::features. */
#define SCSI_DISK_F_REMOVABLE 0
#define SCSI_DISK_F_DPOFUA 1
#define SCSI_DISK_F_NO_REMOVABLE_DEVOPS 2

struct SCSIDiskState {
    SCSIDevice qdev;
    uint32_t features;          /* mask of SCSI_DISK_F_* bits */
    bool media_changed;
    bool media_event;
    bool eject_request;
    uint16_t port_index;        /* relative target port for VPD 0x83; 0 = none */
    uint64_t max_unmap_size;    /* reported in the block limits VPD page */
    uint64_t max_io_size;       /* reported in the block limits VPD page */
    QEMUBH *bh;
    char *version;              /* INQUIRY product revision (4 chars used) */
    char *serial;               /* unit serial number (VPD 0x80) */
    char *vendor;               /* INQUIRY vendor identification */
    char *product;              /* INQUIRY product identification */
    char *device_id;            /* ASCII designator for VPD 0x83 */
    bool tray_open;
    bool tray_locked;
    /*
     * 0x0000        - rotation rate not reported
     * 0x0001        - non-rotating medium (SSD)
     * 0x0002-0x0400 - reserved
     * 0x0401-0xfffe - rotations per minute
     * 0xffff        - reserved
     */
    uint16_t rotation_rate;
};

/* Release the per-request bounce buffer when the request is destroyed. */
static void scsi_free_request(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    qemu_vfree(r->iov.iov_base);
}

/* Helper function for command completion with sense.
 */
static void scsi_check_condition(SCSIDiskReq *r, SCSISense sense)
{
    trace_scsi_disk_check_condition(r->req.tag, sense.key, sense.asc,
                                    sense.ascq);
    scsi_req_build_sense(&r->req, sense);
    scsi_req_complete(&r->req, CHECK_CONDITION);
}

/*
 * (Re)initialize the request's single-element I/O vector.  The bounce
 * buffer is allocated on first use with capacity 'size'; the vector
 * length is capped by both the remaining transfer and the buffer size.
 */
static void scsi_init_iovec(SCSIDiskReq *r, size_t size)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    if (!r->iov.iov_base) {
        r->buflen = size;
        r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen);
    }
    r->iov.iov_len = MIN(r->sector_count * BDRV_SECTOR_SIZE, r->buflen);
    qemu_iovec_init_external(&r->qiov, &r->iov, 1);
}

/*
 * Migration: serialize in-flight request state.  The layout must match
 * scsi_disk_load_request() exactly: sector, sector_count, buflen, then
 * the buffer contents (for device-to-host transfers that will not be
 * retried, the vector length is written first).
 */
static void scsi_disk_save_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    qemu_put_be64s(f, &r->sector);
    qemu_put_be32s(f, &r->sector_count);
    qemu_put_be32s(f, &r->buflen);
    if (r->buflen) {
        if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
            qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len);
        } else if (!req->retry) {
            uint32_t len = r->iov.iov_len;
            qemu_put_be32s(f, &len);
            qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len);
        }
    }
}

/* Migration: counterpart of scsi_disk_save_request(). */
static void scsi_disk_load_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    qemu_get_be64s(f, &r->sector);
    qemu_get_be32s(f, &r->sector_count);
    qemu_get_be32s(f, &r->buflen);
    if (r->buflen) {
        scsi_init_iovec(r, r->buflen);
        if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
            qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len);
        } else if (!r->req.retry) {
            uint32_t len;
            qemu_get_be32s(f, &len);
            r->iov.iov_len = len;
            assert(r->iov.iov_len <= r->buflen);
            qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len);
        }
    }

    qemu_iovec_init_external(&r->qiov, &r->iov, 1);
}

/*
 * scsi_handle_rw_error has two return values.
 * False means that the error
 * must be ignored, true means that the error has been processed and the
 * caller should not do anything else for this request. Note that
 * scsi_handle_rw_error always manages its reference counts, independent
 * of the return value.
 */
static bool scsi_handle_rw_error(SCSIDiskReq *r, int ret, bool acct_failed)
{
    bool is_read = (r->req.cmd.mode == SCSI_XFER_FROM_DEV);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
    SCSISense sense = SENSE_CODE(NO_SENSE);
    int error = 0;
    bool req_has_sense = false;
    BlockErrorAction action;
    int status;

    if (ret < 0) {
        /* Host-side error: map errno to a SCSI status and sense code. */
        status = scsi_sense_from_errno(-ret, &sense);
        error = -ret;
    } else {
        /* A passthrough command has completed with nonzero status. */
        status = ret;
        if (status == CHECK_CONDITION) {
            req_has_sense = true;
            error = scsi_sense_buf_to_errno(r->req.sense, sizeof(r->req.sense));
        } else {
            error = EINVAL;
        }
    }

    /*
     * Check whether the error has to be handled by the guest or should
     * rather follow the rerror=/werror= settings. Guest-handled errors
     * are usually retried immediately, so do not post them to QMP and
     * do not account them as failed I/O.
     */
    if (req_has_sense &&
        scsi_sense_buf_is_guest_recoverable(r->req.sense, sizeof(r->req.sense))) {
        action = BLOCK_ERROR_ACTION_REPORT;
        acct_failed = false;
    } else {
        action = blk_get_error_action(s->qdev.conf.blk, is_read, error);
        blk_error_action(s->qdev.conf.blk, action, is_read, error);
    }

    switch (action) {
    case BLOCK_ERROR_ACTION_REPORT:
        if (acct_failed) {
            block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
        }
        if (req_has_sense) {
            /* Passthrough sense is refreshed by the subclass hook. */
            sdc->update_sense(&r->req);
        } else if (status == CHECK_CONDITION) {
            scsi_req_build_sense(&r->req, sense);
        }
        scsi_req_complete(&r->req, status);
        return true;

    case BLOCK_ERROR_ACTION_IGNORE:
        return false;

    case BLOCK_ERROR_ACTION_STOP:
        /* Keep the request around; it is retried when the VM resumes. */
        scsi_req_retry(&r->req);
        return true;

    default:
        g_assert_not_reached();
    }
}

/*
 * Common completion-path check: handles cancellation and errors.
 * Returns true when the request has been finished (completed, retried,
 * or cancel-completed) and the caller must not touch it further.
 */
static bool scsi_disk_req_check_error(SCSIDiskReq *r, int ret, bool acct_failed)
{
    if (r->req.io_canceled) {
        scsi_req_cancel_complete(&r->req);
        return true;
    }

    if (ret < 0) {
        return scsi_handle_rw_error(r, ret, acct_failed);
    }

    return false;
}

/* Generic AIO completion: account, complete with GOOD, drop the AIO ref. */
static void scsi_aio_complete(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;
    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (scsi_disk_req_check_error(r, ret, true)) {
        goto done;
    }

    block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    scsi_req_complete(&r->req, GOOD);

done:
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
    scsi_req_unref(&r->req);
}

/*
 * Does this CDB request Force Unit Access semantics?  Read/write
 * commands carry an explicit FUA bit (0x08 in byte 1); VERIFY and
 * WRITE AND VERIFY imply it; 6-byte commands have no FUA bit.
 */
static bool scsi_is_cmd_fua(SCSICommand *cmd)
{
    switch (cmd->buf[0]) {
    case READ_10:
    case READ_12:
    case READ_16:
    case WRITE_10:
    case WRITE_12:
    case WRITE_16:
        return (cmd->buf[1] & 8) != 0;

    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
    case WRITE_VERIFY_10:
    case WRITE_VERIFY_12:
    case WRITE_VERIFY_16:
        return true;

    case READ_6:
    case WRITE_6:
    default:
        return false;
    }
}

/*
 * Finish a write request, issuing the flush that emulates FUA when
 * needed.  Takes over the caller's reference: the request is either
 * completed (and unref'd) here or unref'd by scsi_aio_complete().
 */
static void scsi_write_do_fua(SCSIDiskReq *r)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb == NULL);
    assert(!r->req.io_canceled);

    if (r->need_fua_emulation) {
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
        return;
    }

    scsi_req_complete(&r->req, GOOD);
    scsi_req_unref(&r->req);
}

/* Completion of a full scatter/gather DMA transfer (no more chunks). */
static void scsi_dma_complete_noio(SCSIDiskReq *r, int ret)
{
    assert(r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    r->sector += r->sector_count;
    r->sector_count = 0;
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        /* scsi_write_do_fua() consumes the reference; do not unref here. */
        scsi_write_do_fua(r);
        return;
    } else {
        scsi_req_complete(&r->req, GOOD);
    }

done:
    scsi_req_unref(&r->req);
}

/* AIO callback for scatter/gather DMA: account, then finish. */
static void scsi_dma_complete(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    }
    scsi_dma_complete_noio(r, ret);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}

/* Advance progress counters after one bounce-buffer read chunk. */
static void scsi_read_complete_noio(SCSIDiskReq *r, int ret)
{
    uint32_t n;

    assert(r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    n = r->qiov.size / BDRV_SECTOR_SIZE;
    r->sector += n;
    r->sector_count -= n;
    /* Hand the chunk to the HBA; it calls scsi_read_data() for more. */
    scsi_req_data(&r->req, r->qiov.size);

done:
    scsi_req_unref(&r->req);
}

/* AIO callback for a bounce-buffer read chunk. */
static void scsi_read_complete(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
        trace_scsi_disk_read_complete(r->req.tag, r->qiov.size);
    }
    scsi_read_complete_noio(r, ret);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}

/* Actually issue a read to the block device. */
static void scsi_do_read(SCSIDiskReq *r, int ret)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));

    assert (r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    /* The request is used as the AIO opaque value, so add a ref. */
    scsi_req_ref(&r->req);

    if (r->req.sg) {
        /* HBA supplied a scatter/gather list: DMA straight into it. */
        dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_READ);
        r->req.residual -= r->req.sg->size;
        r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
                                  r->req.sg, r->sector << BDRV_SECTOR_BITS,
                                  BDRV_SECTOR_SIZE,
                                  sdc->dma_readv, r, scsi_dma_complete, r,
                                  DMA_DIRECTION_FROM_DEVICE);
    } else {
        /* No S/G list: read one bounce-buffer chunk at a time. */
        scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         r->qiov.size, BLOCK_ACCT_READ);
        r->req.aiocb = sdc->dma_readv(r->sector << BDRV_SECTOR_BITS, &r->qiov,
                                      scsi_read_complete, r, r);
    }

done:
    scsi_req_unref(&r->req);
}

/* AIO callback for the pre-read FUA flush: account, then start the read. */
static void scsi_do_read_cb(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert (r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    }
    scsi_do_read(opaque, ret);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}

/* Read more data from scsi device into buffer. */
static void scsi_read_data(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    bool first;

    trace_scsi_disk_read_data_count(r->sector_count);
    if (r->sector_count == 0) {
        /* This also clears the sense buffer for REQUEST SENSE. */
        scsi_req_complete(&r->req, GOOD);
        return;
    }

    /* No data transfer may already be in progress */
    assert(r->req.aiocb == NULL);

    /* The request is used as the AIO opaque value, so add a ref. */
    scsi_req_ref(&r->req);
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        trace_scsi_disk_read_data_invalid();
        scsi_read_complete_noio(r, -EINVAL);
        return;
    }

    if (!blk_is_available(req->dev->conf.blk)) {
        scsi_read_complete_noio(r, -ENOMEDIUM);
        return;
    }

    /* A FUA read must flush the write cache before reading the medium. */
    first = !r->started;
    r->started = true;
    if (first && r->need_fua_emulation) {
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_do_read_cb, r);
    } else {
        scsi_do_read(r, 0);
    }
}

/* Advance progress counters after one bounce-buffer write chunk. */
static void scsi_write_complete_noio(SCSIDiskReq *r, int ret)
{
    uint32_t n;

    assert (r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    n = r->qiov.size / BDRV_SECTOR_SIZE;
    r->sector += n;
    r->sector_count -= n;
    if (r->sector_count == 0) {
        /* scsi_write_do_fua() consumes the reference; do not unref here. */
        scsi_write_do_fua(r);
        return;
    } else {
        scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
        trace_scsi_disk_write_complete_noio(r->req.tag, r->qiov.size);
        /* Ask the HBA for the next chunk of data. */
        scsi_req_data(&r->req, r->qiov.size);
    }

done:
    scsi_req_unref(&r->req);
}

/* AIO callback for a bounce-buffer write chunk. */
static void scsi_write_complete(void * opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert (r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    }
    scsi_write_complete_noio(r, ret);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}

/* HBA callback: more data is available to write (or should be requested). */
static void scsi_write_data(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));

    /* No data transfer may already be in progress */
    assert(r->req.aiocb == NULL);

    /* The request is used as the AIO opaque value, so add a ref. */
    scsi_req_ref(&r->req);
    if (r->req.cmd.mode != SCSI_XFER_TO_DEV) {
        trace_scsi_disk_write_data_invalid();
        scsi_write_complete_noio(r, -EINVAL);
        return;
    }

    if (!r->req.sg && !r->qiov.size) {
        /* Called for the first time. Ask the driver to send us more data. */
        r->started = true;
        scsi_write_complete_noio(r, 0);
        return;
    }
    if (!blk_is_available(req->dev->conf.blk)) {
        scsi_write_complete_noio(r, -ENOMEDIUM);
        return;
    }

    if (r->req.cmd.buf[0] == VERIFY_10 || r->req.cmd.buf[0] == VERIFY_12 ||
        r->req.cmd.buf[0] == VERIFY_16) {
        /* VERIFY is emulated as a no-op: accept the data, write nothing. */
        if (r->req.sg) {
            scsi_dma_complete_noio(r, 0);
        } else {
            scsi_write_complete_noio(r, 0);
        }
        return;
    }

    if (r->req.sg) {
        /* HBA supplied a scatter/gather list: DMA straight from it. */
        dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_WRITE);
        r->req.residual -= r->req.sg->size;
        r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
                                  r->req.sg, r->sector << BDRV_SECTOR_BITS,
                                  BDRV_SECTOR_SIZE,
                                  sdc->dma_writev, r, scsi_dma_complete, r,
                                  DMA_DIRECTION_TO_DEVICE);
    } else {
        /* No S/G list: write the current bounce-buffer chunk. */
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         r->qiov.size, BLOCK_ACCT_WRITE);
        r->req.aiocb = sdc->dma_writev(r->sector << BDRV_SECTOR_BITS, &r->qiov,
                                       scsi_write_complete, r, r);
    }
}

/* Return a pointer to the data buffer.
*/ 598 static uint8_t *scsi_get_buf(SCSIRequest *req) 599 { 600 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 601 602 return (uint8_t *)r->iov.iov_base; 603 } 604 605 static int scsi_disk_emulate_vpd_page(SCSIRequest *req, uint8_t *outbuf) 606 { 607 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 608 uint8_t page_code = req->cmd.buf[2]; 609 int start, buflen = 0; 610 611 outbuf[buflen++] = s->qdev.type & 0x1f; 612 outbuf[buflen++] = page_code; 613 outbuf[buflen++] = 0x00; 614 outbuf[buflen++] = 0x00; 615 start = buflen; 616 617 switch (page_code) { 618 case 0x00: /* Supported page codes, mandatory */ 619 { 620 trace_scsi_disk_emulate_vpd_page_00(req->cmd.xfer); 621 outbuf[buflen++] = 0x00; /* list of supported pages (this page) */ 622 if (s->serial) { 623 outbuf[buflen++] = 0x80; /* unit serial number */ 624 } 625 outbuf[buflen++] = 0x83; /* device identification */ 626 if (s->qdev.type == TYPE_DISK) { 627 outbuf[buflen++] = 0xb0; /* block limits */ 628 outbuf[buflen++] = 0xb1; /* block device characteristics */ 629 outbuf[buflen++] = 0xb2; /* thin provisioning */ 630 } 631 break; 632 } 633 case 0x80: /* Device serial number, optional */ 634 { 635 int l; 636 637 if (!s->serial) { 638 trace_scsi_disk_emulate_vpd_page_80_not_supported(); 639 return -1; 640 } 641 642 l = strlen(s->serial); 643 if (l > 36) { 644 l = 36; 645 } 646 647 trace_scsi_disk_emulate_vpd_page_80(req->cmd.xfer); 648 memcpy(outbuf + buflen, s->serial, l); 649 buflen += l; 650 break; 651 } 652 653 case 0x83: /* Device identification page, mandatory */ 654 { 655 int id_len = s->device_id ? 
MIN(strlen(s->device_id), 255 - 8) : 0; 656 657 trace_scsi_disk_emulate_vpd_page_83(req->cmd.xfer); 658 659 if (id_len) { 660 outbuf[buflen++] = 0x2; /* ASCII */ 661 outbuf[buflen++] = 0; /* not officially assigned */ 662 outbuf[buflen++] = 0; /* reserved */ 663 outbuf[buflen++] = id_len; /* length of data following */ 664 memcpy(outbuf + buflen, s->device_id, id_len); 665 buflen += id_len; 666 } 667 668 if (s->qdev.wwn) { 669 outbuf[buflen++] = 0x1; /* Binary */ 670 outbuf[buflen++] = 0x3; /* NAA */ 671 outbuf[buflen++] = 0; /* reserved */ 672 outbuf[buflen++] = 8; 673 stq_be_p(&outbuf[buflen], s->qdev.wwn); 674 buflen += 8; 675 } 676 677 if (s->qdev.port_wwn) { 678 outbuf[buflen++] = 0x61; /* SAS / Binary */ 679 outbuf[buflen++] = 0x93; /* PIV / Target port / NAA */ 680 outbuf[buflen++] = 0; /* reserved */ 681 outbuf[buflen++] = 8; 682 stq_be_p(&outbuf[buflen], s->qdev.port_wwn); 683 buflen += 8; 684 } 685 686 if (s->port_index) { 687 outbuf[buflen++] = 0x61; /* SAS / Binary */ 688 689 /* PIV/Target port/relative target port */ 690 outbuf[buflen++] = 0x94; 691 692 outbuf[buflen++] = 0; /* reserved */ 693 outbuf[buflen++] = 4; 694 stw_be_p(&outbuf[buflen + 2], s->port_index); 695 buflen += 4; 696 } 697 break; 698 } 699 case 0xb0: /* block limits */ 700 { 701 SCSIBlockLimits bl = {}; 702 703 if (s->qdev.type == TYPE_ROM) { 704 trace_scsi_disk_emulate_vpd_page_b0_not_supported(); 705 return -1; 706 } 707 bl.wsnz = 1; 708 bl.unmap_sectors = 709 s->qdev.conf.discard_granularity / s->qdev.blocksize; 710 bl.min_io_size = 711 s->qdev.conf.min_io_size / s->qdev.blocksize; 712 bl.opt_io_size = 713 s->qdev.conf.opt_io_size / s->qdev.blocksize; 714 bl.max_unmap_sectors = 715 s->max_unmap_size / s->qdev.blocksize; 716 bl.max_io_sectors = 717 s->max_io_size / s->qdev.blocksize; 718 /* 255 descriptors fit in 4 KiB with an 8-byte header */ 719 bl.max_unmap_descr = 255; 720 721 if (s->qdev.type == TYPE_DISK) { 722 int max_transfer_blk = blk_get_max_transfer(s->qdev.conf.blk); 723 
int max_io_sectors_blk = 724 max_transfer_blk / s->qdev.blocksize; 725 726 bl.max_io_sectors = 727 MIN_NON_ZERO(max_io_sectors_blk, bl.max_io_sectors); 728 } 729 buflen += scsi_emulate_block_limits(outbuf + buflen, &bl); 730 break; 731 } 732 case 0xb1: /* block device characteristics */ 733 { 734 buflen = 0x40; 735 outbuf[4] = (s->rotation_rate >> 8) & 0xff; 736 outbuf[5] = s->rotation_rate & 0xff; 737 outbuf[6] = 0; /* PRODUCT TYPE */ 738 outbuf[7] = 0; /* WABEREQ | WACEREQ | NOMINAL FORM FACTOR */ 739 outbuf[8] = 0; /* VBULS */ 740 break; 741 } 742 case 0xb2: /* thin provisioning */ 743 { 744 buflen = 8; 745 outbuf[4] = 0; 746 outbuf[5] = 0xe0; /* unmap & write_same 10/16 all supported */ 747 outbuf[6] = s->qdev.conf.discard_granularity ? 2 : 1; 748 outbuf[7] = 0; 749 break; 750 } 751 default: 752 return -1; 753 } 754 /* done with EVPD */ 755 assert(buflen - start <= 255); 756 outbuf[start - 1] = buflen - start; 757 return buflen; 758 } 759 760 static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf) 761 { 762 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 763 int buflen = 0; 764 765 if (req->cmd.buf[1] & 0x1) { 766 /* Vital product data */ 767 return scsi_disk_emulate_vpd_page(req, outbuf); 768 } 769 770 /* Standard INQUIRY data */ 771 if (req->cmd.buf[2] != 0) { 772 return -1; 773 } 774 775 /* PAGE CODE == 0 */ 776 buflen = req->cmd.xfer; 777 if (buflen > SCSI_MAX_INQUIRY_LEN) { 778 buflen = SCSI_MAX_INQUIRY_LEN; 779 } 780 781 outbuf[0] = s->qdev.type & 0x1f; 782 outbuf[1] = (s->features & (1 << SCSI_DISK_F_REMOVABLE)) ? 0x80 : 0; 783 784 strpadcpy((char *) &outbuf[16], 16, s->product, ' '); 785 strpadcpy((char *) &outbuf[8], 8, s->vendor, ' '); 786 787 memset(&outbuf[32], 0, 4); 788 memcpy(&outbuf[32], s->version, MIN(4, strlen(s->version))); 789 /* 790 * We claim conformance to SPC-3, which is required for guests 791 * to ask for modern features like READ CAPACITY(16) or the 792 * block characteristics VPD page by default. 
Not all of SPC-3 793 * is actually implemented, but we're good enough. 794 */ 795 outbuf[2] = s->qdev.default_scsi_version; 796 outbuf[3] = 2 | 0x10; /* Format 2, HiSup */ 797 798 if (buflen > 36) { 799 outbuf[4] = buflen - 5; /* Additional Length = (Len - 1) - 4 */ 800 } else { 801 /* If the allocation length of CDB is too small, 802 the additional length is not adjusted */ 803 outbuf[4] = 36 - 5; 804 } 805 806 /* Sync data transfer and TCQ. */ 807 outbuf[7] = 0x10 | (req->bus->info->tcq ? 0x02 : 0); 808 return buflen; 809 } 810 811 static inline bool media_is_dvd(SCSIDiskState *s) 812 { 813 uint64_t nb_sectors; 814 if (s->qdev.type != TYPE_ROM) { 815 return false; 816 } 817 if (!blk_is_available(s->qdev.conf.blk)) { 818 return false; 819 } 820 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 821 return nb_sectors > CD_MAX_SECTORS; 822 } 823 824 static inline bool media_is_cd(SCSIDiskState *s) 825 { 826 uint64_t nb_sectors; 827 if (s->qdev.type != TYPE_ROM) { 828 return false; 829 } 830 if (!blk_is_available(s->qdev.conf.blk)) { 831 return false; 832 } 833 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 834 return nb_sectors <= CD_MAX_SECTORS; 835 } 836 837 static int scsi_read_disc_information(SCSIDiskState *s, SCSIDiskReq *r, 838 uint8_t *outbuf) 839 { 840 uint8_t type = r->req.cmd.buf[1] & 7; 841 842 if (s->qdev.type != TYPE_ROM) { 843 return -1; 844 } 845 846 /* Types 1/2 are only defined for Blu-Ray. 
*/ 847 if (type != 0) { 848 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 849 return -1; 850 } 851 852 memset(outbuf, 0, 34); 853 outbuf[1] = 32; 854 outbuf[2] = 0xe; /* last session complete, disc finalized */ 855 outbuf[3] = 1; /* first track on disc */ 856 outbuf[4] = 1; /* # of sessions */ 857 outbuf[5] = 1; /* first track of last session */ 858 outbuf[6] = 1; /* last track of last session */ 859 outbuf[7] = 0x20; /* unrestricted use */ 860 outbuf[8] = 0x00; /* CD-ROM or DVD-ROM */ 861 /* 9-10-11: most significant byte corresponding bytes 4-5-6 */ 862 /* 12-23: not meaningful for CD-ROM or DVD-ROM */ 863 /* 24-31: disc bar code */ 864 /* 32: disc application code */ 865 /* 33: number of OPC tables */ 866 867 return 34; 868 } 869 870 static int scsi_read_dvd_structure(SCSIDiskState *s, SCSIDiskReq *r, 871 uint8_t *outbuf) 872 { 873 static const int rds_caps_size[5] = { 874 [0] = 2048 + 4, 875 [1] = 4 + 4, 876 [3] = 188 + 4, 877 [4] = 2048 + 4, 878 }; 879 880 uint8_t media = r->req.cmd.buf[1]; 881 uint8_t layer = r->req.cmd.buf[6]; 882 uint8_t format = r->req.cmd.buf[7]; 883 int size = -1; 884 885 if (s->qdev.type != TYPE_ROM) { 886 return -1; 887 } 888 if (media != 0) { 889 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 890 return -1; 891 } 892 893 if (format != 0xff) { 894 if (!blk_is_available(s->qdev.conf.blk)) { 895 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM)); 896 return -1; 897 } 898 if (media_is_cd(s)) { 899 scsi_check_condition(r, SENSE_CODE(INCOMPATIBLE_FORMAT)); 900 return -1; 901 } 902 if (format >= ARRAY_SIZE(rds_caps_size)) { 903 return -1; 904 } 905 size = rds_caps_size[format]; 906 memset(outbuf, 0, size); 907 } 908 909 switch (format) { 910 case 0x00: { 911 /* Physical format information */ 912 uint64_t nb_sectors; 913 if (layer != 0) { 914 goto fail; 915 } 916 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 917 918 outbuf[4] = 1; /* DVD-ROM, part version 1 */ 919 outbuf[5] = 0xf; /* 120mm disc, minimum rate unspecified */ 920 
outbuf[6] = 1; /* one layer, read-only (per MMC-2 spec) */ 921 outbuf[7] = 0; /* default densities */ 922 923 stl_be_p(&outbuf[12], (nb_sectors >> 2) - 1); /* end sector */ 924 stl_be_p(&outbuf[16], (nb_sectors >> 2) - 1); /* l0 end sector */ 925 break; 926 } 927 928 case 0x01: /* DVD copyright information, all zeros */ 929 break; 930 931 case 0x03: /* BCA information - invalid field for no BCA info */ 932 return -1; 933 934 case 0x04: /* DVD disc manufacturing information, all zeros */ 935 break; 936 937 case 0xff: { /* List capabilities */ 938 int i; 939 size = 4; 940 for (i = 0; i < ARRAY_SIZE(rds_caps_size); i++) { 941 if (!rds_caps_size[i]) { 942 continue; 943 } 944 outbuf[size] = i; 945 outbuf[size + 1] = 0x40; /* Not writable, readable */ 946 stw_be_p(&outbuf[size + 2], rds_caps_size[i]); 947 size += 4; 948 } 949 break; 950 } 951 952 default: 953 return -1; 954 } 955 956 /* Size of buffer, not including 2 byte size field */ 957 stw_be_p(outbuf, size - 2); 958 return size; 959 960 fail: 961 return -1; 962 } 963 964 static int scsi_event_status_media(SCSIDiskState *s, uint8_t *outbuf) 965 { 966 uint8_t event_code, media_status; 967 968 media_status = 0; 969 if (s->tray_open) { 970 media_status = MS_TRAY_OPEN; 971 } else if (blk_is_inserted(s->qdev.conf.blk)) { 972 media_status = MS_MEDIA_PRESENT; 973 } 974 975 /* Event notification descriptor */ 976 event_code = MEC_NO_CHANGE; 977 if (media_status != MS_TRAY_OPEN) { 978 if (s->media_event) { 979 event_code = MEC_NEW_MEDIA; 980 s->media_event = false; 981 } else if (s->eject_request) { 982 event_code = MEC_EJECT_REQUESTED; 983 s->eject_request = false; 984 } 985 } 986 987 outbuf[0] = event_code; 988 outbuf[1] = media_status; 989 990 /* These fields are reserved, just clear them. 
*/ 991 outbuf[2] = 0; 992 outbuf[3] = 0; 993 return 4; 994 } 995 996 static int scsi_get_event_status_notification(SCSIDiskState *s, SCSIDiskReq *r, 997 uint8_t *outbuf) 998 { 999 int size; 1000 uint8_t *buf = r->req.cmd.buf; 1001 uint8_t notification_class_request = buf[4]; 1002 if (s->qdev.type != TYPE_ROM) { 1003 return -1; 1004 } 1005 if ((buf[1] & 1) == 0) { 1006 /* asynchronous */ 1007 return -1; 1008 } 1009 1010 size = 4; 1011 outbuf[0] = outbuf[1] = 0; 1012 outbuf[3] = 1 << GESN_MEDIA; /* supported events */ 1013 if (notification_class_request & (1 << GESN_MEDIA)) { 1014 outbuf[2] = GESN_MEDIA; 1015 size += scsi_event_status_media(s, &outbuf[size]); 1016 } else { 1017 outbuf[2] = 0x80; 1018 } 1019 stw_be_p(outbuf, size - 4); 1020 return size; 1021 } 1022 1023 static int scsi_get_configuration(SCSIDiskState *s, uint8_t *outbuf) 1024 { 1025 int current; 1026 1027 if (s->qdev.type != TYPE_ROM) { 1028 return -1; 1029 } 1030 1031 if (media_is_dvd(s)) { 1032 current = MMC_PROFILE_DVD_ROM; 1033 } else if (media_is_cd(s)) { 1034 current = MMC_PROFILE_CD_ROM; 1035 } else { 1036 current = MMC_PROFILE_NONE; 1037 } 1038 1039 memset(outbuf, 0, 40); 1040 stl_be_p(&outbuf[0], 36); /* Bytes after the data length field */ 1041 stw_be_p(&outbuf[6], current); 1042 /* outbuf[8] - outbuf[19]: Feature 0 - Profile list */ 1043 outbuf[10] = 0x03; /* persistent, current */ 1044 outbuf[11] = 8; /* two profiles */ 1045 stw_be_p(&outbuf[12], MMC_PROFILE_DVD_ROM); 1046 outbuf[14] = (current == MMC_PROFILE_DVD_ROM); 1047 stw_be_p(&outbuf[16], MMC_PROFILE_CD_ROM); 1048 outbuf[18] = (current == MMC_PROFILE_CD_ROM); 1049 /* outbuf[20] - outbuf[31]: Feature 1 - Core feature */ 1050 stw_be_p(&outbuf[20], 1); 1051 outbuf[22] = 0x08 | 0x03; /* version 2, persistent, current */ 1052 outbuf[23] = 8; 1053 stl_be_p(&outbuf[24], 1); /* SCSI */ 1054 outbuf[28] = 1; /* DBE = 1, mandatory */ 1055 /* outbuf[32] - outbuf[39]: Feature 3 - Removable media feature */ 1056 stw_be_p(&outbuf[32], 3); 1057 
outbuf[34] = 0x08 | 0x03; /* version 2, persistent, current */ 1058 outbuf[35] = 4; 1059 outbuf[36] = 0x39; /* tray, load=1, eject=1, unlocked at powerup, lock=1 */ 1060 /* TODO: Random readable, CD read, DVD read, drive serial number, 1061 power management */ 1062 return 40; 1063 } 1064 1065 static int scsi_emulate_mechanism_status(SCSIDiskState *s, uint8_t *outbuf) 1066 { 1067 if (s->qdev.type != TYPE_ROM) { 1068 return -1; 1069 } 1070 memset(outbuf, 0, 8); 1071 outbuf[5] = 1; /* CD-ROM */ 1072 return 8; 1073 } 1074 1075 static int mode_sense_page(SCSIDiskState *s, int page, uint8_t **p_outbuf, 1076 int page_control) 1077 { 1078 static const int mode_sense_valid[0x3f] = { 1079 [MODE_PAGE_HD_GEOMETRY] = (1 << TYPE_DISK), 1080 [MODE_PAGE_FLEXIBLE_DISK_GEOMETRY] = (1 << TYPE_DISK), 1081 [MODE_PAGE_CACHING] = (1 << TYPE_DISK) | (1 << TYPE_ROM), 1082 [MODE_PAGE_R_W_ERROR] = (1 << TYPE_DISK) | (1 << TYPE_ROM), 1083 [MODE_PAGE_AUDIO_CTL] = (1 << TYPE_ROM), 1084 [MODE_PAGE_CAPABILITIES] = (1 << TYPE_ROM), 1085 }; 1086 1087 uint8_t *p = *p_outbuf + 2; 1088 int length; 1089 1090 assert(page < ARRAY_SIZE(mode_sense_valid)); 1091 if ((mode_sense_valid[page] & (1 << s->qdev.type)) == 0) { 1092 return -1; 1093 } 1094 1095 /* 1096 * If Changeable Values are requested, a mask denoting those mode parameters 1097 * that are changeable shall be returned. As we currently don't support 1098 * parameter changes via MODE_SELECT all bits are returned set to zero. 1099 * The buffer was already menset to zero by the caller of this function. 1100 * 1101 * The offsets here are off by two compared to the descriptions in the 1102 * SCSI specs, because those include a 2-byte header. This is unfortunate, 1103 * but it is done so that offsets are consistent within our implementation 1104 * of MODE SENSE and MODE SELECT. MODE SELECT has to deal with both 1105 * 2-byte and 4-byte headers. 
1106 */ 1107 switch (page) { 1108 case MODE_PAGE_HD_GEOMETRY: 1109 length = 0x16; 1110 if (page_control == 1) { /* Changeable Values */ 1111 break; 1112 } 1113 /* if a geometry hint is available, use it */ 1114 p[0] = (s->qdev.conf.cyls >> 16) & 0xff; 1115 p[1] = (s->qdev.conf.cyls >> 8) & 0xff; 1116 p[2] = s->qdev.conf.cyls & 0xff; 1117 p[3] = s->qdev.conf.heads & 0xff; 1118 /* Write precomp start cylinder, disabled */ 1119 p[4] = (s->qdev.conf.cyls >> 16) & 0xff; 1120 p[5] = (s->qdev.conf.cyls >> 8) & 0xff; 1121 p[6] = s->qdev.conf.cyls & 0xff; 1122 /* Reduced current start cylinder, disabled */ 1123 p[7] = (s->qdev.conf.cyls >> 16) & 0xff; 1124 p[8] = (s->qdev.conf.cyls >> 8) & 0xff; 1125 p[9] = s->qdev.conf.cyls & 0xff; 1126 /* Device step rate [ns], 200ns */ 1127 p[10] = 0; 1128 p[11] = 200; 1129 /* Landing zone cylinder */ 1130 p[12] = 0xff; 1131 p[13] = 0xff; 1132 p[14] = 0xff; 1133 /* Medium rotation rate [rpm], 5400 rpm */ 1134 p[18] = (5400 >> 8) & 0xff; 1135 p[19] = 5400 & 0xff; 1136 break; 1137 1138 case MODE_PAGE_FLEXIBLE_DISK_GEOMETRY: 1139 length = 0x1e; 1140 if (page_control == 1) { /* Changeable Values */ 1141 break; 1142 } 1143 /* Transfer rate [kbit/s], 5Mbit/s */ 1144 p[0] = 5000 >> 8; 1145 p[1] = 5000 & 0xff; 1146 /* if a geometry hint is available, use it */ 1147 p[2] = s->qdev.conf.heads & 0xff; 1148 p[3] = s->qdev.conf.secs & 0xff; 1149 p[4] = s->qdev.blocksize >> 8; 1150 p[6] = (s->qdev.conf.cyls >> 8) & 0xff; 1151 p[7] = s->qdev.conf.cyls & 0xff; 1152 /* Write precomp start cylinder, disabled */ 1153 p[8] = (s->qdev.conf.cyls >> 8) & 0xff; 1154 p[9] = s->qdev.conf.cyls & 0xff; 1155 /* Reduced current start cylinder, disabled */ 1156 p[10] = (s->qdev.conf.cyls >> 8) & 0xff; 1157 p[11] = s->qdev.conf.cyls & 0xff; 1158 /* Device step rate [100us], 100us */ 1159 p[12] = 0; 1160 p[13] = 1; 1161 /* Device step pulse width [us], 1us */ 1162 p[14] = 1; 1163 /* Device head settle delay [100us], 100us */ 1164 p[15] = 0; 1165 p[16] = 1; 1166 /* Motor 
on delay [0.1s], 0.1s */ 1167 p[17] = 1; 1168 /* Motor off delay [0.1s], 0.1s */ 1169 p[18] = 1; 1170 /* Medium rotation rate [rpm], 5400 rpm */ 1171 p[26] = (5400 >> 8) & 0xff; 1172 p[27] = 5400 & 0xff; 1173 break; 1174 1175 case MODE_PAGE_CACHING: 1176 length = 0x12; 1177 if (page_control == 1 || /* Changeable Values */ 1178 blk_enable_write_cache(s->qdev.conf.blk)) { 1179 p[0] = 4; /* WCE */ 1180 } 1181 break; 1182 1183 case MODE_PAGE_R_W_ERROR: 1184 length = 10; 1185 if (page_control == 1) { /* Changeable Values */ 1186 break; 1187 } 1188 p[0] = 0x80; /* Automatic Write Reallocation Enabled */ 1189 if (s->qdev.type == TYPE_ROM) { 1190 p[1] = 0x20; /* Read Retry Count */ 1191 } 1192 break; 1193 1194 case MODE_PAGE_AUDIO_CTL: 1195 length = 14; 1196 break; 1197 1198 case MODE_PAGE_CAPABILITIES: 1199 length = 0x14; 1200 if (page_control == 1) { /* Changeable Values */ 1201 break; 1202 } 1203 1204 p[0] = 0x3b; /* CD-R & CD-RW read */ 1205 p[1] = 0; /* Writing not supported */ 1206 p[2] = 0x7f; /* Audio, composite, digital out, 1207 mode 2 form 1&2, multi session */ 1208 p[3] = 0xff; /* CD DA, DA accurate, RW supported, 1209 RW corrected, C2 errors, ISRC, 1210 UPC, Bar code */ 1211 p[4] = 0x2d | (s->tray_locked ? 
2 : 0);
        /* Locking supported, jumper present, eject, tray */
        p[5] = 0; /* no volume & mute control, no
                     changer */
        p[6] = (50 * 176) >> 8; /* 50x read speed */
        p[7] = (50 * 176) & 0xff;
        p[8] = 2 >> 8; /* Two volume levels */
        p[9] = 2 & 0xff;
        p[10] = 2048 >> 8; /* 2M buffer */
        p[11] = 2048 & 0xff;
        p[12] = (16 * 176) >> 8; /* 16x read speed current */
        p[13] = (16 * 176) & 0xff;
        p[16] = (16 * 176) >> 8; /* 16x write speed */
        p[17] = (16 * 176) & 0xff;
        p[18] = (16 * 176) >> 8; /* 16x write speed current */
        p[19] = (16 * 176) & 0xff;
        break;

    default:
        return -1;
    }

    /* Fill in the two-byte page header and advance the caller's output
     * pointer past the page just emitted.  The return value is the total
     * number of bytes written (header + payload). */
    assert(length < 256);
    (*p_outbuf)[0] = page;
    (*p_outbuf)[1] = length;
    *p_outbuf += length + 2;
    return length + 2;
}

/*
 * Emulate MODE SENSE (6) and MODE SENSE (10).
 *
 * Builds the mode parameter header, an optional short-format block
 * descriptor, and the requested mode page(s) into @outbuf.  Returns the
 * number of valid bytes in @outbuf, or -1 on error (unsupported page, or
 * PC = 3 which raises SAVING PARAMETERS NOT SUPPORTED).
 */
static int scsi_disk_emulate_mode_sense(SCSIDiskReq *r, uint8_t *outbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    uint64_t nb_sectors;
    bool dbd;
    int page, buflen, ret, page_control;
    uint8_t *p;
    uint8_t dev_specific_param;

    /* CDB fields: DBD (disable block descriptors), PAGE CODE, PC. */
    dbd = (r->req.cmd.buf[1] & 0x8) != 0;
    page = r->req.cmd.buf[2] & 0x3f;
    page_control = (r->req.cmd.buf[2] & 0xc0) >> 6;

    trace_scsi_disk_emulate_mode_sense((r->req.cmd.buf[0] == MODE_SENSE) ? 6 :
                                       10, page, r->req.cmd.xfer, page_control);
    memset(outbuf, 0, r->req.cmd.xfer);
    p = outbuf;

    if (s->qdev.type == TYPE_DISK) {
        dev_specific_param = s->features & (1 << SCSI_DISK_F_DPOFUA) ? 0x10 : 0;
        if (!blk_is_writable(s->qdev.conf.blk)) {
            dev_specific_param |= 0x80; /* Readonly.  */
        }
    } else {
        /* MMC prescribes that CD/DVD drives have no block descriptors,
         * and defines no device-specific parameter. */
        dev_specific_param = 0x00;
        dbd = true;
    }

    if (r->req.cmd.buf[0] == MODE_SENSE) {
        p[1] = 0; /* Default media type. */
        p[2] = dev_specific_param;
        p[3] = 0; /* Block descriptor length. */
        p += 4;
    } else { /* MODE_SENSE_10 */
        p[2] = 0; /* Default media type. */
        p[3] = dev_specific_param;
        p[6] = p[7] = 0; /* Block descriptor length. */
        p += 8;
    }

    blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
    if (!dbd && nb_sectors) {
        if (r->req.cmd.buf[0] == MODE_SENSE) {
            outbuf[3] = 8; /* Block descriptor length  */
        } else { /* MODE_SENSE_10 */
            outbuf[7] = 8; /* Block descriptor length  */
        }
        nb_sectors /= (s->qdev.blocksize / BDRV_SECTOR_SIZE);
        /* The short block descriptor has only 24 bits for the block
         * count; 0 means "more blocks than can be represented". */
        if (nb_sectors > 0xffffff) {
            nb_sectors = 0;
        }
        p[0] = 0; /* media density code */
        p[1] = (nb_sectors >> 16) & 0xff;
        p[2] = (nb_sectors >> 8) & 0xff;
        p[3] = nb_sectors & 0xff;
        p[4] = 0; /* reserved */
        p[5] = 0; /* bytes 5-7 are the sector size in bytes */
        p[6] = s->qdev.blocksize >> 8;
        p[7] = 0;
        p += 8;
    }

    if (page_control == 3) {
        /* Saved Values */
        scsi_check_condition(r, SENSE_CODE(SAVING_PARAMS_NOT_SUPPORTED));
        return -1;
    }

    if (page == 0x3f) {
        /* Return all pages; unsupported ones are simply skipped. */
        for (page = 0; page <= 0x3e; page++) {
            mode_sense_page(s, page, &p, page_control);
        }
    } else {
        ret = mode_sense_page(s, page, &p, page_control);
        if (ret == -1) {
            return -1;
        }
    }

    buflen = p - outbuf;
    /*
     * The mode data length field specifies the length in bytes of the
     * following data that is available to be transferred. The mode data
     * length does not include itself.
     */
    if (r->req.cmd.buf[0] == MODE_SENSE) {
        outbuf[0] = buflen - 1;
    } else { /* MODE_SENSE_10 */
        outbuf[0] = ((buflen - 2) >> 8) & 0xff;
        outbuf[1] = (buflen - 2) & 0xff;
    }
    return buflen;
}

/*
 * Emulate READ TOC.  Format 0 returns the full TOC, format 1 a
 * single-session stub, format 2 the raw TOC; other formats are
 * rejected.  Returns the response length, or -1 for an unsupported
 * format.
 */
static int scsi_disk_emulate_read_toc(SCSIRequest *req, uint8_t *outbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    int start_track, format, msf, toclen;
    uint64_t nb_sectors;

    msf = req->cmd.buf[1] & 2; /* report addresses in MSF format */
    format = req->cmd.buf[2] & 0xf;
    start_track = req->cmd.buf[6];
    blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
    trace_scsi_disk_emulate_read_toc(start_track, format, msf >> 1);
    nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE;
    switch (format) {
    case 0:
        toclen = cdrom_read_toc(nb_sectors, outbuf, msf, start_track);
        break;
    case 1:
        /* multi session : only a single session defined */
        toclen = 12;
        memset(outbuf, 0, 12);
        outbuf[1] = 0x0a;
        outbuf[2] = 0x01;
        outbuf[3] = 0x01;
        break;
    case 2:
        toclen = cdrom_read_toc_raw(nb_sectors, outbuf, msf, start_track);
        break;
    default:
        return -1;
    }
    return toclen;
}

/*
 * Emulate START STOP UNIT.  Loads or ejects the medium for removable
 * devices.  Returns 0 on success, -1 (with sense already set) if the
 * eject is refused because the tray is locked.
 */
static int scsi_disk_emulate_start_stop(SCSIDiskReq *r)
{
    SCSIRequest *req = &r->req;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    bool start = req->cmd.buf[4] & 1;
    bool loej = req->cmd.buf[4] & 2; /* load on start, eject on !start */
    int pwrcnd = req->cmd.buf[4] & 0xf0;

    if (pwrcnd) {
        /* eject/load only happens for power condition == 0 */
        return 0;
    }

    if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) && loej) {
        if (!start && !s->tray_open && s->tray_locked) {
            /* Guest locked the tray; sense depends on medium presence. */
            scsi_check_condition(r,
                                 blk_is_inserted(s->qdev.conf.blk)
                                 ? SENSE_CODE(ILLEGAL_REQ_REMOVAL_PREVENTED)
                                 : SENSE_CODE(NOT_READY_REMOVAL_PREVENTED));
            return -1;
        }

        if (s->tray_open != !start) {
            blk_eject(s->qdev.conf.blk, !start);
            s->tray_open = !start;
        }
    }
    return 0;
}

/*
 * Data-in phase for emulated commands: the first call hands the
 * prepared buffer to the HBA, the second call completes the request.
 */
static void scsi_disk_emulate_read_data(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    int buflen = r->iov.iov_len;

    if (buflen) {
        trace_scsi_disk_emulate_read_data(buflen);
        r->iov.iov_len = 0;
        r->started = true;
        scsi_req_data(&r->req, buflen);
        return;
    }

    /* This also clears the sense buffer for REQUEST SENSE.  */
    scsi_req_complete(&r->req, GOOD);
}

/*
 * Validate a MODE SELECT page payload: the new page may differ from the
 * current MODE SENSE values only in bits that the changeable-values
 * mask (page_control == 1) allows.  Returns 0 if acceptable, -1
 * otherwise.
 */
static int scsi_disk_check_mode_select(SCSIDiskState *s, int page,
                                       uint8_t *inbuf, int inlen)
{
    uint8_t mode_current[SCSI_MAX_MODE_LEN];
    uint8_t mode_changeable[SCSI_MAX_MODE_LEN];
    uint8_t *p;
    int len, expected_len, changeable_len, i;

    /* The input buffer does not include the page header, so it is
     * off by 2 bytes.
     */
    expected_len = inlen + 2;
    if (expected_len > SCSI_MAX_MODE_LEN) {
        return -1;
    }

    /* MODE_PAGE_ALLS is only valid for MODE SENSE commands */
    if (page == MODE_PAGE_ALLS) {
        return -1;
    }

    /* Current values (page_control == 0)... */
    p = mode_current;
    memset(mode_current, 0, inlen + 2);
    len = mode_sense_page(s, page, &p, 0);
    if (len < 0 || len != expected_len) {
        return -1;
    }

    /* ...and the changeable-bits mask (page_control == 1). */
    p = mode_changeable;
    memset(mode_changeable, 0, inlen + 2);
    changeable_len = mode_sense_page(s, page, &p, 1);
    assert(changeable_len == len);

    /* Check that unchangeable bits are the same as what MODE SENSE
     * would return.
     */
    for (i = 2; i < len; i++) {
        if (((mode_current[i] ^ inbuf[i - 2]) & ~mode_changeable[i]) != 0) {
            return -1;
        }
    }
    return 0;
}

/* Apply a validated MODE SELECT page.  Only the caching page has any
 * effect here: its WCE bit toggles the backend's write cache. */
static void scsi_disk_apply_mode_select(SCSIDiskState *s, int page, uint8_t *p)
{
    switch (page) {
    case MODE_PAGE_CACHING:
        blk_set_enable_write_cache(s->qdev.conf.blk, (p[0] & 4) != 0);
        break;

    default:
        break;
    }
}

/*
 * Walk the list of mode pages in a MODE SELECT parameter list.  With
 * @change false the pages are only validated; with @change true they
 * are applied.  Returns 0 on success, -1 after setting a check
 * condition on @r.
 */
static int mode_select_pages(SCSIDiskReq *r, uint8_t *p, int len, bool change)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    while (len > 0) {
        int page, subpage, page_len;

        /* Parse both possible formats for the mode page headers.  */
        page = p[0] & 0x3f;
        if (p[0] & 0x40) {
            /* SPF set: 4-byte sub-page format. */
            if (len < 4) {
                goto invalid_param_len;
            }
            subpage = p[1];
            page_len = lduw_be_p(&p[2]);
            p += 4;
            len -= 4;
        } else {
            /* 2-byte page-0 format. */
            if (len < 2) {
                goto invalid_param_len;
            }
            subpage = 0;
            page_len = p[1];
            p += 2;
            len -= 2;
        }

        if (subpage) {
            goto invalid_param;
        }
        if (page_len > len) {
            goto invalid_param_len;
        }

        if (!change) {
            if (scsi_disk_check_mode_select(s, page, p, page_len) < 0) {
                goto invalid_param;
            }
        } else {
            scsi_disk_apply_mode_select(s, page, p);
        }

        p += page_len;
        len -= page_len;
    }
    return 0;

invalid_param:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM));
    return -1;

invalid_param_len:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
    return -1;
}

/*
 * Emulate MODE SELECT (6) and MODE SELECT (10).  The parameter list in
 * @inbuf is validated in a first pass and applied in a second, so no
 * change is made if any page is invalid.  If the write cache is off,
 * the request only completes after flushing the backend.
 */
static void scsi_disk_emulate_mode_select(SCSIDiskReq *r, uint8_t *inbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    uint8_t *p = inbuf;
    int cmd = r->req.cmd.buf[0];
    int len = r->req.cmd.xfer;
    int hdr_len = (cmd == MODE_SELECT ? 4 : 8);
    int bd_len;
    int pass;

    /* We only support PF=1, SP=0.  */
    if ((r->req.cmd.buf[1] & 0x11) != 0x10) {
        goto invalid_field;
    }

    if (len < hdr_len) {
        goto invalid_param_len;
    }

    /* Skip the parameter header and the optional (8-byte) block
     * descriptor. */
    bd_len = (cmd == MODE_SELECT ? p[3] : lduw_be_p(&p[6]));
    len -= hdr_len;
    p += hdr_len;
    if (len < bd_len) {
        goto invalid_param_len;
    }
    if (bd_len != 0 && bd_len != 8) {
        goto invalid_param;
    }

    len -= bd_len;
    p += bd_len;

    /* Ensure no change is made if there is an error!  */
    for (pass = 0; pass < 2; pass++) {
        if (mode_select_pages(r, p, len, pass == 1) < 0) {
            assert(pass == 0);
            return;
        }
    }
    if (!blk_enable_write_cache(s->qdev.conf.blk)) {
        /* The request is used as the AIO opaque value, so add a ref.  */
        scsi_req_ref(&r->req);
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
        return;
    }

    scsi_req_complete(&r->req, GOOD);
    return;

invalid_param:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM));
    return;

invalid_param_len:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
    return;

invalid_field:
    scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
}

/* sector_num and nb_sectors expected to be in qdev blocksize */
static inline bool check_lba_range(SCSIDiskState *s,
                                   uint64_t sector_num, uint32_t nb_sectors)
{
    /*
     * The first line tests that no overflow happens when computing the last
     * sector. The second line tests that the last accessed sector is in
     * range.
     *
     * Careful, the computations should not underflow for nb_sectors == 0,
     * and a 0-block read to the first LBA beyond the end of device is
     * valid.
     */
    return (sector_num <= sector_num + nb_sectors &&
            sector_num + nb_sectors <= s->qdev.max_lba + 1);
}

/* State shared by the UNMAP descriptor-walking callbacks. */
typedef struct UnmapCBData {
    SCSIDiskReq *r;
    uint8_t *inbuf;   /* next 16-byte UNMAP block descriptor */
    int count;        /* descriptors left to process */
} UnmapCBData;

static void scsi_unmap_complete(void *opaque, int ret);

/*
 * Issue the discard for the next UNMAP descriptor, or complete the
 * request once all descriptors have been consumed.  Called either
 * directly from scsi_disk_emulate_unmap or from the AIO completion
 * callback below.
 */
static void scsi_unmap_complete_noio(UnmapCBData *data, int ret)
{
    SCSIDiskReq *r = data->r;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb == NULL);

    if (data->count > 0) {
        uint64_t sector_num = ldq_be_p(&data->inbuf[0]);
        uint32_t nb_sectors = ldl_be_p(&data->inbuf[8]) & 0xffffffffULL;
        r->sector = sector_num * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
        r->sector_count = nb_sectors * (s->qdev.blocksize / BDRV_SECTOR_SIZE);

        if (!check_lba_range(s, sector_num, nb_sectors)) {
            block_acct_invalid(blk_get_stats(s->qdev.conf.blk),
                               BLOCK_ACCT_UNMAP);
            scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
            goto done;
        }

        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         r->sector_count * BDRV_SECTOR_SIZE,
                         BLOCK_ACCT_UNMAP);

        r->req.aiocb = blk_aio_pdiscard(s->qdev.conf.blk,
                                        r->sector * BDRV_SECTOR_SIZE,
                                        r->sector_count * BDRV_SECTOR_SIZE,
                                        scsi_unmap_complete, data);
        data->count--;
        data->inbuf += 16;
        return;
    }

    scsi_req_complete(&r->req, GOOD);

done:
    /* Drops the reference taken in scsi_disk_emulate_unmap. */
    scsi_req_unref(&r->req);
    g_free(data);
}

/* AIO completion for one discard; on success continues with the next
 * UNMAP descriptor, on error releases the request and callback data. */
static void scsi_unmap_complete(void *opaque, int ret)
{
    UnmapCBData *data = opaque;
    SCSIDiskReq *r = data->r;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (scsi_disk_req_check_error(r, ret, true)) {
        scsi_req_unref(&r->req);
        g_free(data);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
        scsi_unmap_complete_noio(data, ret);
    }
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}

/*
 * Emulate UNMAP.  Validates the parameter list header and the block
 * descriptor list, then processes the descriptors one discard at a
 * time via scsi_unmap_complete_noio/scsi_unmap_complete.
 */
static void scsi_disk_emulate_unmap(SCSIDiskReq *r, uint8_t *inbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    uint8_t *p = inbuf;
    int len = r->req.cmd.xfer;
    UnmapCBData *data;

    /* Reject ANCHOR=1.  */
    if (r->req.cmd.buf[1] & 0x1) {
        goto invalid_field;
    }

    /* Sanity-check UNMAP DATA LENGTH (p[0]) and UNMAP BLOCK DESCRIPTOR
     * DATA LENGTH (p[2]) against the transfer length; descriptors are
     * 16 bytes each. */
    if (len < 8) {
        goto invalid_param_len;
    }
    if (len < lduw_be_p(&p[0]) + 2) {
        goto invalid_param_len;
    }
    if (len < lduw_be_p(&p[2]) + 8) {
        goto invalid_param_len;
    }
    if (lduw_be_p(&p[2]) & 15) {
        goto invalid_param_len;
    }

    if (!blk_is_writable(s->qdev.conf.blk)) {
        block_acct_invalid(blk_get_stats(s->qdev.conf.blk), BLOCK_ACCT_UNMAP);
        scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
        return;
    }

    data = g_new0(UnmapCBData, 1);
    data->r = r;
    data->inbuf = &p[8];
    data->count = lduw_be_p(&p[2]) >> 4;

    /* The matching unref is in scsi_unmap_complete, before data is freed.
     */
    scsi_req_ref(&r->req);
    scsi_unmap_complete_noio(data, 0);
    return;

invalid_param_len:
    block_acct_invalid(blk_get_stats(s->qdev.conf.blk), BLOCK_ACCT_UNMAP);
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
    return;

invalid_field:
    block_acct_invalid(blk_get_stats(s->qdev.conf.blk), BLOCK_ACCT_UNMAP);
    scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
}

/* State for a chunked WRITE SAME, issued SCSI_WRITE_SAME_MAX bytes at
 * a time. */
typedef struct WriteSameCBData {
    SCSIDiskReq *r;
    int64_t sector;      /* next sector to write, in 512-byte units */
    int nb_sectors;      /* sectors remaining, in 512-byte units */
    QEMUIOVector qiov;
    struct iovec iov;
} WriteSameCBData;

/* AIO completion for one WRITE SAME chunk; issues the next chunk or
 * completes the request once everything has been written. */
static void scsi_write_same_complete(void *opaque, int ret)
{
    WriteSameCBData *data = opaque;
    SCSIDiskReq *r = data->r;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;
    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (scsi_disk_req_check_error(r, ret, true)) {
        goto done;
    }

    block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);

    data->nb_sectors -= data->iov.iov_len / BDRV_SECTOR_SIZE;
    data->sector += data->iov.iov_len / BDRV_SECTOR_SIZE;
    data->iov.iov_len = MIN(data->nb_sectors * BDRV_SECTOR_SIZE,
                            data->iov.iov_len);
    if (data->iov.iov_len) {
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         data->iov.iov_len, BLOCK_ACCT_WRITE);
        /* Reinitialize qiov, to handle unaligned WRITE SAME request
         * where final qiov may need smaller size */
        qemu_iovec_init_external(&data->qiov, &data->iov, 1);
        r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk,
                                       data->sector << BDRV_SECTOR_BITS,
                                       &data->qiov, 0,
                                       scsi_write_same_complete, data);
        aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
        return;
    }

    scsi_req_complete(&r->req, GOOD);

done:
    scsi_req_unref(&r->req);
    qemu_vfree(data->iov.iov_base);
    g_free(data);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}

/*
 * Emulate WRITE SAME (10/16).  A zero pattern (or the UNMAP bit) is
 * turned into a single write-zeroes operation; any other pattern is
 * replicated into a bounce buffer and written out in chunks.
 */
static void scsi_disk_emulate_write_same(SCSIDiskReq *r, uint8_t *inbuf)
{
    SCSIRequest *req = &r->req;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    uint32_t nb_sectors = scsi_data_cdb_xfer(r->req.cmd.buf);
    WriteSameCBData *data;
    uint8_t *buf;
    int i;

    /* Fail if PBDATA=1 or LBDATA=1 or ANCHOR=1.  */
    if (nb_sectors == 0 || (req->cmd.buf[1] & 0x16)) {
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        return;
    }

    if (!blk_is_writable(s->qdev.conf.blk)) {
        scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
        return;
    }
    if (!check_lba_range(s, r->req.cmd.lba, nb_sectors)) {
        scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
        return;
    }

    if ((req->cmd.buf[1] & 0x1) || buffer_is_zero(inbuf, s->qdev.blocksize)) {
        /* UNMAP bit set, or all-zero pattern: write-zeroes fast path. */
        int flags = (req->cmd.buf[1] & 0x8) ? BDRV_REQ_MAY_UNMAP : 0;

        /* The request is used as the AIO opaque value, so add a ref.  */
        scsi_req_ref(&r->req);
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         nb_sectors * s->qdev.blocksize,
                         BLOCK_ACCT_WRITE);
        r->req.aiocb = blk_aio_pwrite_zeroes(s->qdev.conf.blk,
                                             r->req.cmd.lba * s->qdev.blocksize,
                                             nb_sectors * s->qdev.blocksize,
                                             flags, scsi_aio_complete, r);
        return;
    }

    data = g_new0(WriteSameCBData, 1);
    data->r = r;
    data->sector = r->req.cmd.lba * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
    data->nb_sectors = nb_sectors * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
    data->iov.iov_len = MIN(data->nb_sectors * BDRV_SECTOR_SIZE,
                            SCSI_WRITE_SAME_MAX);
    data->iov.iov_base = buf = blk_blockalign(s->qdev.conf.blk,
                                              data->iov.iov_len);
    qemu_iovec_init_external(&data->qiov, &data->iov, 1);

    /* Replicate the one-block pattern across the bounce buffer. */
    for (i = 0; i < data->iov.iov_len; i += s->qdev.blocksize) {
        memcpy(&buf[i], inbuf, s->qdev.blocksize);
    }

    /* The request is used as the AIO opaque value, so add a ref. */
    scsi_req_ref(&r->req);
    block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                     data->iov.iov_len, BLOCK_ACCT_WRITE);
    r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk,
                                   data->sector << BDRV_SECTOR_BITS,
                                   &data->qiov, 0,
                                   scsi_write_same_complete, data);
}

/*
 * Data-out phase for emulated commands: the first call collects the
 * parameter data from the HBA, the second dispatches it to the
 * command-specific handler.
 */
static void scsi_disk_emulate_write_data(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    if (r->iov.iov_len) {
        int buflen = r->iov.iov_len;
        trace_scsi_disk_emulate_write_data(buflen);
        r->iov.iov_len = 0;
        scsi_req_data(&r->req, buflen);
        return;
    }

    switch (req->cmd.buf[0]) {
    case MODE_SELECT:
    case MODE_SELECT_10:
        /* This also clears the sense buffer for REQUEST SENSE.  */
        scsi_disk_emulate_mode_select(r, r->iov.iov_base);
        break;

    case UNMAP:
        scsi_disk_emulate_unmap(r, r->iov.iov_base);
        break;

    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
        if (r->req.status == -1) {
            scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        }
        break;

    case WRITE_SAME_10:
    case WRITE_SAME_16:
        scsi_disk_emulate_write_same(r, r->iov.iov_base);
        break;

    default:
        abort();
    }
}

/*
 * Emulate the non-DMA SCSI commands.  Returns the number of bytes to
 * transfer to the initiator (positive), from the initiator (negative),
 * or 0 when the command has already completed or failed.
 */
static int32_t scsi_disk_emulate_command(SCSIRequest *req, uint8_t *buf)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    uint64_t nb_sectors;
    uint8_t *outbuf;
    int buflen;

    /* Commands in the first group are allowed without a medium. */
    switch (req->cmd.buf[0]) {
    case INQUIRY:
    case MODE_SENSE:
    case MODE_SENSE_10:
    case RESERVE:
    case RESERVE_10:
    case RELEASE:
    case RELEASE_10:
    case START_STOP:
    case ALLOW_MEDIUM_REMOVAL:
    case GET_CONFIGURATION:
    case GET_EVENT_STATUS_NOTIFICATION:
    case MECHANISM_STATUS:
    case REQUEST_SENSE:
        break;

    default:
        if (!blk_is_available(s->qdev.conf.blk)) {
            scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
            return 0;
        }
        break;
    }

    /*
     * FIXME: we shouldn't return anything bigger than 4k, but the code
     * requires the buffer to be as big as req->cmd.xfer in several
     * places. So, do not allow CDBs with a very large ALLOCATION
     * LENGTH. The real fix would be to modify scsi_read_data and
     * dma_buf_read, so that they return data beyond the buflen
     * as all zeros.
     */
    if (req->cmd.xfer > 65536) {
        goto illegal_request;
    }
    r->buflen = MAX(4096, req->cmd.xfer);

    if (!r->iov.iov_base) {
        r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen);
    }

    outbuf = r->iov.iov_base;
    memset(outbuf, 0, r->buflen);
    switch (req->cmd.buf[0]) {
    case TEST_UNIT_READY:
        assert(blk_is_available(s->qdev.conf.blk));
        break;
    case INQUIRY:
        buflen = scsi_disk_emulate_inquiry(req, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case MODE_SENSE:
    case MODE_SENSE_10:
        buflen = scsi_disk_emulate_mode_sense(r, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case READ_TOC:
        buflen = scsi_disk_emulate_read_toc(req, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case RESERVE:
        if (req->cmd.buf[1] & 1) {
            goto illegal_request;
        }
        break;
    case RESERVE_10:
        if (req->cmd.buf[1] & 3) {
            goto illegal_request;
        }
        break;
    case RELEASE:
        if (req->cmd.buf[1] & 1) {
            goto illegal_request;
        }
        break;
    case RELEASE_10:
        if (req->cmd.buf[1] & 3) {
            goto illegal_request;
        }
        break;
    case START_STOP:
        if (scsi_disk_emulate_start_stop(r) < 0) {
            return 0;
        }
        break;
    case ALLOW_MEDIUM_REMOVAL:
        s->tray_locked = req->cmd.buf[4] & 1;
        blk_lock_medium(s->qdev.conf.blk, req->cmd.buf[4] & 1);
        break;
    case READ_CAPACITY_10:
        /* The normal LEN field for this command is zero.  */
        memset(outbuf, 0, 8);
        blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
        if (!nb_sectors) {
            scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY));
            return 0;
        }
        if ((req->cmd.buf[8] & 1) == 0 && req->cmd.lba) {
            goto illegal_request;
        }
        nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE;
        /* Returned value is the address of the last sector.  */
        nb_sectors--;
        /* Remember the new size for read/write sanity checking. */
        s->qdev.max_lba = nb_sectors;
        /* Clip to 2TB, instead of returning capacity modulo 2TB. */
        if (nb_sectors > UINT32_MAX) {
            nb_sectors = UINT32_MAX;
        }
        outbuf[0] = (nb_sectors >> 24) & 0xff;
        outbuf[1] = (nb_sectors >> 16) & 0xff;
        outbuf[2] = (nb_sectors >> 8) & 0xff;
        outbuf[3] = nb_sectors & 0xff;
        outbuf[4] = 0;
        outbuf[5] = 0;
        outbuf[6] = s->qdev.blocksize >> 8;
        outbuf[7] = 0;
        break;
    case REQUEST_SENSE:
        /* Just return "NO SENSE".  */
        buflen = scsi_convert_sense(NULL, 0, outbuf, r->buflen,
                                    (req->cmd.buf[1] & 1) == 0);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case MECHANISM_STATUS:
        buflen = scsi_emulate_mechanism_status(s, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case GET_CONFIGURATION:
        buflen = scsi_get_configuration(s, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case GET_EVENT_STATUS_NOTIFICATION:
        buflen = scsi_get_event_status_notification(s, r, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case READ_DISC_INFORMATION:
        buflen = scsi_read_disc_information(s, r, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case READ_DVD_STRUCTURE:
        buflen = scsi_read_dvd_structure(s, r, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case SERVICE_ACTION_IN_16:
        /* Service Action In subcommands.  */
        if ((req->cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) {
            trace_scsi_disk_emulate_command_SAI_16();
            memset(outbuf, 0, req->cmd.xfer);
            blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
            if (!nb_sectors) {
                scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY));
                return 0;
            }
            if ((req->cmd.buf[14] & 1) == 0 && req->cmd.lba) {
                goto illegal_request;
            }
            nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE;
            /* Returned value is the address of the last sector.  */
            nb_sectors--;
            /* Remember the new size for read/write sanity checking. */
            s->qdev.max_lba = nb_sectors;
            outbuf[0] = (nb_sectors >> 56) & 0xff;
            outbuf[1] = (nb_sectors >> 48) & 0xff;
            outbuf[2] = (nb_sectors >> 40) & 0xff;
            outbuf[3] = (nb_sectors >> 32) & 0xff;
            outbuf[4] = (nb_sectors >> 24) & 0xff;
            outbuf[5] = (nb_sectors >> 16) & 0xff;
            outbuf[6] = (nb_sectors >> 8) & 0xff;
            outbuf[7] = nb_sectors & 0xff;
            outbuf[8] = 0;
            outbuf[9] = 0;
            outbuf[10] = s->qdev.blocksize >> 8;
            outbuf[11] = 0;
            outbuf[12] = 0;
            outbuf[13] = get_physical_block_exp(&s->qdev.conf);

            /* set TPE bit if the format supports discard */
            if (s->qdev.conf.discard_granularity) {
                outbuf[14] = 0x80;
            }

            /* Protection, exponent and lowest lba field left blank. */
            break;
        }
        trace_scsi_disk_emulate_command_SAI_unsupported();
        goto illegal_request;
    case SYNCHRONIZE_CACHE:
        /* The request is used as the AIO opaque value, so add a ref.  */
        scsi_req_ref(&r->req);
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
        return 0;
    case SEEK_10:
        trace_scsi_disk_emulate_command_SEEK_10(r->req.cmd.lba);
        if (r->req.cmd.lba > s->qdev.max_lba) {
            goto illegal_lba;
        }
        break;
    case MODE_SELECT:
        trace_scsi_disk_emulate_command_MODE_SELECT(r->req.cmd.xfer);
        break;
    case MODE_SELECT_10:
        trace_scsi_disk_emulate_command_MODE_SELECT_10(r->req.cmd.xfer);
        break;
    case UNMAP:
        trace_scsi_disk_emulate_command_UNMAP(r->req.cmd.xfer);
        break;
    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
        trace_scsi_disk_emulate_command_VERIFY((req->cmd.buf[1] >> 1) & 3);
        if (req->cmd.buf[1] & 6) {
            goto illegal_request;
        }
        break;
    case WRITE_SAME_10:
    case WRITE_SAME_16:
        trace_scsi_disk_emulate_command_WRITE_SAME(
            req->cmd.buf[0] == WRITE_SAME_10 ? 10 : 16, r->req.cmd.xfer);
        break;
    default:
        trace_scsi_disk_emulate_command_UNKNOWN(buf[0],
                                                scsi_command_name(buf[0]));
        scsi_check_condition(r, SENSE_CODE(INVALID_OPCODE));
        return 0;
    }
    assert(!r->req.aiocb);
    r->iov.iov_len = MIN(r->buflen, req->cmd.xfer);
    if (r->iov.iov_len == 0) {
        scsi_req_complete(&r->req, GOOD);
    }
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        assert(r->iov.iov_len == req->cmd.xfer);
        return -r->iov.iov_len;
    } else {
        return r->iov.iov_len;
    }

illegal_request:
    if (r->req.status == -1) {
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
    }
    return 0;

illegal_lba:
    scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
    return 0;
}

/* Execute a scsi command.  Returns the length of the data expected by the
   command.  This will be Positive for data transfers from the device
   (eg.
disk reads), negative for transfers to the device (eg. disk writes),
   and zero if the command does not transfer any data. */

static int32_t scsi_disk_dma_command(SCSIRequest *req, uint8_t *buf)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
    uint32_t len;
    uint8_t command;

    command = buf[0];

    if (!blk_is_available(s->qdev.conf.blk)) {
        scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
        return 0;
    }

    len = scsi_data_cdb_xfer(r->req.cmd.buf);
    switch (command) {
    case READ_6:
    case READ_10:
    case READ_12:
    case READ_16:
        trace_scsi_disk_dma_command_READ(r->req.cmd.lba, len);
        /* Protection information is not supported.  For SCSI versions 2 and
         * older (as determined by snooping the guest's INQUIRY commands),
         * there is no RD/WR/VRPROTECT, so skip this check in these versions.
         */
        if (s->qdev.scsi_version > 2 && (r->req.cmd.buf[1] & 0xe0)) {
            goto illegal_request;
        }
        if (!check_lba_range(s, r->req.cmd.lba, len)) {
            goto illegal_lba;
        }
        /* Record the transfer in 512-byte sector units. */
        r->sector = r->req.cmd.lba * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
        r->sector_count = len * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
        break;
    case WRITE_6:
    case WRITE_10:
    case WRITE_12:
    case WRITE_16:
    case WRITE_VERIFY_10:
    case WRITE_VERIFY_12:
    case WRITE_VERIFY_16:
        if (!blk_is_writable(s->qdev.conf.blk)) {
            scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
            return 0;
        }
        trace_scsi_disk_dma_command_WRITE(
            (command & 0xe) == 0xe ? "And Verify " : "",
            r->req.cmd.lba, len);
        /* fall through */
    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
        /* We get here only for BYTCHK == 0x01 and only for scsi-block.
         * As far as DMA is concerned, we can treat it the same as a write;
         * scsi_block_do_sgio will send VERIFY commands.
         */
        if (s->qdev.scsi_version > 2 && (r->req.cmd.buf[1] & 0xe0)) {
            goto illegal_request;
        }
        if (!check_lba_range(s, r->req.cmd.lba, len)) {
            goto illegal_lba;
        }
        r->sector = r->req.cmd.lba * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
        r->sector_count = len * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
        break;
    default:
        abort();
    illegal_request:
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        return 0;
    illegal_lba:
        scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
        return 0;
    }
    r->need_fua_emulation = sdc->need_fua_emulation(&r->req.cmd);
    if (r->sector_count == 0) {
        /* Zero-length transfer completes immediately. */
        scsi_req_complete(&r->req, GOOD);
    }
    assert(r->iov.iov_len == 0);
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        return -r->sector_count * BDRV_SECTOR_SIZE;
    } else {
        return r->sector_count * BDRV_SECTOR_SIZE;
    }
}

/* Device reset: cancel in-flight requests, re-read the device size,
 * and reset tray state and the negotiated SCSI version. */
static void scsi_disk_reset(DeviceState *dev)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev.qdev, dev);
    uint64_t nb_sectors;

    scsi_device_purge_requests(&s->qdev, SENSE_CODE(RESET));

    blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
    nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE;
    if (nb_sectors) {
        nb_sectors--;
    }
    s->qdev.max_lba = nb_sectors;
    /* reset tray statuses */
    s->tray_locked = 0;
    s->tray_open = 0;

    s->qdev.scsi_version = s->qdev.default_scsi_version;
}

/* Backend resize callback: report CAPACITY CHANGED to the guest. */
static void scsi_disk_resize_cb(void *opaque)
{
    SCSIDiskState *s = opaque;

    /* SPC lists this sense code as available only for
     * direct-access devices.
     */
    if (s->qdev.type == TYPE_DISK) {
        scsi_device_report_change(&s->qdev, SENSE_CODE(CAPACITY_CHANGED));
    }
}

static void scsi_cd_change_media_cb(void *opaque, bool load, Error **errp)
{
    SCSIDiskState *s = opaque;

    /*
     * When a CD gets changed, we have to report an ejected state and
     * then a loaded state to guests so that they detect tray
     * open/close and media change events.  Guests that do not use
     * GET_EVENT_STATUS_NOTIFICATION to detect such tray open/close
     * states rely on this behavior.
     *
     * media_changed governs the state machine used for unit attention
     * report.  media_event is used by GET EVENT STATUS NOTIFICATION.
     */
    s->media_changed = load;
    s->tray_open = !load;
    scsi_device_set_ua(&s->qdev, SENSE_CODE(UNIT_ATTENTION_NO_MEDIUM));
    s->media_event = true;
    s->eject_request = false;
}

/* Host-side eject request; a forced eject also overrides a guest
 * PREVENT MEDIUM REMOVAL lock. */
static void scsi_cd_eject_request_cb(void *opaque, bool force)
{
    SCSIDiskState *s = opaque;

    s->eject_request = true;
    if (force) {
        s->tray_locked = false;
    }
}

static bool scsi_cd_is_tray_open(void *opaque)
{
    return ((SCSIDiskState *)opaque)->tray_open;
}

static bool scsi_cd_is_medium_locked(void *opaque)
{
    return ((SCSIDiskState *)opaque)->tray_locked;
}

/* Block-backend callbacks for removable devices (CD-ROM). */
static const BlockDevOps scsi_disk_removable_block_ops = {
    .change_media_cb = scsi_cd_change_media_cb,
    .eject_request_cb = scsi_cd_eject_request_cb,
    .is_tray_open = scsi_cd_is_tray_open,
    .is_medium_locked = scsi_cd_is_medium_locked,

    .resize_cb = scsi_disk_resize_cb,
};

/* Block-backend callbacks for fixed disks. */
static const BlockDevOps scsi_disk_block_ops = {
    .resize_cb = scsi_disk_resize_cb,
};

static void scsi_disk_unit_attention_reported(SCSIDevice *dev)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
    if (s->media_changed) {
        s->media_changed =
false; 2340 scsi_device_set_ua(&s->qdev, SENSE_CODE(MEDIUM_CHANGED)); 2341 } 2342 } 2343 2344 static void scsi_realize(SCSIDevice *dev, Error **errp) 2345 { 2346 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2347 bool read_only; 2348 2349 if (!s->qdev.conf.blk) { 2350 error_setg(errp, "drive property not set"); 2351 return; 2352 } 2353 2354 if (!(s->features & (1 << SCSI_DISK_F_REMOVABLE)) && 2355 !blk_is_inserted(s->qdev.conf.blk)) { 2356 error_setg(errp, "Device needs media, but drive is empty"); 2357 return; 2358 } 2359 2360 if (!blkconf_blocksizes(&s->qdev.conf, errp)) { 2361 return; 2362 } 2363 2364 if (blk_get_aio_context(s->qdev.conf.blk) != qemu_get_aio_context() && 2365 !s->qdev.hba_supports_iothread) 2366 { 2367 error_setg(errp, "HBA does not support iothreads"); 2368 return; 2369 } 2370 2371 if (dev->type == TYPE_DISK) { 2372 if (!blkconf_geometry(&dev->conf, NULL, 65535, 255, 255, errp)) { 2373 return; 2374 } 2375 } 2376 2377 read_only = !blk_supports_write_perm(s->qdev.conf.blk); 2378 if (dev->type == TYPE_ROM) { 2379 read_only = true; 2380 } 2381 2382 if (!blkconf_apply_backend_options(&dev->conf, read_only, 2383 dev->type == TYPE_DISK, errp)) { 2384 return; 2385 } 2386 2387 if (s->qdev.conf.discard_granularity == -1) { 2388 s->qdev.conf.discard_granularity = 2389 MAX(s->qdev.conf.logical_block_size, DEFAULT_DISCARD_GRANULARITY); 2390 } 2391 2392 if (!s->version) { 2393 s->version = g_strdup(qemu_hw_version()); 2394 } 2395 if (!s->vendor) { 2396 s->vendor = g_strdup("QEMU"); 2397 } 2398 if (!s->device_id) { 2399 if (s->serial) { 2400 s->device_id = g_strdup_printf("%.20s", s->serial); 2401 } else { 2402 const char *str = blk_name(s->qdev.conf.blk); 2403 if (str && *str) { 2404 s->device_id = g_strdup(str); 2405 } 2406 } 2407 } 2408 2409 if (blk_is_sg(s->qdev.conf.blk)) { 2410 error_setg(errp, "unwanted /dev/sg*"); 2411 return; 2412 } 2413 2414 if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) && 2415 !(s->features & (1 << 
SCSI_DISK_F_NO_REMOVABLE_DEVOPS))) { 2416 blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_removable_block_ops, s); 2417 } else { 2418 blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_block_ops, s); 2419 } 2420 blk_set_guest_block_size(s->qdev.conf.blk, s->qdev.blocksize); 2421 2422 blk_iostatus_enable(s->qdev.conf.blk); 2423 2424 add_boot_device_lchs(&dev->qdev, NULL, 2425 dev->conf.lcyls, 2426 dev->conf.lheads, 2427 dev->conf.lsecs); 2428 } 2429 2430 static void scsi_unrealize(SCSIDevice *dev) 2431 { 2432 del_boot_device_lchs(&dev->qdev, NULL); 2433 } 2434 2435 static void scsi_hd_realize(SCSIDevice *dev, Error **errp) 2436 { 2437 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2438 AioContext *ctx = NULL; 2439 /* can happen for devices without drive. The error message for missing 2440 * backend will be issued in scsi_realize 2441 */ 2442 if (s->qdev.conf.blk) { 2443 ctx = blk_get_aio_context(s->qdev.conf.blk); 2444 aio_context_acquire(ctx); 2445 if (!blkconf_blocksizes(&s->qdev.conf, errp)) { 2446 goto out; 2447 } 2448 } 2449 s->qdev.blocksize = s->qdev.conf.logical_block_size; 2450 s->qdev.type = TYPE_DISK; 2451 if (!s->product) { 2452 s->product = g_strdup("QEMU HARDDISK"); 2453 } 2454 scsi_realize(&s->qdev, errp); 2455 out: 2456 if (ctx) { 2457 aio_context_release(ctx); 2458 } 2459 } 2460 2461 static void scsi_cd_realize(SCSIDevice *dev, Error **errp) 2462 { 2463 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2464 AioContext *ctx; 2465 int ret; 2466 2467 if (!dev->conf.blk) { 2468 /* Anonymous BlockBackend for an empty drive. As we put it into 2469 * dev->conf, qdev takes care of detaching on unplug. 
*/ 2470 dev->conf.blk = blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL); 2471 ret = blk_attach_dev(dev->conf.blk, &dev->qdev); 2472 assert(ret == 0); 2473 } 2474 2475 ctx = blk_get_aio_context(dev->conf.blk); 2476 aio_context_acquire(ctx); 2477 s->qdev.blocksize = 2048; 2478 s->qdev.type = TYPE_ROM; 2479 s->features |= 1 << SCSI_DISK_F_REMOVABLE; 2480 if (!s->product) { 2481 s->product = g_strdup("QEMU CD-ROM"); 2482 } 2483 scsi_realize(&s->qdev, errp); 2484 aio_context_release(ctx); 2485 } 2486 2487 2488 static const SCSIReqOps scsi_disk_emulate_reqops = { 2489 .size = sizeof(SCSIDiskReq), 2490 .free_req = scsi_free_request, 2491 .send_command = scsi_disk_emulate_command, 2492 .read_data = scsi_disk_emulate_read_data, 2493 .write_data = scsi_disk_emulate_write_data, 2494 .get_buf = scsi_get_buf, 2495 }; 2496 2497 static const SCSIReqOps scsi_disk_dma_reqops = { 2498 .size = sizeof(SCSIDiskReq), 2499 .free_req = scsi_free_request, 2500 .send_command = scsi_disk_dma_command, 2501 .read_data = scsi_read_data, 2502 .write_data = scsi_write_data, 2503 .get_buf = scsi_get_buf, 2504 .load_request = scsi_disk_load_request, 2505 .save_request = scsi_disk_save_request, 2506 }; 2507 2508 static const SCSIReqOps *const scsi_disk_reqops_dispatch[256] = { 2509 [TEST_UNIT_READY] = &scsi_disk_emulate_reqops, 2510 [INQUIRY] = &scsi_disk_emulate_reqops, 2511 [MODE_SENSE] = &scsi_disk_emulate_reqops, 2512 [MODE_SENSE_10] = &scsi_disk_emulate_reqops, 2513 [START_STOP] = &scsi_disk_emulate_reqops, 2514 [ALLOW_MEDIUM_REMOVAL] = &scsi_disk_emulate_reqops, 2515 [READ_CAPACITY_10] = &scsi_disk_emulate_reqops, 2516 [READ_TOC] = &scsi_disk_emulate_reqops, 2517 [READ_DVD_STRUCTURE] = &scsi_disk_emulate_reqops, 2518 [READ_DISC_INFORMATION] = &scsi_disk_emulate_reqops, 2519 [GET_CONFIGURATION] = &scsi_disk_emulate_reqops, 2520 [GET_EVENT_STATUS_NOTIFICATION] = &scsi_disk_emulate_reqops, 2521 [MECHANISM_STATUS] = &scsi_disk_emulate_reqops, 2522 [SERVICE_ACTION_IN_16] = 
&scsi_disk_emulate_reqops, 2523 [REQUEST_SENSE] = &scsi_disk_emulate_reqops, 2524 [SYNCHRONIZE_CACHE] = &scsi_disk_emulate_reqops, 2525 [SEEK_10] = &scsi_disk_emulate_reqops, 2526 [MODE_SELECT] = &scsi_disk_emulate_reqops, 2527 [MODE_SELECT_10] = &scsi_disk_emulate_reqops, 2528 [UNMAP] = &scsi_disk_emulate_reqops, 2529 [WRITE_SAME_10] = &scsi_disk_emulate_reqops, 2530 [WRITE_SAME_16] = &scsi_disk_emulate_reqops, 2531 [VERIFY_10] = &scsi_disk_emulate_reqops, 2532 [VERIFY_12] = &scsi_disk_emulate_reqops, 2533 [VERIFY_16] = &scsi_disk_emulate_reqops, 2534 2535 [READ_6] = &scsi_disk_dma_reqops, 2536 [READ_10] = &scsi_disk_dma_reqops, 2537 [READ_12] = &scsi_disk_dma_reqops, 2538 [READ_16] = &scsi_disk_dma_reqops, 2539 [WRITE_6] = &scsi_disk_dma_reqops, 2540 [WRITE_10] = &scsi_disk_dma_reqops, 2541 [WRITE_12] = &scsi_disk_dma_reqops, 2542 [WRITE_16] = &scsi_disk_dma_reqops, 2543 [WRITE_VERIFY_10] = &scsi_disk_dma_reqops, 2544 [WRITE_VERIFY_12] = &scsi_disk_dma_reqops, 2545 [WRITE_VERIFY_16] = &scsi_disk_dma_reqops, 2546 }; 2547 2548 static void scsi_disk_new_request_dump(uint32_t lun, uint32_t tag, uint8_t *buf) 2549 { 2550 int i; 2551 int len = scsi_cdb_length(buf); 2552 char *line_buffer, *p; 2553 2554 assert(len > 0 && len <= 16); 2555 line_buffer = g_malloc(len * 5 + 1); 2556 2557 for (i = 0, p = line_buffer; i < len; i++) { 2558 p += sprintf(p, " 0x%02x", buf[i]); 2559 } 2560 trace_scsi_disk_new_request(lun, tag, line_buffer); 2561 2562 g_free(line_buffer); 2563 } 2564 2565 static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun, 2566 uint8_t *buf, void *hba_private) 2567 { 2568 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d); 2569 SCSIRequest *req; 2570 const SCSIReqOps *ops; 2571 uint8_t command; 2572 2573 command = buf[0]; 2574 ops = scsi_disk_reqops_dispatch[command]; 2575 if (!ops) { 2576 ops = &scsi_disk_emulate_reqops; 2577 } 2578 req = scsi_req_alloc(ops, &s->qdev, tag, lun, hba_private); 2579 2580 if 
(trace_event_get_state_backends(TRACE_SCSI_DISK_NEW_REQUEST)) { 2581 scsi_disk_new_request_dump(lun, tag, buf); 2582 } 2583 2584 return req; 2585 } 2586 2587 #ifdef __linux__ 2588 static int get_device_type(SCSIDiskState *s) 2589 { 2590 uint8_t cmd[16]; 2591 uint8_t buf[36]; 2592 int ret; 2593 2594 memset(cmd, 0, sizeof(cmd)); 2595 memset(buf, 0, sizeof(buf)); 2596 cmd[0] = INQUIRY; 2597 cmd[4] = sizeof(buf); 2598 2599 ret = scsi_SG_IO_FROM_DEV(s->qdev.conf.blk, cmd, sizeof(cmd), 2600 buf, sizeof(buf), s->qdev.io_timeout); 2601 if (ret < 0) { 2602 return -1; 2603 } 2604 s->qdev.type = buf[0]; 2605 if (buf[1] & 0x80) { 2606 s->features |= 1 << SCSI_DISK_F_REMOVABLE; 2607 } 2608 return 0; 2609 } 2610 2611 static void scsi_block_realize(SCSIDevice *dev, Error **errp) 2612 { 2613 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2614 AioContext *ctx; 2615 int sg_version; 2616 int rc; 2617 2618 if (!s->qdev.conf.blk) { 2619 error_setg(errp, "drive property not set"); 2620 return; 2621 } 2622 2623 if (s->rotation_rate) { 2624 error_report_once("rotation_rate is specified for scsi-block but is " 2625 "not implemented. 
This option is deprecated and will " 2626 "be removed in a future version"); 2627 } 2628 2629 ctx = blk_get_aio_context(s->qdev.conf.blk); 2630 aio_context_acquire(ctx); 2631 2632 /* check we are using a driver managing SG_IO (version 3 and after) */ 2633 rc = blk_ioctl(s->qdev.conf.blk, SG_GET_VERSION_NUM, &sg_version); 2634 if (rc < 0) { 2635 error_setg_errno(errp, -rc, "cannot get SG_IO version number"); 2636 if (rc != -EPERM) { 2637 error_append_hint(errp, "Is this a SCSI device?\n"); 2638 } 2639 goto out; 2640 } 2641 if (sg_version < 30000) { 2642 error_setg(errp, "scsi generic interface too old"); 2643 goto out; 2644 } 2645 2646 /* get device type from INQUIRY data */ 2647 rc = get_device_type(s); 2648 if (rc < 0) { 2649 error_setg(errp, "INQUIRY failed"); 2650 goto out; 2651 } 2652 2653 /* Make a guess for the block size, we'll fix it when the guest sends. 2654 * READ CAPACITY. If they don't, they likely would assume these sizes 2655 * anyway. (TODO: check in /sys). 2656 */ 2657 if (s->qdev.type == TYPE_ROM || s->qdev.type == TYPE_WORM) { 2658 s->qdev.blocksize = 2048; 2659 } else { 2660 s->qdev.blocksize = 512; 2661 } 2662 2663 /* Makes the scsi-block device not removable by using HMP and QMP eject 2664 * command. 2665 */ 2666 s->features |= (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS); 2667 2668 scsi_realize(&s->qdev, errp); 2669 scsi_generic_read_device_inquiry(&s->qdev); 2670 2671 out: 2672 aio_context_release(ctx); 2673 } 2674 2675 typedef struct SCSIBlockReq { 2676 SCSIDiskReq req; 2677 sg_io_hdr_t io_header; 2678 2679 /* Selected bytes of the original CDB, copied into our own CDB. */ 2680 uint8_t cmd, cdb1, group_number; 2681 2682 /* CDB passed to SG_IO. 
*/ 2683 uint8_t cdb[16]; 2684 BlockCompletionFunc *cb; 2685 void *cb_opaque; 2686 } SCSIBlockReq; 2687 2688 static void scsi_block_sgio_complete(void *opaque, int ret) 2689 { 2690 SCSIBlockReq *req = (SCSIBlockReq *)opaque; 2691 SCSIDiskReq *r = &req->req; 2692 SCSIDevice *s = r->req.dev; 2693 sg_io_hdr_t *io_hdr = &req->io_header; 2694 2695 if (ret == 0) { 2696 if (io_hdr->host_status != SCSI_HOST_OK) { 2697 scsi_req_complete_failed(&r->req, io_hdr->host_status); 2698 scsi_req_unref(&r->req); 2699 return; 2700 } 2701 2702 if (io_hdr->driver_status & SG_ERR_DRIVER_TIMEOUT) { 2703 ret = BUSY; 2704 } else { 2705 ret = io_hdr->status; 2706 } 2707 2708 if (ret > 0) { 2709 aio_context_acquire(blk_get_aio_context(s->conf.blk)); 2710 if (scsi_handle_rw_error(r, ret, true)) { 2711 aio_context_release(blk_get_aio_context(s->conf.blk)); 2712 scsi_req_unref(&r->req); 2713 return; 2714 } 2715 aio_context_release(blk_get_aio_context(s->conf.blk)); 2716 2717 /* Ignore error. */ 2718 ret = 0; 2719 } 2720 } 2721 2722 req->cb(req->cb_opaque, ret); 2723 } 2724 2725 static BlockAIOCB *scsi_block_do_sgio(SCSIBlockReq *req, 2726 int64_t offset, QEMUIOVector *iov, 2727 int direction, 2728 BlockCompletionFunc *cb, void *opaque) 2729 { 2730 sg_io_hdr_t *io_header = &req->io_header; 2731 SCSIDiskReq *r = &req->req; 2732 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 2733 int nb_logical_blocks; 2734 uint64_t lba; 2735 BlockAIOCB *aiocb; 2736 2737 /* This is not supported yet. It can only happen if the guest does 2738 * reads and writes that are not aligned to one logical sectors 2739 * _and_ cover multiple MemoryRegions. 2740 */ 2741 assert(offset % s->qdev.blocksize == 0); 2742 assert(iov->size % s->qdev.blocksize == 0); 2743 2744 io_header->interface_id = 'S'; 2745 2746 /* The data transfer comes from the QEMUIOVector. 
*/ 2747 io_header->dxfer_direction = direction; 2748 io_header->dxfer_len = iov->size; 2749 io_header->dxferp = (void *)iov->iov; 2750 io_header->iovec_count = iov->niov; 2751 assert(io_header->iovec_count == iov->niov); /* no overflow! */ 2752 2753 /* Build a new CDB with the LBA and length patched in, in case 2754 * DMA helpers split the transfer in multiple segments. Do not 2755 * build a CDB smaller than what the guest wanted, and only build 2756 * a larger one if strictly necessary. 2757 */ 2758 io_header->cmdp = req->cdb; 2759 lba = offset / s->qdev.blocksize; 2760 nb_logical_blocks = io_header->dxfer_len / s->qdev.blocksize; 2761 2762 if ((req->cmd >> 5) == 0 && lba <= 0x1ffff) { 2763 /* 6-byte CDB */ 2764 stl_be_p(&req->cdb[0], lba | (req->cmd << 24)); 2765 req->cdb[4] = nb_logical_blocks; 2766 req->cdb[5] = 0; 2767 io_header->cmd_len = 6; 2768 } else if ((req->cmd >> 5) <= 1 && lba <= 0xffffffffULL) { 2769 /* 10-byte CDB */ 2770 req->cdb[0] = (req->cmd & 0x1f) | 0x20; 2771 req->cdb[1] = req->cdb1; 2772 stl_be_p(&req->cdb[2], lba); 2773 req->cdb[6] = req->group_number; 2774 stw_be_p(&req->cdb[7], nb_logical_blocks); 2775 req->cdb[9] = 0; 2776 io_header->cmd_len = 10; 2777 } else if ((req->cmd >> 5) != 4 && lba <= 0xffffffffULL) { 2778 /* 12-byte CDB */ 2779 req->cdb[0] = (req->cmd & 0x1f) | 0xA0; 2780 req->cdb[1] = req->cdb1; 2781 stl_be_p(&req->cdb[2], lba); 2782 stl_be_p(&req->cdb[6], nb_logical_blocks); 2783 req->cdb[10] = req->group_number; 2784 req->cdb[11] = 0; 2785 io_header->cmd_len = 12; 2786 } else { 2787 /* 16-byte CDB */ 2788 req->cdb[0] = (req->cmd & 0x1f) | 0x80; 2789 req->cdb[1] = req->cdb1; 2790 stq_be_p(&req->cdb[2], lba); 2791 stl_be_p(&req->cdb[10], nb_logical_blocks); 2792 req->cdb[14] = req->group_number; 2793 req->cdb[15] = 0; 2794 io_header->cmd_len = 16; 2795 } 2796 2797 /* The rest is as in scsi-generic.c. 
*/ 2798 io_header->mx_sb_len = sizeof(r->req.sense); 2799 io_header->sbp = r->req.sense; 2800 io_header->timeout = s->qdev.io_timeout * 1000; 2801 io_header->usr_ptr = r; 2802 io_header->flags |= SG_FLAG_DIRECT_IO; 2803 req->cb = cb; 2804 req->cb_opaque = opaque; 2805 trace_scsi_disk_aio_sgio_command(r->req.tag, req->cdb[0], lba, 2806 nb_logical_blocks, io_header->timeout); 2807 aiocb = blk_aio_ioctl(s->qdev.conf.blk, SG_IO, io_header, scsi_block_sgio_complete, req); 2808 assert(aiocb != NULL); 2809 return aiocb; 2810 } 2811 2812 static bool scsi_block_no_fua(SCSICommand *cmd) 2813 { 2814 return false; 2815 } 2816 2817 static BlockAIOCB *scsi_block_dma_readv(int64_t offset, 2818 QEMUIOVector *iov, 2819 BlockCompletionFunc *cb, void *cb_opaque, 2820 void *opaque) 2821 { 2822 SCSIBlockReq *r = opaque; 2823 return scsi_block_do_sgio(r, offset, iov, 2824 SG_DXFER_FROM_DEV, cb, cb_opaque); 2825 } 2826 2827 static BlockAIOCB *scsi_block_dma_writev(int64_t offset, 2828 QEMUIOVector *iov, 2829 BlockCompletionFunc *cb, void *cb_opaque, 2830 void *opaque) 2831 { 2832 SCSIBlockReq *r = opaque; 2833 return scsi_block_do_sgio(r, offset, iov, 2834 SG_DXFER_TO_DEV, cb, cb_opaque); 2835 } 2836 2837 static bool scsi_block_is_passthrough(SCSIDiskState *s, uint8_t *buf) 2838 { 2839 switch (buf[0]) { 2840 case VERIFY_10: 2841 case VERIFY_12: 2842 case VERIFY_16: 2843 /* Check if BYTCHK == 0x01 (data-out buffer contains data 2844 * for the number of logical blocks specified in the length 2845 * field). For other modes, do not use scatter/gather operation. 
2846 */ 2847 if ((buf[1] & 6) == 2) { 2848 return false; 2849 } 2850 break; 2851 2852 case READ_6: 2853 case READ_10: 2854 case READ_12: 2855 case READ_16: 2856 case WRITE_6: 2857 case WRITE_10: 2858 case WRITE_12: 2859 case WRITE_16: 2860 case WRITE_VERIFY_10: 2861 case WRITE_VERIFY_12: 2862 case WRITE_VERIFY_16: 2863 /* MMC writing cannot be done via DMA helpers, because it sometimes 2864 * involves writing beyond the maximum LBA or to negative LBA (lead-in). 2865 * We might use scsi_block_dma_reqops as long as no writing commands are 2866 * seen, but performance usually isn't paramount on optical media. So, 2867 * just make scsi-block operate the same as scsi-generic for them. 2868 */ 2869 if (s->qdev.type != TYPE_ROM) { 2870 return false; 2871 } 2872 break; 2873 2874 default: 2875 break; 2876 } 2877 2878 return true; 2879 } 2880 2881 2882 static int32_t scsi_block_dma_command(SCSIRequest *req, uint8_t *buf) 2883 { 2884 SCSIBlockReq *r = (SCSIBlockReq *)req; 2885 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 2886 2887 r->cmd = req->cmd.buf[0]; 2888 switch (r->cmd >> 5) { 2889 case 0: 2890 /* 6-byte CDB. */ 2891 r->cdb1 = r->group_number = 0; 2892 break; 2893 case 1: 2894 /* 10-byte CDB. */ 2895 r->cdb1 = req->cmd.buf[1]; 2896 r->group_number = req->cmd.buf[6]; 2897 break; 2898 case 4: 2899 /* 12-byte CDB. */ 2900 r->cdb1 = req->cmd.buf[1]; 2901 r->group_number = req->cmd.buf[10]; 2902 break; 2903 case 5: 2904 /* 16-byte CDB. */ 2905 r->cdb1 = req->cmd.buf[1]; 2906 r->group_number = req->cmd.buf[14]; 2907 break; 2908 default: 2909 abort(); 2910 } 2911 2912 /* Protection information is not supported. For SCSI versions 2 and 2913 * older (as determined by snooping the guest's INQUIRY commands), 2914 * there is no RD/WR/VRPROTECT, so skip this check in these versions. 
2915 */ 2916 if (s->qdev.scsi_version > 2 && (req->cmd.buf[1] & 0xe0)) { 2917 scsi_check_condition(&r->req, SENSE_CODE(INVALID_FIELD)); 2918 return 0; 2919 } 2920 2921 return scsi_disk_dma_command(req, buf); 2922 } 2923 2924 static const SCSIReqOps scsi_block_dma_reqops = { 2925 .size = sizeof(SCSIBlockReq), 2926 .free_req = scsi_free_request, 2927 .send_command = scsi_block_dma_command, 2928 .read_data = scsi_read_data, 2929 .write_data = scsi_write_data, 2930 .get_buf = scsi_get_buf, 2931 .load_request = scsi_disk_load_request, 2932 .save_request = scsi_disk_save_request, 2933 }; 2934 2935 static SCSIRequest *scsi_block_new_request(SCSIDevice *d, uint32_t tag, 2936 uint32_t lun, uint8_t *buf, 2937 void *hba_private) 2938 { 2939 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d); 2940 2941 if (scsi_block_is_passthrough(s, buf)) { 2942 return scsi_req_alloc(&scsi_generic_req_ops, &s->qdev, tag, lun, 2943 hba_private); 2944 } else { 2945 return scsi_req_alloc(&scsi_block_dma_reqops, &s->qdev, tag, lun, 2946 hba_private); 2947 } 2948 } 2949 2950 static int scsi_block_parse_cdb(SCSIDevice *d, SCSICommand *cmd, 2951 uint8_t *buf, void *hba_private) 2952 { 2953 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d); 2954 2955 if (scsi_block_is_passthrough(s, buf)) { 2956 return scsi_bus_parse_cdb(&s->qdev, cmd, buf, hba_private); 2957 } else { 2958 return scsi_req_parse_cdb(&s->qdev, cmd, buf); 2959 } 2960 } 2961 2962 static void scsi_block_update_sense(SCSIRequest *req) 2963 { 2964 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 2965 SCSIBlockReq *br = DO_UPCAST(SCSIBlockReq, req, r); 2966 r->req.sense_len = MIN(br->io_header.sb_len_wr, sizeof(r->req.sense)); 2967 } 2968 #endif 2969 2970 static 2971 BlockAIOCB *scsi_dma_readv(int64_t offset, QEMUIOVector *iov, 2972 BlockCompletionFunc *cb, void *cb_opaque, 2973 void *opaque) 2974 { 2975 SCSIDiskReq *r = opaque; 2976 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 2977 return 
blk_aio_preadv(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque); 2978 } 2979 2980 static 2981 BlockAIOCB *scsi_dma_writev(int64_t offset, QEMUIOVector *iov, 2982 BlockCompletionFunc *cb, void *cb_opaque, 2983 void *opaque) 2984 { 2985 SCSIDiskReq *r = opaque; 2986 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 2987 return blk_aio_pwritev(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque); 2988 } 2989 2990 static void scsi_disk_base_class_initfn(ObjectClass *klass, void *data) 2991 { 2992 DeviceClass *dc = DEVICE_CLASS(klass); 2993 SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass); 2994 2995 dc->fw_name = "disk"; 2996 dc->reset = scsi_disk_reset; 2997 sdc->dma_readv = scsi_dma_readv; 2998 sdc->dma_writev = scsi_dma_writev; 2999 sdc->need_fua_emulation = scsi_is_cmd_fua; 3000 } 3001 3002 static const TypeInfo scsi_disk_base_info = { 3003 .name = TYPE_SCSI_DISK_BASE, 3004 .parent = TYPE_SCSI_DEVICE, 3005 .class_init = scsi_disk_base_class_initfn, 3006 .instance_size = sizeof(SCSIDiskState), 3007 .class_size = sizeof(SCSIDiskClass), 3008 .abstract = true, 3009 }; 3010 3011 #define DEFINE_SCSI_DISK_PROPERTIES() \ 3012 DEFINE_PROP_DRIVE_IOTHREAD("drive", SCSIDiskState, qdev.conf.blk), \ 3013 DEFINE_BLOCK_PROPERTIES_BASE(SCSIDiskState, qdev.conf), \ 3014 DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf), \ 3015 DEFINE_PROP_STRING("ver", SCSIDiskState, version), \ 3016 DEFINE_PROP_STRING("serial", SCSIDiskState, serial), \ 3017 DEFINE_PROP_STRING("vendor", SCSIDiskState, vendor), \ 3018 DEFINE_PROP_STRING("product", SCSIDiskState, product), \ 3019 DEFINE_PROP_STRING("device_id", SCSIDiskState, device_id) 3020 3021 3022 static Property scsi_hd_properties[] = { 3023 DEFINE_SCSI_DISK_PROPERTIES(), 3024 DEFINE_PROP_BIT("removable", SCSIDiskState, features, 3025 SCSI_DISK_F_REMOVABLE, false), 3026 DEFINE_PROP_BIT("dpofua", SCSIDiskState, features, 3027 SCSI_DISK_F_DPOFUA, false), 3028 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0), 3029 
DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0), 3030 DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0), 3031 DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size, 3032 DEFAULT_MAX_UNMAP_SIZE), 3033 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size, 3034 DEFAULT_MAX_IO_SIZE), 3035 DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0), 3036 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version, 3037 5), 3038 DEFINE_BLOCK_CHS_PROPERTIES(SCSIDiskState, qdev.conf), 3039 DEFINE_PROP_END_OF_LIST(), 3040 }; 3041 3042 static const VMStateDescription vmstate_scsi_disk_state = { 3043 .name = "scsi-disk", 3044 .version_id = 1, 3045 .minimum_version_id = 1, 3046 .fields = (VMStateField[]) { 3047 VMSTATE_SCSI_DEVICE(qdev, SCSIDiskState), 3048 VMSTATE_BOOL(media_changed, SCSIDiskState), 3049 VMSTATE_BOOL(media_event, SCSIDiskState), 3050 VMSTATE_BOOL(eject_request, SCSIDiskState), 3051 VMSTATE_BOOL(tray_open, SCSIDiskState), 3052 VMSTATE_BOOL(tray_locked, SCSIDiskState), 3053 VMSTATE_END_OF_LIST() 3054 } 3055 }; 3056 3057 static void scsi_hd_class_initfn(ObjectClass *klass, void *data) 3058 { 3059 DeviceClass *dc = DEVICE_CLASS(klass); 3060 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); 3061 3062 sc->realize = scsi_hd_realize; 3063 sc->unrealize = scsi_unrealize; 3064 sc->alloc_req = scsi_new_request; 3065 sc->unit_attention_reported = scsi_disk_unit_attention_reported; 3066 dc->desc = "virtual SCSI disk"; 3067 device_class_set_props(dc, scsi_hd_properties); 3068 dc->vmsd = &vmstate_scsi_disk_state; 3069 } 3070 3071 static const TypeInfo scsi_hd_info = { 3072 .name = "scsi-hd", 3073 .parent = TYPE_SCSI_DISK_BASE, 3074 .class_init = scsi_hd_class_initfn, 3075 }; 3076 3077 static Property scsi_cd_properties[] = { 3078 DEFINE_SCSI_DISK_PROPERTIES(), 3079 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0), 3080 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0), 3081 
DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0), 3082 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size, 3083 DEFAULT_MAX_IO_SIZE), 3084 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version, 3085 5), 3086 DEFINE_PROP_END_OF_LIST(), 3087 }; 3088 3089 static void scsi_cd_class_initfn(ObjectClass *klass, void *data) 3090 { 3091 DeviceClass *dc = DEVICE_CLASS(klass); 3092 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); 3093 3094 sc->realize = scsi_cd_realize; 3095 sc->alloc_req = scsi_new_request; 3096 sc->unit_attention_reported = scsi_disk_unit_attention_reported; 3097 dc->desc = "virtual SCSI CD-ROM"; 3098 device_class_set_props(dc, scsi_cd_properties); 3099 dc->vmsd = &vmstate_scsi_disk_state; 3100 } 3101 3102 static const TypeInfo scsi_cd_info = { 3103 .name = "scsi-cd", 3104 .parent = TYPE_SCSI_DISK_BASE, 3105 .class_init = scsi_cd_class_initfn, 3106 }; 3107 3108 #ifdef __linux__ 3109 static Property scsi_block_properties[] = { 3110 DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf), 3111 DEFINE_PROP_DRIVE("drive", SCSIDiskState, qdev.conf.blk), 3112 DEFINE_PROP_BOOL("share-rw", SCSIDiskState, qdev.conf.share_rw, false), 3113 DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0), 3114 DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size, 3115 DEFAULT_MAX_UNMAP_SIZE), 3116 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size, 3117 DEFAULT_MAX_IO_SIZE), 3118 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version, 3119 -1), 3120 DEFINE_PROP_UINT32("io_timeout", SCSIDiskState, qdev.io_timeout, 3121 DEFAULT_IO_TIMEOUT), 3122 DEFINE_PROP_END_OF_LIST(), 3123 }; 3124 3125 static void scsi_block_class_initfn(ObjectClass *klass, void *data) 3126 { 3127 DeviceClass *dc = DEVICE_CLASS(klass); 3128 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); 3129 SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass); 3130 3131 sc->realize = scsi_block_realize; 3132 sc->alloc_req = 
scsi_block_new_request; 3133 sc->parse_cdb = scsi_block_parse_cdb; 3134 sdc->dma_readv = scsi_block_dma_readv; 3135 sdc->dma_writev = scsi_block_dma_writev; 3136 sdc->update_sense = scsi_block_update_sense; 3137 sdc->need_fua_emulation = scsi_block_no_fua; 3138 dc->desc = "SCSI block device passthrough"; 3139 device_class_set_props(dc, scsi_block_properties); 3140 dc->vmsd = &vmstate_scsi_disk_state; 3141 } 3142 3143 static const TypeInfo scsi_block_info = { 3144 .name = "scsi-block", 3145 .parent = TYPE_SCSI_DISK_BASE, 3146 .class_init = scsi_block_class_initfn, 3147 }; 3148 #endif 3149 3150 static void scsi_disk_register_types(void) 3151 { 3152 type_register_static(&scsi_disk_base_info); 3153 type_register_static(&scsi_hd_info); 3154 type_register_static(&scsi_cd_info); 3155 #ifdef __linux__ 3156 type_register_static(&scsi_block_info); 3157 #endif 3158 } 3159 3160 type_init(scsi_disk_register_types) 3161