1 /* 2 * SCSI Device emulation 3 * 4 * Copyright (c) 2006 CodeSourcery. 5 * Based on code by Fabrice Bellard 6 * 7 * Written by Paul Brook 8 * Modifications: 9 * 2009-Dec-12 Artyom Tarasenko : implemented standard inquiry for the case 10 * when the allocation length of CDB is smaller 11 * than 36. 12 * 2009-Oct-13 Artyom Tarasenko : implemented the block descriptor in the 13 * MODE SENSE response. 14 * 15 * This code is licensed under the LGPL. 16 * 17 * Note that this file only handles the SCSI architecture model and device 18 * commands. Emulation of interface/link layer protocols is handled by 19 * the host adapter emulator. 20 */ 21 22 #include "qemu/osdep.h" 23 #include "qemu/units.h" 24 #include "qapi/error.h" 25 #include "qemu/error-report.h" 26 #include "qemu/main-loop.h" 27 #include "qemu/module.h" 28 #include "hw/scsi/scsi.h" 29 #include "migration/qemu-file-types.h" 30 #include "migration/vmstate.h" 31 #include "hw/scsi/emulation.h" 32 #include "scsi/constants.h" 33 #include "sysemu/block-backend.h" 34 #include "sysemu/blockdev.h" 35 #include "hw/block/block.h" 36 #include "hw/qdev-properties.h" 37 #include "hw/qdev-properties-system.h" 38 #include "sysemu/dma.h" 39 #include "sysemu/sysemu.h" 40 #include "qemu/cutils.h" 41 #include "trace.h" 42 #include "qom/object.h" 43 44 #ifdef __linux__ 45 #include <scsi/sg.h> 46 #endif 47 48 #define SCSI_WRITE_SAME_MAX (512 * KiB) 49 #define SCSI_DMA_BUF_SIZE (128 * KiB) 50 #define SCSI_MAX_INQUIRY_LEN 256 51 #define SCSI_MAX_MODE_LEN 256 52 53 #define DEFAULT_DISCARD_GRANULARITY (4 * KiB) 54 #define DEFAULT_MAX_UNMAP_SIZE (1 * GiB) 55 #define DEFAULT_MAX_IO_SIZE INT_MAX /* 2 GB - 1 block */ 56 57 #define TYPE_SCSI_DISK_BASE "scsi-disk-base" 58 59 OBJECT_DECLARE_TYPE(SCSIDiskState, SCSIDiskClass, SCSI_DISK_BASE) 60 61 struct SCSIDiskClass { 62 SCSIDeviceClass parent_class; 63 DMAIOFunc *dma_readv; 64 DMAIOFunc *dma_writev; 65 bool (*need_fua_emulation)(SCSICommand *cmd); 66 void (*update_sense)(SCSIRequest *r); 67 }; 68 69 typedef struct SCSIDiskReq { 70 SCSIRequest req; 71 /* Both sector and sector_count are in terms of BDRV_SECTOR_SIZE bytes. */ 72 uint64_t sector; 73 uint32_t sector_count; 74 uint32_t buflen; 75 bool started; 76 bool need_fua_emulation; 77 struct iovec iov; 78 QEMUIOVector qiov; 79 BlockAcctCookie acct; 80 } SCSIDiskReq; 81 82 #define SCSI_DISK_F_REMOVABLE 0 83 #define SCSI_DISK_F_DPOFUA 1 84 #define SCSI_DISK_F_NO_REMOVABLE_DEVOPS 2 85 86 struct SCSIDiskState { 87 SCSIDevice qdev; 88 uint32_t features; 89 bool media_changed; 90 bool media_event; 91 bool eject_request; 92 uint16_t port_index; 93 uint64_t max_unmap_size; 94 uint64_t max_io_size; 95 QEMUBH *bh; 96 char *version; 97 char *serial; 98 char *vendor; 99 char *product; 100 char *device_id; 101 bool tray_open; 102 bool tray_locked; 103 /* 104 * 0x0000 - rotation rate not reported 105 * 0x0001 - non-rotating medium (SSD) 106 * 0x0002-0x0400 - reserved 107 * 0x0401-0xfffe - rotations per minute 108 * 0xffff - reserved 109 */ 110 uint16_t rotation_rate; 111 }; 112 113 static void scsi_free_request(SCSIRequest *req) 114 { 115 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 116 117 qemu_vfree(r->iov.iov_base); 118 } 119 120 /* Helper function for command completion with sense.
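It fills in the request's sense data and completes the command with CHECK_CONDITION status.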
*/ 121 static void scsi_check_condition(SCSIDiskReq *r, SCSISense sense) 122 { 123 trace_scsi_disk_check_condition(r->req.tag, sense.key, sense.asc, 124 sense.ascq); 125 scsi_req_build_sense(&r->req, sense); 126 scsi_req_complete(&r->req, CHECK_CONDITION); 127 } 128 129 static void scsi_init_iovec(SCSIDiskReq *r, size_t size) 130 { 131 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 132 133 if (!r->iov.iov_base) { 134 r->buflen = size; 135 r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen); 136 } 137 r->iov.iov_len = MIN(r->sector_count * BDRV_SECTOR_SIZE, r->buflen); 138 qemu_iovec_init_external(&r->qiov, &r->iov, 1); 139 } 140 141 static void scsi_disk_save_request(QEMUFile *f, SCSIRequest *req) 142 { 143 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 144 145 qemu_put_be64s(f, &r->sector); 146 qemu_put_be32s(f, &r->sector_count); 147 qemu_put_be32s(f, &r->buflen); 148 if (r->buflen) { 149 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { 150 qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len); 151 } else if (!req->retry) { 152 uint32_t len = r->iov.iov_len; 153 qemu_put_be32s(f, &len); 154 qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len); 155 } 156 } 157 } 158 159 static void scsi_disk_load_request(QEMUFile *f, SCSIRequest *req) 160 { 161 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 162 163 qemu_get_be64s(f, &r->sector); 164 qemu_get_be32s(f, &r->sector_count); 165 qemu_get_be32s(f, &r->buflen); 166 if (r->buflen) { 167 scsi_init_iovec(r, r->buflen); 168 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { 169 qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len); 170 } else if (!r->req.retry) { 171 uint32_t len; 172 qemu_get_be32s(f, &len); 173 r->iov.iov_len = len; 174 assert(r->iov.iov_len <= r->buflen); 175 qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len); 176 } 177 } 178 179 qemu_iovec_init_external(&r->qiov, &r->iov, 1); 180 } 181 182 /* 183 * scsi_handle_rw_error has two return values. False means that the error 184 * must be ignored, true means that the error has been processed and the 185 * caller should not do anything else for this request. Note that 186 * scsi_handle_rw_error always manages its reference counts, independent 187 * of the return value. 188 */ 189 static bool scsi_handle_rw_error(SCSIDiskReq *r, int ret, bool acct_failed) 190 { 191 bool is_read = (r->req.cmd.mode == SCSI_XFER_FROM_DEV); 192 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 193 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s)); 194 SCSISense sense = SENSE_CODE(NO_SENSE); 195 int error = 0; 196 bool req_has_sense = false; 197 BlockErrorAction action; 198 int status; 199 200 if (ret < 0) { 201 status = scsi_sense_from_errno(-ret, &sense); 202 error = -ret; 203 } else { 204 /* A passthrough command has completed with nonzero status. */ 205 status = ret; 206 if (status == CHECK_CONDITION) { 207 req_has_sense = true; 208 error = scsi_sense_buf_to_errno(r->req.sense, sizeof(r->req.sense)); 209 } else { 210 error = EINVAL; 211 } 212 } 213 214 /* 215 * Check whether the error has to be handled by the guest or should 216 * rather follow the rerror=/werror= settings. Guest-handled errors 217 * are usually retried immediately, so do not post them to QMP and 218 * do not account them as failed I/O. 
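Only sense data that scsi_sense_buf_is_guest_recoverable() accepts is reported straight to the guest; everything else goes through blk_get_error_action() and follows the configured policy.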
219 */ 220 if (req_has_sense && 221 scsi_sense_buf_is_guest_recoverable(r->req.sense, sizeof(r->req.sense))) { 222 action = BLOCK_ERROR_ACTION_REPORT; 223 acct_failed = false; 224 } else { 225 action = blk_get_error_action(s->qdev.conf.blk, is_read, error); 226 blk_error_action(s->qdev.conf.blk, action, is_read, error); 227 } 228 229 switch (action) { 230 case BLOCK_ERROR_ACTION_REPORT: 231 if (acct_failed) { 232 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct); 233 } 234 if (req_has_sense) { 235 sdc->update_sense(&r->req); 236 } else if (status == CHECK_CONDITION) { 237 scsi_req_build_sense(&r->req, sense); 238 } 239 scsi_req_complete(&r->req, status); 240 return true; 241 242 case BLOCK_ERROR_ACTION_IGNORE: 243 return false; 244 245 case BLOCK_ERROR_ACTION_STOP: 246 scsi_req_retry(&r->req); 247 return true; 248 249 default: 250 g_assert_not_reached(); 251 } 252 } 253 254 static bool scsi_disk_req_check_error(SCSIDiskReq *r, int ret, bool acct_failed) 255 { 256 if (r->req.io_canceled) { 257 scsi_req_cancel_complete(&r->req); 258 return true; 259 } 260 261 if (ret < 0) { 262 return scsi_handle_rw_error(r, ret, acct_failed); 263 } 264 265 return false; 266 } 267 268 static void scsi_aio_complete(void *opaque, int ret) 269 { 270 SCSIDiskReq *r = (SCSIDiskReq *)opaque; 271 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 272 273 assert(r->req.aiocb != NULL); 274 r->req.aiocb = NULL; 275 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); 276 if (scsi_disk_req_check_error(r, ret, true)) { 277 goto done; 278 } 279 280 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); 281 scsi_req_complete(&r->req, GOOD); 282 283 done: 284 aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); 285 scsi_req_unref(&r->req); 286 } 287 288 static bool scsi_is_cmd_fua(SCSICommand *cmd) 289 { 290 switch (cmd->buf[0]) { 291 case READ_10: 292 case READ_12: 293 case READ_16: 294 case WRITE_10: 295 case WRITE_12: 296 case WRITE_16: 297 return (cmd->buf[1] & 8) != 0; 298 299 case VERIFY_10: 300 case VERIFY_12: 301 case VERIFY_16: 302 case WRITE_VERIFY_10: 303 case WRITE_VERIFY_12: 304 case WRITE_VERIFY_16: 305 return true; 306 307 case READ_6: 308 case WRITE_6: 309 default: 310 return false; 311 } 312 } 313 314 static void scsi_write_do_fua(SCSIDiskReq *r) 315 { 316 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 317 318 assert(r->req.aiocb == NULL); 319 assert(!r->req.io_canceled); 320 321 if (r->need_fua_emulation) { 322 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0, 323 BLOCK_ACCT_FLUSH); 324 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r); 325 return; 326 } 327 328 scsi_req_complete(&r->req, GOOD); 329 scsi_req_unref(&r->req); 330 } 331 332 static void scsi_dma_complete_noio(SCSIDiskReq *r, int ret) 333 { 334 assert(r->req.aiocb == NULL); 335 if (scsi_disk_req_check_error(r, ret, false)) { 336 goto done; 337 } 338 339 r->sector += r->sector_count; 340 r->sector_count = 0; 341 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { 342 scsi_write_do_fua(r); 343 return; 344 } else { 345 scsi_req_complete(&r->req, GOOD); 346 } 347 348 done: 349 scsi_req_unref(&r->req); 350 } 351 352 static void scsi_dma_complete(void *opaque, int ret) 353 { 354 SCSIDiskReq *r = (SCSIDiskReq *)opaque; 355 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 356 357 assert(r->req.aiocb != NULL); 358 r->req.aiocb = NULL; 359 360 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); 361 if (ret < 0) { 362 
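/* A negative ret means the transfer itself failed; charge it to the accounting cookie. */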
block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct); 363 } else { 364 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); 365 } 366 scsi_dma_complete_noio(r, ret); 367 aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); 368 } 369 370 static void scsi_read_complete_noio(SCSIDiskReq *r, int ret) 371 { 372 uint32_t n; 373 374 assert(r->req.aiocb == NULL); 375 if (scsi_disk_req_check_error(r, ret, false)) { 376 goto done; 377 } 378 379 n = r->qiov.size / BDRV_SECTOR_SIZE; 380 r->sector += n; 381 r->sector_count -= n; 382 scsi_req_data(&r->req, r->qiov.size); 383 384 done: 385 scsi_req_unref(&r->req); 386 } 387 388 static void scsi_read_complete(void *opaque, int ret) 389 { 390 SCSIDiskReq *r = (SCSIDiskReq *)opaque; 391 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 392 393 assert(r->req.aiocb != NULL); 394 r->req.aiocb = NULL; 395 396 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); 397 if (ret < 0) { 398 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct); 399 } else { 400 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); 401 trace_scsi_disk_read_complete(r->req.tag, r->qiov.size); 402 } 403 scsi_read_complete_noio(r, ret); 404 aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); 405 } 406 407 /* Actually issue a read to the block device. */ 408 static void scsi_do_read(SCSIDiskReq *r, int ret) 409 { 410 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 411 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s)); 412 413 assert (r->req.aiocb == NULL); 414 if (scsi_disk_req_check_error(r, ret, false)) { 415 goto done; 416 } 417 418 /* The request is used as the AIO opaque value, so add a ref. */ 419 scsi_req_ref(&r->req); 420 421 if (r->req.sg) { 422 dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_READ); 423 r->req.resid -= r->req.sg->size; 424 r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk), 425 r->req.sg, r->sector << BDRV_SECTOR_BITS, 426 BDRV_SECTOR_SIZE, 427 sdc->dma_readv, r, scsi_dma_complete, r, 428 DMA_DIRECTION_FROM_DEVICE); 429 } else { 430 scsi_init_iovec(r, SCSI_DMA_BUF_SIZE); 431 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 432 r->qiov.size, BLOCK_ACCT_READ); 433 r->req.aiocb = sdc->dma_readv(r->sector << BDRV_SECTOR_BITS, &r->qiov, 434 scsi_read_complete, r, r); 435 } 436 437 done: 438 scsi_req_unref(&r->req); 439 } 440 441 static void scsi_do_read_cb(void *opaque, int ret) 442 { 443 SCSIDiskReq *r = (SCSIDiskReq *)opaque; 444 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 445 446 assert (r->req.aiocb != NULL); 447 r->req.aiocb = NULL; 448 449 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); 450 if (ret < 0) { 451 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct); 452 } else { 453 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); 454 } 455 scsi_do_read(opaque, ret); 456 aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); 457 } 458 459 /* Read more data from scsi device into buffer. */ 460 static void scsi_read_data(SCSIRequest *req) 461 { 462 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 463 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 464 bool first; 465 466 trace_scsi_disk_read_data_count(r->sector_count); 467 if (r->sector_count == 0) { 468 /* This also clears the sense buffer for REQUEST SENSE. 
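Completing with GOOD status leaves no stored sense, so a subsequent REQUEST SENSE reports NO SENSE.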
*/ 469 scsi_req_complete(&r->req, GOOD); 470 return; 471 } 472 473 /* No data transfer may already be in progress */ 474 assert(r->req.aiocb == NULL); 475 476 /* The request is used as the AIO opaque value, so add a ref. */ 477 scsi_req_ref(&r->req); 478 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { 479 trace_scsi_disk_read_data_invalid(); 480 scsi_read_complete_noio(r, -EINVAL); 481 return; 482 } 483 484 if (!blk_is_available(req->dev->conf.blk)) { 485 scsi_read_complete_noio(r, -ENOMEDIUM); 486 return; 487 } 488 489 first = !r->started; 490 r->started = true; 491 if (first && r->need_fua_emulation) { 492 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0, 493 BLOCK_ACCT_FLUSH); 494 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_do_read_cb, r); 495 } else { 496 scsi_do_read(r, 0); 497 } 498 } 499 500 static void scsi_write_complete_noio(SCSIDiskReq *r, int ret) 501 { 502 uint32_t n; 503 504 assert (r->req.aiocb == NULL); 505 if (scsi_disk_req_check_error(r, ret, false)) { 506 goto done; 507 } 508 509 n = r->qiov.size / BDRV_SECTOR_SIZE; 510 r->sector += n; 511 r->sector_count -= n; 512 if (r->sector_count == 0) { 513 scsi_write_do_fua(r); 514 return; 515 } else { 516 scsi_init_iovec(r, SCSI_DMA_BUF_SIZE); 517 trace_scsi_disk_write_complete_noio(r->req.tag, r->qiov.size); 518 scsi_req_data(&r->req, r->qiov.size); 519 } 520 521 done: 522 scsi_req_unref(&r->req); 523 } 524 525 static void scsi_write_complete(void * opaque, int ret) 526 { 527 SCSIDiskReq *r = (SCSIDiskReq *)opaque; 528 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 529 530 assert (r->req.aiocb != NULL); 531 r->req.aiocb = NULL; 532 533 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); 534 if (ret < 0) { 535 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct); 536 } else { 537 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); 538 } 539 scsi_write_complete_noio(r, ret); 540 aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); 541 } 542 543 static void scsi_write_data(SCSIRequest *req) 544 { 545 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 546 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 547 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s)); 548 549 /* No data transfer may already be in progress */ 550 assert(r->req.aiocb == NULL); 551 552 /* The request is used as the AIO opaque value, so add a ref. */ 553 scsi_req_ref(&r->req); 554 if (r->req.cmd.mode != SCSI_XFER_TO_DEV) { 555 trace_scsi_disk_write_data_invalid(); 556 scsi_write_complete_noio(r, -EINVAL); 557 return; 558 } 559 560 if (!r->req.sg && !r->qiov.size) { 561 /* Called for the first time. Ask the driver to send us more data. 
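Completing a zero-byte transfer through scsi_write_complete_noio() sets up the bounce buffer with scsi_init_iovec() and asks for the first chunk via scsi_req_data().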
*/ 562 r->started = true; 563 scsi_write_complete_noio(r, 0); 564 return; 565 } 566 if (!blk_is_available(req->dev->conf.blk)) { 567 scsi_write_complete_noio(r, -ENOMEDIUM); 568 return; 569 } 570 571 if (r->req.cmd.buf[0] == VERIFY_10 || r->req.cmd.buf[0] == VERIFY_12 || 572 r->req.cmd.buf[0] == VERIFY_16) { 573 if (r->req.sg) { 574 scsi_dma_complete_noio(r, 0); 575 } else { 576 scsi_write_complete_noio(r, 0); 577 } 578 return; 579 } 580 581 if (r->req.sg) { 582 dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_WRITE); 583 r->req.resid -= r->req.sg->size; 584 r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk), 585 r->req.sg, r->sector << BDRV_SECTOR_BITS, 586 BDRV_SECTOR_SIZE, 587 sdc->dma_writev, r, scsi_dma_complete, r, 588 DMA_DIRECTION_TO_DEVICE); 589 } else { 590 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 591 r->qiov.size, BLOCK_ACCT_WRITE); 592 r->req.aiocb = sdc->dma_writev(r->sector << BDRV_SECTOR_BITS, &r->qiov, 593 scsi_write_complete, r, r); 594 } 595 } 596 597 /* Return a pointer to the data buffer. */ 598 static uint8_t *scsi_get_buf(SCSIRequest *req) 599 { 600 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 601 602 return (uint8_t *)r->iov.iov_base; 603 } 604 605 static int scsi_disk_emulate_vpd_page(SCSIRequest *req, uint8_t *outbuf) 606 { 607 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 608 uint8_t page_code = req->cmd.buf[2]; 609 int start, buflen = 0; 610 611 outbuf[buflen++] = s->qdev.type & 0x1f; 612 outbuf[buflen++] = page_code; 613 outbuf[buflen++] = 0x00; 614 outbuf[buflen++] = 0x00; 615 start = buflen; 616 617 switch (page_code) { 618 case 0x00: /* Supported page codes, mandatory */ 619 { 620 trace_scsi_disk_emulate_vpd_page_00(req->cmd.xfer); 621 outbuf[buflen++] = 0x00; /* list of supported pages (this page) */ 622 if (s->serial) { 623 outbuf[buflen++] = 0x80; /* unit serial number */ 624 } 625 outbuf[buflen++] = 0x83; /* device identification */ 626 if (s->qdev.type == TYPE_DISK) { 627 outbuf[buflen++] = 0xb0; /* block limits */ 628 outbuf[buflen++] = 0xb1; /* block device characteristics */ 629 outbuf[buflen++] = 0xb2; /* thin provisioning */ 630 } 631 break; 632 } 633 case 0x80: /* Device serial number, optional */ 634 { 635 int l; 636 637 if (!s->serial) { 638 trace_scsi_disk_emulate_vpd_page_80_not_supported(); 639 return -1; 640 } 641 642 l = strlen(s->serial); 643 if (l > 36) { 644 l = 36; 645 } 646 647 trace_scsi_disk_emulate_vpd_page_80(req->cmd.xfer); 648 memcpy(outbuf + buflen, s->serial, l); 649 buflen += l; 650 break; 651 } 652 653 case 0x83: /* Device identification page, mandatory */ 654 { 655 int id_len = s->device_id ? 
MIN(strlen(s->device_id), 255 - 8) : 0; 656 657 trace_scsi_disk_emulate_vpd_page_83(req->cmd.xfer); 658 659 if (id_len) { 660 outbuf[buflen++] = 0x2; /* ASCII */ 661 outbuf[buflen++] = 0; /* not officially assigned */ 662 outbuf[buflen++] = 0; /* reserved */ 663 outbuf[buflen++] = id_len; /* length of data following */ 664 memcpy(outbuf + buflen, s->device_id, id_len); 665 buflen += id_len; 666 } 667 668 if (s->qdev.wwn) { 669 outbuf[buflen++] = 0x1; /* Binary */ 670 outbuf[buflen++] = 0x3; /* NAA */ 671 outbuf[buflen++] = 0; /* reserved */ 672 outbuf[buflen++] = 8; 673 stq_be_p(&outbuf[buflen], s->qdev.wwn); 674 buflen += 8; 675 } 676 677 if (s->qdev.port_wwn) { 678 outbuf[buflen++] = 0x61; /* SAS / Binary */ 679 outbuf[buflen++] = 0x93; /* PIV / Target port / NAA */ 680 outbuf[buflen++] = 0; /* reserved */ 681 outbuf[buflen++] = 8; 682 stq_be_p(&outbuf[buflen], s->qdev.port_wwn); 683 buflen += 8; 684 } 685 686 if (s->port_index) { 687 outbuf[buflen++] = 0x61; /* SAS / Binary */ 688 689 /* PIV/Target port/relative target port */ 690 outbuf[buflen++] = 0x94; 691 692 outbuf[buflen++] = 0; /* reserved */ 693 outbuf[buflen++] = 4; 694 stw_be_p(&outbuf[buflen + 2], s->port_index); 695 buflen += 4; 696 } 697 break; 698 } 699 case 0xb0: /* block limits */ 700 { 701 SCSIBlockLimits bl = {}; 702 703 if (s->qdev.type == TYPE_ROM) { 704 trace_scsi_disk_emulate_vpd_page_b0_not_supported(); 705 return -1; 706 } 707 bl.wsnz = 1; 708 bl.unmap_sectors = 709 s->qdev.conf.discard_granularity / s->qdev.blocksize; 710 bl.min_io_size = 711 s->qdev.conf.min_io_size / s->qdev.blocksize; 712 bl.opt_io_size = 713 s->qdev.conf.opt_io_size / s->qdev.blocksize; 714 bl.max_unmap_sectors = 715 s->max_unmap_size / s->qdev.blocksize; 716 bl.max_io_sectors = 717 s->max_io_size / s->qdev.blocksize; 718 /* 255 descriptors fit in 4 KiB with an 8-byte header */ 719 bl.max_unmap_descr = 255; 720 721 if (s->qdev.type == TYPE_DISK) { 722 int max_transfer_blk = blk_get_max_transfer(s->qdev.conf.blk); 723 int max_io_sectors_blk = 724 max_transfer_blk / s->qdev.blocksize; 725 726 bl.max_io_sectors = 727 MIN_NON_ZERO(max_io_sectors_blk, bl.max_io_sectors); 728 } 729 buflen += scsi_emulate_block_limits(outbuf + buflen, &bl); 730 break; 731 } 732 case 0xb1: /* block device characteristics */ 733 { 734 buflen = 0x40; 735 outbuf[4] = (s->rotation_rate >> 8) & 0xff; 736 outbuf[5] = s->rotation_rate & 0xff; 737 outbuf[6] = 0; /* PRODUCT TYPE */ 738 outbuf[7] = 0; /* WABEREQ | WACEREQ | NOMINAL FORM FACTOR */ 739 outbuf[8] = 0; /* VBULS */ 740 break; 741 } 742 case 0xb2: /* thin provisioning */ 743 { 744 buflen = 8; 745 outbuf[4] = 0; 746 outbuf[5] = 0xe0; /* unmap & write_same 10/16 all supported */ 747 outbuf[6] = s->qdev.conf.discard_granularity ? 
2 : 1; 748 outbuf[7] = 0; 749 break; 750 } 751 default: 752 return -1; 753 } 754 /* done with EVPD */ 755 assert(buflen - start <= 255); 756 outbuf[start - 1] = buflen - start; 757 return buflen; 758 } 759 760 static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf) 761 { 762 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 763 int buflen = 0; 764 765 if (req->cmd.buf[1] & 0x1) { 766 /* Vital product data */ 767 return scsi_disk_emulate_vpd_page(req, outbuf); 768 } 769 770 /* Standard INQUIRY data */ 771 if (req->cmd.buf[2] != 0) { 772 return -1; 773 } 774 775 /* PAGE CODE == 0 */ 776 buflen = req->cmd.xfer; 777 if (buflen > SCSI_MAX_INQUIRY_LEN) { 778 buflen = SCSI_MAX_INQUIRY_LEN; 779 } 780 781 outbuf[0] = s->qdev.type & 0x1f; 782 outbuf[1] = (s->features & (1 << SCSI_DISK_F_REMOVABLE)) ? 0x80 : 0; 783 784 strpadcpy((char *) &outbuf[16], 16, s->product, ' '); 785 strpadcpy((char *) &outbuf[8], 8, s->vendor, ' '); 786 787 memset(&outbuf[32], 0, 4); 788 memcpy(&outbuf[32], s->version, MIN(4, strlen(s->version))); 789 /* 790 * We claim conformance to SPC-3, which is required for guests 791 * to ask for modern features like READ CAPACITY(16) or the 792 * block characteristics VPD page by default. Not all of SPC-3 793 * is actually implemented, but we're good enough. 794 */ 795 outbuf[2] = s->qdev.default_scsi_version; 796 outbuf[3] = 2 | 0x10; /* Format 2, HiSup */ 797 798 if (buflen > 36) { 799 outbuf[4] = buflen - 5; /* Additional Length = (Len - 1) - 4 */ 800 } else { 801 /* If the allocation length of CDB is too small, 802 the additional length is not adjusted */ 803 outbuf[4] = 36 - 5; 804 } 805 806 /* Sync data transfer and TCQ. */ 807 outbuf[7] = 0x10 | (req->bus->info->tcq ? 0x02 : 0); 808 return buflen; 809 } 810 811 static inline bool media_is_dvd(SCSIDiskState *s) 812 { 813 uint64_t nb_sectors; 814 if (s->qdev.type != TYPE_ROM) { 815 return false; 816 } 817 if (!blk_is_available(s->qdev.conf.blk)) { 818 return false; 819 } 820 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 821 return nb_sectors > CD_MAX_SECTORS; 822 } 823 824 static inline bool media_is_cd(SCSIDiskState *s) 825 { 826 uint64_t nb_sectors; 827 if (s->qdev.type != TYPE_ROM) { 828 return false; 829 } 830 if (!blk_is_available(s->qdev.conf.blk)) { 831 return false; 832 } 833 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 834 return nb_sectors <= CD_MAX_SECTORS; 835 } 836 837 static int scsi_read_disc_information(SCSIDiskState *s, SCSIDiskReq *r, 838 uint8_t *outbuf) 839 { 840 uint8_t type = r->req.cmd.buf[1] & 7; 841 842 if (s->qdev.type != TYPE_ROM) { 843 return -1; 844 } 845 846 /* Types 1/2 are only defined for Blu-Ray. 
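Only the standard disc information block (type 0) is emulated here.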
*/ 847 if (type != 0) { 848 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 849 return -1; 850 } 851 852 memset(outbuf, 0, 34); 853 outbuf[1] = 32; 854 outbuf[2] = 0xe; /* last session complete, disc finalized */ 855 outbuf[3] = 1; /* first track on disc */ 856 outbuf[4] = 1; /* # of sessions */ 857 outbuf[5] = 1; /* first track of last session */ 858 outbuf[6] = 1; /* last track of last session */ 859 outbuf[7] = 0x20; /* unrestricted use */ 860 outbuf[8] = 0x00; /* CD-ROM or DVD-ROM */ 861 /* 9-10-11: most significant byte corresponding bytes 4-5-6 */ 862 /* 12-23: not meaningful for CD-ROM or DVD-ROM */ 863 /* 24-31: disc bar code */ 864 /* 32: disc application code */ 865 /* 33: number of OPC tables */ 866 867 return 34; 868 } 869 870 static int scsi_read_dvd_structure(SCSIDiskState *s, SCSIDiskReq *r, 871 uint8_t *outbuf) 872 { 873 static const int rds_caps_size[5] = { 874 [0] = 2048 + 4, 875 [1] = 4 + 4, 876 [3] = 188 + 4, 877 [4] = 2048 + 4, 878 }; 879 880 uint8_t media = r->req.cmd.buf[1]; 881 uint8_t layer = r->req.cmd.buf[6]; 882 uint8_t format = r->req.cmd.buf[7]; 883 int size = -1; 884 885 if (s->qdev.type != TYPE_ROM) { 886 return -1; 887 } 888 if (media != 0) { 889 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 890 return -1; 891 } 892 893 if (format != 0xff) { 894 if (!blk_is_available(s->qdev.conf.blk)) { 895 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM)); 896 return -1; 897 } 898 if (media_is_cd(s)) { 899 scsi_check_condition(r, SENSE_CODE(INCOMPATIBLE_FORMAT)); 900 return -1; 901 } 902 if (format >= ARRAY_SIZE(rds_caps_size)) { 903 return -1; 904 } 905 size = rds_caps_size[format]; 906 memset(outbuf, 0, size); 907 } 908 909 switch (format) { 910 case 0x00: { 911 /* Physical format information */ 912 uint64_t nb_sectors; 913 if (layer != 0) { 914 goto fail; 915 } 916 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 917 918 outbuf[4] = 1; /* DVD-ROM, part version 1 */ 919 outbuf[5] = 0xf; /* 120mm disc, minimum rate unspecified */ 920 outbuf[6] = 1; /* one layer, read-only (per MMC-2 spec) */ 921 outbuf[7] = 0; /* default densities */ 922 923 stl_be_p(&outbuf[12], (nb_sectors >> 2) - 1); /* end sector */ 924 stl_be_p(&outbuf[16], (nb_sectors >> 2) - 1); /* l0 end sector */ 925 break; 926 } 927 928 case 0x01: /* DVD copyright information, all zeros */ 929 break; 930 931 case 0x03: /* BCA information - invalid field for no BCA info */ 932 return -1; 933 934 case 0x04: /* DVD disc manufacturing information, all zeros */ 935 break; 936 937 case 0xff: { /* List capabilities */ 938 int i; 939 size = 4; 940 for (i = 0; i < ARRAY_SIZE(rds_caps_size); i++) { 941 if (!rds_caps_size[i]) { 942 continue; 943 } 944 outbuf[size] = i; 945 outbuf[size + 1] = 0x40; /* Not writable, readable */ 946 stw_be_p(&outbuf[size + 2], rds_caps_size[i]); 947 size += 4; 948 } 949 break; 950 } 951 952 default: 953 return -1; 954 } 955 956 /* Size of buffer, not including 2 byte size field */ 957 stw_be_p(outbuf, size - 2); 958 return size; 959 960 fail: 961 return -1; 962 } 963 964 static int scsi_event_status_media(SCSIDiskState *s, uint8_t *outbuf) 965 { 966 uint8_t event_code, media_status; 967 968 media_status = 0; 969 if (s->tray_open) { 970 media_status = MS_TRAY_OPEN; 971 } else if (blk_is_inserted(s->qdev.conf.blk)) { 972 media_status = MS_MEDIA_PRESENT; 973 } 974 975 /* Event notification descriptor */ 976 event_code = MEC_NO_CHANGE; 977 if (media_status != MS_TRAY_OPEN) { 978 if (s->media_event) { 979 event_code = MEC_NEW_MEDIA; 980 s->media_event = false; 981 } else if 
(s->eject_request) { 982 event_code = MEC_EJECT_REQUESTED; 983 s->eject_request = false; 984 } 985 } 986 987 outbuf[0] = event_code; 988 outbuf[1] = media_status; 989 990 /* These fields are reserved, just clear them. */ 991 outbuf[2] = 0; 992 outbuf[3] = 0; 993 return 4; 994 } 995 996 static int scsi_get_event_status_notification(SCSIDiskState *s, SCSIDiskReq *r, 997 uint8_t *outbuf) 998 { 999 int size; 1000 uint8_t *buf = r->req.cmd.buf; 1001 uint8_t notification_class_request = buf[4]; 1002 if (s->qdev.type != TYPE_ROM) { 1003 return -1; 1004 } 1005 if ((buf[1] & 1) == 0) { 1006 /* asynchronous */ 1007 return -1; 1008 } 1009 1010 size = 4; 1011 outbuf[0] = outbuf[1] = 0; 1012 outbuf[3] = 1 << GESN_MEDIA; /* supported events */ 1013 if (notification_class_request & (1 << GESN_MEDIA)) { 1014 outbuf[2] = GESN_MEDIA; 1015 size += scsi_event_status_media(s, &outbuf[size]); 1016 } else { 1017 outbuf[2] = 0x80; 1018 } 1019 stw_be_p(outbuf, size - 4); 1020 return size; 1021 } 1022 1023 static int scsi_get_configuration(SCSIDiskState *s, uint8_t *outbuf) 1024 { 1025 int current; 1026 1027 if (s->qdev.type != TYPE_ROM) { 1028 return -1; 1029 } 1030 1031 if (media_is_dvd(s)) { 1032 current = MMC_PROFILE_DVD_ROM; 1033 } else if (media_is_cd(s)) { 1034 current = MMC_PROFILE_CD_ROM; 1035 } else { 1036 current = MMC_PROFILE_NONE; 1037 } 1038 1039 memset(outbuf, 0, 40); 1040 stl_be_p(&outbuf[0], 36); /* Bytes after the data length field */ 1041 stw_be_p(&outbuf[6], current); 1042 /* outbuf[8] - outbuf[19]: Feature 0 - Profile list */ 1043 outbuf[10] = 0x03; /* persistent, current */ 1044 outbuf[11] = 8; /* two profiles */ 1045 stw_be_p(&outbuf[12], MMC_PROFILE_DVD_ROM); 1046 outbuf[14] = (current == MMC_PROFILE_DVD_ROM); 1047 stw_be_p(&outbuf[16], MMC_PROFILE_CD_ROM); 1048 outbuf[18] = (current == MMC_PROFILE_CD_ROM); 1049 /* outbuf[20] - outbuf[31]: Feature 1 - Core feature */ 1050 stw_be_p(&outbuf[20], 1); 1051 outbuf[22] = 0x08 | 0x03; /* version 2, persistent, current */ 1052 outbuf[23] = 8; 1053 stl_be_p(&outbuf[24], 1); /* SCSI */ 1054 outbuf[28] = 1; /* DBE = 1, mandatory */ 1055 /* outbuf[32] - outbuf[39]: Feature 3 - Removable media feature */ 1056 stw_be_p(&outbuf[32], 3); 1057 outbuf[34] = 0x08 | 0x03; /* version 2, persistent, current */ 1058 outbuf[35] = 4; 1059 outbuf[36] = 0x39; /* tray, load=1, eject=1, unlocked at powerup, lock=1 */ 1060 /* TODO: Random readable, CD read, DVD read, drive serial number, 1061 power management */ 1062 return 40; 1063 } 1064 1065 static int scsi_emulate_mechanism_status(SCSIDiskState *s, uint8_t *outbuf) 1066 { 1067 if (s->qdev.type != TYPE_ROM) { 1068 return -1; 1069 } 1070 memset(outbuf, 0, 8); 1071 outbuf[5] = 1; /* CD-ROM */ 1072 return 8; 1073 } 1074 1075 static int mode_sense_page(SCSIDiskState *s, int page, uint8_t **p_outbuf, 1076 int page_control) 1077 { 1078 static const int mode_sense_valid[0x3f] = { 1079 [MODE_PAGE_HD_GEOMETRY] = (1 << TYPE_DISK), 1080 [MODE_PAGE_FLEXIBLE_DISK_GEOMETRY] = (1 << TYPE_DISK), 1081 [MODE_PAGE_CACHING] = (1 << TYPE_DISK) | (1 << TYPE_ROM), 1082 [MODE_PAGE_R_W_ERROR] = (1 << TYPE_DISK) | (1 << TYPE_ROM), 1083 [MODE_PAGE_AUDIO_CTL] = (1 << TYPE_ROM), 1084 [MODE_PAGE_CAPABILITIES] = (1 << TYPE_ROM), 1085 }; 1086 1087 uint8_t *p = *p_outbuf + 2; 1088 int length; 1089 1090 if ((mode_sense_valid[page] & (1 << s->qdev.type)) == 0) { 1091 return -1; 1092 } 1093 1094 /* 1095 * If Changeable Values are requested, a mask denoting those mode parameters 1096 * that are changeable shall be returned. 
As we currently don't support 1097 * parameter changes via MODE_SELECT all bits are returned set to zero. 1098 * The buffer was already memset to zero by the caller of this function. 1099 * 1100 * The offsets here are off by two compared to the descriptions in the 1101 * SCSI specs, because those include a 2-byte header. This is unfortunate, 1102 * but it is done so that offsets are consistent within our implementation 1103 * of MODE SENSE and MODE SELECT. MODE SELECT has to deal with both 1104 * 2-byte and 4-byte headers. 1105 */ 1106 switch (page) { 1107 case MODE_PAGE_HD_GEOMETRY: 1108 length = 0x16; 1109 if (page_control == 1) { /* Changeable Values */ 1110 break; 1111 } 1112 /* if a geometry hint is available, use it */ 1113 p[0] = (s->qdev.conf.cyls >> 16) & 0xff; 1114 p[1] = (s->qdev.conf.cyls >> 8) & 0xff; 1115 p[2] = s->qdev.conf.cyls & 0xff; 1116 p[3] = s->qdev.conf.heads & 0xff; 1117 /* Write precomp start cylinder, disabled */ 1118 p[4] = (s->qdev.conf.cyls >> 16) & 0xff; 1119 p[5] = (s->qdev.conf.cyls >> 8) & 0xff; 1120 p[6] = s->qdev.conf.cyls & 0xff; 1121 /* Reduced current start cylinder, disabled */ 1122 p[7] = (s->qdev.conf.cyls >> 16) & 0xff; 1123 p[8] = (s->qdev.conf.cyls >> 8) & 0xff; 1124 p[9] = s->qdev.conf.cyls & 0xff; 1125 /* Device step rate [ns], 200ns */ 1126 p[10] = 0; 1127 p[11] = 200; 1128 /* Landing zone cylinder */ 1129 p[12] = 0xff; 1130 p[13] = 0xff; 1131 p[14] = 0xff; 1132 /* Medium rotation rate [rpm], 5400 rpm */ 1133 p[18] = (5400 >> 8) & 0xff; 1134 p[19] = 5400 & 0xff; 1135 break; 1136 1137 case MODE_PAGE_FLEXIBLE_DISK_GEOMETRY: 1138 length = 0x1e; 1139 if (page_control == 1) { /* Changeable Values */ 1140 break; 1141 } 1142 /* Transfer rate [kbit/s], 5Mbit/s */ 1143 p[0] = 5000 >> 8; 1144 p[1] = 5000 & 0xff; 1145 /* if a geometry hint is available, use it */ 1146 p[2] = s->qdev.conf.heads & 0xff; 1147 p[3] = s->qdev.conf.secs & 0xff; 1148 p[4] = s->qdev.blocksize >> 8; 1149 p[6] = (s->qdev.conf.cyls >> 8) & 0xff; 1150 p[7] = s->qdev.conf.cyls & 0xff; 1151 /* Write precomp start cylinder, disabled */ 1152 p[8] = (s->qdev.conf.cyls >> 8) & 0xff; 1153 p[9] = s->qdev.conf.cyls & 0xff; 1154 /* Reduced current start cylinder, disabled */ 1155 p[10] = (s->qdev.conf.cyls >> 8) & 0xff; 1156 p[11] = s->qdev.conf.cyls & 0xff; 1157 /* Device step rate [100us], 100us */ 1158 p[12] = 0; 1159 p[13] = 1; 1160 /* Device step pulse width [us], 1us */ 1161 p[14] = 1; 1162 /* Device head settle delay [100us], 100us */ 1163 p[15] = 0; 1164 p[16] = 1; 1165 /* Motor on delay [0.1s], 0.1s */ 1166 p[17] = 1; 1167 /* Motor off delay [0.1s], 0.1s */ 1168 p[18] = 1; 1169 /* Medium rotation rate [rpm], 5400 rpm */ 1170 p[26] = (5400 >> 8) & 0xff; 1171 p[27] = 5400 & 0xff; 1172 break; 1173 1174 case MODE_PAGE_CACHING: 1175 length = 0x12; 1176 if (page_control == 1 || /* Changeable Values */ 1177 blk_enable_write_cache(s->qdev.conf.blk)) { 1178 p[0] = 4; /* WCE */ 1179 } 1180 break; 1181 1182 case MODE_PAGE_R_W_ERROR: 1183 length = 10; 1184 if (page_control == 1) { /* Changeable Values */ 1185 break; 1186 } 1187 p[0] = 0x80; /* Automatic Write Reallocation Enabled */ 1188 if (s->qdev.type == TYPE_ROM) { 1189 p[1] = 0x20; /* Read Retry Count */ 1190 } 1191 break; 1192 1193 case MODE_PAGE_AUDIO_CTL: 1194 length = 14; 1195 break; 1196 1197 case MODE_PAGE_CAPABILITIES: 1198 length = 0x14; 1199 if (page_control == 1) { /* Changeable Values */ 1200 break; 1201 } 1202 1203 p[0] = 0x3b; /* CD-R & CD-RW read */ 1204 p[1] = 0; /* Writing not supported */ 1205 p[2] = 0x7f; /* Audio,
composite, digital out, 1206 mode 2 form 1&2, multi session */ 1207 p[3] = 0xff; /* CD DA, DA accurate, RW supported, 1208 RW corrected, C2 errors, ISRC, 1209 UPC, Bar code */ 1210 p[4] = 0x2d | (s->tray_locked ? 2 : 0); 1211 /* Locking supported, jumper present, eject, tray */ 1212 p[5] = 0; /* no volume & mute control, no 1213 changer */ 1214 p[6] = (50 * 176) >> 8; /* 50x read speed */ 1215 p[7] = (50 * 176) & 0xff; 1216 p[8] = 2 >> 8; /* Two volume levels */ 1217 p[9] = 2 & 0xff; 1218 p[10] = 2048 >> 8; /* 2M buffer */ 1219 p[11] = 2048 & 0xff; 1220 p[12] = (16 * 176) >> 8; /* 16x read speed current */ 1221 p[13] = (16 * 176) & 0xff; 1222 p[16] = (16 * 176) >> 8; /* 16x write speed */ 1223 p[17] = (16 * 176) & 0xff; 1224 p[18] = (16 * 176) >> 8; /* 16x write speed current */ 1225 p[19] = (16 * 176) & 0xff; 1226 break; 1227 1228 default: 1229 return -1; 1230 } 1231 1232 assert(length < 256); 1233 (*p_outbuf)[0] = page; 1234 (*p_outbuf)[1] = length; 1235 *p_outbuf += length + 2; 1236 return length + 2; 1237 } 1238 1239 static int scsi_disk_emulate_mode_sense(SCSIDiskReq *r, uint8_t *outbuf) 1240 { 1241 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1242 uint64_t nb_sectors; 1243 bool dbd; 1244 int page, buflen, ret, page_control; 1245 uint8_t *p; 1246 uint8_t dev_specific_param; 1247 1248 dbd = (r->req.cmd.buf[1] & 0x8) != 0; 1249 page = r->req.cmd.buf[2] & 0x3f; 1250 page_control = (r->req.cmd.buf[2] & 0xc0) >> 6; 1251 1252 trace_scsi_disk_emulate_mode_sense((r->req.cmd.buf[0] == MODE_SENSE) ? 6 : 1253 10, page, r->req.cmd.xfer, page_control); 1254 memset(outbuf, 0, r->req.cmd.xfer); 1255 p = outbuf; 1256 1257 if (s->qdev.type == TYPE_DISK) { 1258 dev_specific_param = s->features & (1 << SCSI_DISK_F_DPOFUA) ? 0x10 : 0; 1259 if (!blk_is_writable(s->qdev.conf.blk)) { 1260 dev_specific_param |= 0x80; /* Readonly. */ 1261 } 1262 } else { 1263 /* MMC prescribes that CD/DVD drives have no block descriptors, 1264 * and defines no device-specific parameter. */ 1265 dev_specific_param = 0x00; 1266 dbd = true; 1267 } 1268 1269 if (r->req.cmd.buf[0] == MODE_SENSE) { 1270 p[1] = 0; /* Default media type. */ 1271 p[2] = dev_specific_param; 1272 p[3] = 0; /* Block descriptor length. */ 1273 p += 4; 1274 } else { /* MODE_SENSE_10 */ 1275 p[2] = 0; /* Default media type. */ 1276 p[3] = dev_specific_param; 1277 p[6] = p[7] = 0; /* Block descriptor length. 
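Overwritten below if a block descriptor is appended.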
*/ 1278 p += 8; 1279 } 1280 1281 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 1282 if (!dbd && nb_sectors) { 1283 if (r->req.cmd.buf[0] == MODE_SENSE) { 1284 outbuf[3] = 8; /* Block descriptor length */ 1285 } else { /* MODE_SENSE_10 */ 1286 outbuf[7] = 8; /* Block descriptor length */ 1287 } 1288 nb_sectors /= (s->qdev.blocksize / BDRV_SECTOR_SIZE); 1289 if (nb_sectors > 0xffffff) { 1290 nb_sectors = 0; 1291 } 1292 p[0] = 0; /* media density code */ 1293 p[1] = (nb_sectors >> 16) & 0xff; 1294 p[2] = (nb_sectors >> 8) & 0xff; 1295 p[3] = nb_sectors & 0xff; 1296 p[4] = 0; /* reserved */ 1297 p[5] = 0; /* bytes 5-7 are the sector size in bytes */ 1298 p[6] = s->qdev.blocksize >> 8; 1299 p[7] = 0; 1300 p += 8; 1301 } 1302 1303 if (page_control == 3) { 1304 /* Saved Values */ 1305 scsi_check_condition(r, SENSE_CODE(SAVING_PARAMS_NOT_SUPPORTED)); 1306 return -1; 1307 } 1308 1309 if (page == 0x3f) { 1310 for (page = 0; page <= 0x3e; page++) { 1311 mode_sense_page(s, page, &p, page_control); 1312 } 1313 } else { 1314 ret = mode_sense_page(s, page, &p, page_control); 1315 if (ret == -1) { 1316 return -1; 1317 } 1318 } 1319 1320 buflen = p - outbuf; 1321 /* 1322 * The mode data length field specifies the length in bytes of the 1323 * following data that is available to be transferred. The mode data 1324 * length does not include itself. 1325 */ 1326 if (r->req.cmd.buf[0] == MODE_SENSE) { 1327 outbuf[0] = buflen - 1; 1328 } else { /* MODE_SENSE_10 */ 1329 outbuf[0] = ((buflen - 2) >> 8) & 0xff; 1330 outbuf[1] = (buflen - 2) & 0xff; 1331 } 1332 return buflen; 1333 } 1334 1335 static int scsi_disk_emulate_read_toc(SCSIRequest *req, uint8_t *outbuf) 1336 { 1337 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 1338 int start_track, format, msf, toclen; 1339 uint64_t nb_sectors; 1340 1341 msf = req->cmd.buf[1] & 2; 1342 format = req->cmd.buf[2] & 0xf; 1343 start_track = req->cmd.buf[6]; 1344 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 1345 trace_scsi_disk_emulate_read_toc(start_track, format, msf >> 1); 1346 nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE; 1347 switch (format) { 1348 case 0: 1349 toclen = cdrom_read_toc(nb_sectors, outbuf, msf, start_track); 1350 break; 1351 case 1: 1352 /* multi session : only a single session defined */ 1353 toclen = 12; 1354 memset(outbuf, 0, 12); 1355 outbuf[1] = 0x0a; 1356 outbuf[2] = 0x01; 1357 outbuf[3] = 0x01; 1358 break; 1359 case 2: 1360 toclen = cdrom_read_toc_raw(nb_sectors, outbuf, msf, start_track); 1361 break; 1362 default: 1363 return -1; 1364 } 1365 return toclen; 1366 } 1367 1368 static int scsi_disk_emulate_start_stop(SCSIDiskReq *r) 1369 { 1370 SCSIRequest *req = &r->req; 1371 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 1372 bool start = req->cmd.buf[4] & 1; 1373 bool loej = req->cmd.buf[4] & 2; /* load on start, eject on !start */ 1374 int pwrcnd = req->cmd.buf[4] & 0xf0; 1375 1376 if (pwrcnd) { 1377 /* eject/load only happens for power condition == 0 */ 1378 return 0; 1379 } 1380 1381 if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) && loej) { 1382 if (!start && !s->tray_open && s->tray_locked) { 1383 scsi_check_condition(r, 1384 blk_is_inserted(s->qdev.conf.blk) 1385 ? 
SENSE_CODE(ILLEGAL_REQ_REMOVAL_PREVENTED) 1386 : SENSE_CODE(NOT_READY_REMOVAL_PREVENTED)); 1387 return -1; 1388 } 1389 1390 if (s->tray_open != !start) { 1391 blk_eject(s->qdev.conf.blk, !start); 1392 s->tray_open = !start; 1393 } 1394 } 1395 return 0; 1396 } 1397 1398 static void scsi_disk_emulate_read_data(SCSIRequest *req) 1399 { 1400 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 1401 int buflen = r->iov.iov_len; 1402 1403 if (buflen) { 1404 trace_scsi_disk_emulate_read_data(buflen); 1405 r->iov.iov_len = 0; 1406 r->started = true; 1407 scsi_req_data(&r->req, buflen); 1408 return; 1409 } 1410 1411 /* This also clears the sense buffer for REQUEST SENSE. */ 1412 scsi_req_complete(&r->req, GOOD); 1413 } 1414 1415 static int scsi_disk_check_mode_select(SCSIDiskState *s, int page, 1416 uint8_t *inbuf, int inlen) 1417 { 1418 uint8_t mode_current[SCSI_MAX_MODE_LEN]; 1419 uint8_t mode_changeable[SCSI_MAX_MODE_LEN]; 1420 uint8_t *p; 1421 int len, expected_len, changeable_len, i; 1422 1423 /* The input buffer does not include the page header, so it is 1424 * off by 2 bytes. 1425 */ 1426 expected_len = inlen + 2; 1427 if (expected_len > SCSI_MAX_MODE_LEN) { 1428 return -1; 1429 } 1430 1431 p = mode_current; 1432 memset(mode_current, 0, inlen + 2); 1433 len = mode_sense_page(s, page, &p, 0); 1434 if (len < 0 || len != expected_len) { 1435 return -1; 1436 } 1437 1438 p = mode_changeable; 1439 memset(mode_changeable, 0, inlen + 2); 1440 changeable_len = mode_sense_page(s, page, &p, 1); 1441 assert(changeable_len == len); 1442 1443 /* Check that unchangeable bits are the same as what MODE SENSE 1444 * would return. 1445 */ 1446 for (i = 2; i < len; i++) { 1447 if (((mode_current[i] ^ inbuf[i - 2]) & ~mode_changeable[i]) != 0) { 1448 return -1; 1449 } 1450 } 1451 return 0; 1452 } 1453 1454 static void scsi_disk_apply_mode_select(SCSIDiskState *s, int page, uint8_t *p) 1455 { 1456 switch (page) { 1457 case MODE_PAGE_CACHING: 1458 blk_set_enable_write_cache(s->qdev.conf.blk, (p[0] & 4) != 0); 1459 break; 1460 1461 default: 1462 break; 1463 } 1464 } 1465 1466 static int mode_select_pages(SCSIDiskReq *r, uint8_t *p, int len, bool change) 1467 { 1468 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1469 1470 while (len > 0) { 1471 int page, subpage, page_len; 1472 1473 /* Parse both possible formats for the mode page headers. 
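When bit 6 (SPF) of the first byte is set, the page uses the long header with a subpage number and a 16-bit length field; otherwise the short header with an 8-bit length is used.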
*/ 1474 page = p[0] & 0x3f; 1475 if (p[0] & 0x40) { 1476 if (len < 4) { 1477 goto invalid_param_len; 1478 } 1479 subpage = p[1]; 1480 page_len = lduw_be_p(&p[2]); 1481 p += 4; 1482 len -= 4; 1483 } else { 1484 if (len < 2) { 1485 goto invalid_param_len; 1486 } 1487 subpage = 0; 1488 page_len = p[1]; 1489 p += 2; 1490 len -= 2; 1491 } 1492 1493 if (subpage) { 1494 goto invalid_param; 1495 } 1496 if (page_len > len) { 1497 goto invalid_param_len; 1498 } 1499 1500 if (!change) { 1501 if (scsi_disk_check_mode_select(s, page, p, page_len) < 0) { 1502 goto invalid_param; 1503 } 1504 } else { 1505 scsi_disk_apply_mode_select(s, page, p); 1506 } 1507 1508 p += page_len; 1509 len -= page_len; 1510 } 1511 return 0; 1512 1513 invalid_param: 1514 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM)); 1515 return -1; 1516 1517 invalid_param_len: 1518 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN)); 1519 return -1; 1520 } 1521 1522 static void scsi_disk_emulate_mode_select(SCSIDiskReq *r, uint8_t *inbuf) 1523 { 1524 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1525 uint8_t *p = inbuf; 1526 int cmd = r->req.cmd.buf[0]; 1527 int len = r->req.cmd.xfer; 1528 int hdr_len = (cmd == MODE_SELECT ? 4 : 8); 1529 int bd_len; 1530 int pass; 1531 1532 /* We only support PF=1, SP=0. */ 1533 if ((r->req.cmd.buf[1] & 0x11) != 0x10) { 1534 goto invalid_field; 1535 } 1536 1537 if (len < hdr_len) { 1538 goto invalid_param_len; 1539 } 1540 1541 bd_len = (cmd == MODE_SELECT ? p[3] : lduw_be_p(&p[6])); 1542 len -= hdr_len; 1543 p += hdr_len; 1544 if (len < bd_len) { 1545 goto invalid_param_len; 1546 } 1547 if (bd_len != 0 && bd_len != 8) { 1548 goto invalid_param; 1549 } 1550 1551 len -= bd_len; 1552 p += bd_len; 1553 1554 /* Ensure no change is made if there is an error! */ 1555 for (pass = 0; pass < 2; pass++) { 1556 if (mode_select_pages(r, p, len, pass == 1) < 0) { 1557 assert(pass == 0); 1558 return; 1559 } 1560 } 1561 if (!blk_enable_write_cache(s->qdev.conf.blk)) { 1562 /* The request is used as the AIO opaque value, so add a ref. */ 1563 scsi_req_ref(&r->req); 1564 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0, 1565 BLOCK_ACCT_FLUSH); 1566 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r); 1567 return; 1568 } 1569 1570 scsi_req_complete(&r->req, GOOD); 1571 return; 1572 1573 invalid_param: 1574 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM)); 1575 return; 1576 1577 invalid_param_len: 1578 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN)); 1579 return; 1580 1581 invalid_field: 1582 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 1583 } 1584 1585 static inline bool check_lba_range(SCSIDiskState *s, 1586 uint64_t sector_num, uint32_t nb_sectors) 1587 { 1588 /* 1589 * The first line tests that no overflow happens when computing the last 1590 * sector. The second line tests that the last accessed sector is in 1591 * range. 1592 * 1593 * Careful, the computations should not underflow for nb_sectors == 0, 1594 * and a 0-block read to the first LBA beyond the end of device is 1595 * valid. 
1596 */ 1597 return (sector_num <= sector_num + nb_sectors && 1598 sector_num + nb_sectors <= s->qdev.max_lba + 1); 1599 } 1600 1601 typedef struct UnmapCBData { 1602 SCSIDiskReq *r; 1603 uint8_t *inbuf; 1604 int count; 1605 } UnmapCBData; 1606 1607 static void scsi_unmap_complete(void *opaque, int ret); 1608 1609 static void scsi_unmap_complete_noio(UnmapCBData *data, int ret) 1610 { 1611 SCSIDiskReq *r = data->r; 1612 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1613 1614 assert(r->req.aiocb == NULL); 1615 1616 if (data->count > 0) { 1617 r->sector = ldq_be_p(&data->inbuf[0]) 1618 * (s->qdev.blocksize / BDRV_SECTOR_SIZE); 1619 r->sector_count = (ldl_be_p(&data->inbuf[8]) & 0xffffffffULL) 1620 * (s->qdev.blocksize / BDRV_SECTOR_SIZE); 1621 if (!check_lba_range(s, r->sector, r->sector_count)) { 1622 block_acct_invalid(blk_get_stats(s->qdev.conf.blk), 1623 BLOCK_ACCT_UNMAP); 1624 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE)); 1625 goto done; 1626 } 1627 1628 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 1629 r->sector_count * BDRV_SECTOR_SIZE, 1630 BLOCK_ACCT_UNMAP); 1631 1632 r->req.aiocb = blk_aio_pdiscard(s->qdev.conf.blk, 1633 r->sector * BDRV_SECTOR_SIZE, 1634 r->sector_count * BDRV_SECTOR_SIZE, 1635 scsi_unmap_complete, data); 1636 data->count--; 1637 data->inbuf += 16; 1638 return; 1639 } 1640 1641 scsi_req_complete(&r->req, GOOD); 1642 1643 done: 1644 scsi_req_unref(&r->req); 1645 g_free(data); 1646 } 1647 1648 static void scsi_unmap_complete(void *opaque, int ret) 1649 { 1650 UnmapCBData *data = opaque; 1651 SCSIDiskReq *r = data->r; 1652 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1653 1654 assert(r->req.aiocb != NULL); 1655 r->req.aiocb = NULL; 1656 1657 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); 1658 if (scsi_disk_req_check_error(r, ret, true)) { 1659 scsi_req_unref(&r->req); 1660 g_free(data); 1661 } else { 1662 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); 1663 scsi_unmap_complete_noio(data, ret); 1664 } 1665 aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); 1666 } 1667 1668 static void scsi_disk_emulate_unmap(SCSIDiskReq *r, uint8_t *inbuf) 1669 { 1670 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1671 uint8_t *p = inbuf; 1672 int len = r->req.cmd.xfer; 1673 UnmapCBData *data; 1674 1675 /* Reject ANCHOR=1. */ 1676 if (r->req.cmd.buf[1] & 0x1) { 1677 goto invalid_field; 1678 } 1679 1680 if (len < 8) { 1681 goto invalid_param_len; 1682 } 1683 if (len < lduw_be_p(&p[0]) + 2) { 1684 goto invalid_param_len; 1685 } 1686 if (len < lduw_be_p(&p[2]) + 8) { 1687 goto invalid_param_len; 1688 } 1689 if (lduw_be_p(&p[2]) & 15) { 1690 goto invalid_param_len; 1691 } 1692 1693 if (!blk_is_writable(s->qdev.conf.blk)) { 1694 block_acct_invalid(blk_get_stats(s->qdev.conf.blk), BLOCK_ACCT_UNMAP); 1695 scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED)); 1696 return; 1697 } 1698 1699 data = g_new0(UnmapCBData, 1); 1700 data->r = r; 1701 data->inbuf = &p[8]; 1702 data->count = lduw_be_p(&p[2]) >> 4; 1703 1704 /* The matching unref is in scsi_unmap_complete, before data is freed. 
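Each discard completion re-enters scsi_unmap_complete_noio() to issue the next descriptor until the list is exhausted.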
*/ 1705 scsi_req_ref(&r->req); 1706 scsi_unmap_complete_noio(data, 0); 1707 return; 1708 1709 invalid_param_len: 1710 block_acct_invalid(blk_get_stats(s->qdev.conf.blk), BLOCK_ACCT_UNMAP); 1711 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN)); 1712 return; 1713 1714 invalid_field: 1715 block_acct_invalid(blk_get_stats(s->qdev.conf.blk), BLOCK_ACCT_UNMAP); 1716 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 1717 } 1718 1719 typedef struct WriteSameCBData { 1720 SCSIDiskReq *r; 1721 int64_t sector; 1722 int nb_sectors; 1723 QEMUIOVector qiov; 1724 struct iovec iov; 1725 } WriteSameCBData; 1726 1727 static void scsi_write_same_complete(void *opaque, int ret) 1728 { 1729 WriteSameCBData *data = opaque; 1730 SCSIDiskReq *r = data->r; 1731 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1732 1733 assert(r->req.aiocb != NULL); 1734 r->req.aiocb = NULL; 1735 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); 1736 if (scsi_disk_req_check_error(r, ret, true)) { 1737 goto done; 1738 } 1739 1740 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); 1741 1742 data->nb_sectors -= data->iov.iov_len / BDRV_SECTOR_SIZE; 1743 data->sector += data->iov.iov_len / BDRV_SECTOR_SIZE; 1744 data->iov.iov_len = MIN(data->nb_sectors * BDRV_SECTOR_SIZE, 1745 data->iov.iov_len); 1746 if (data->iov.iov_len) { 1747 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 1748 data->iov.iov_len, BLOCK_ACCT_WRITE); 1749 /* Reinitialize qiov, to handle unaligned WRITE SAME request 1750 * where final qiov may need smaller size */ 1751 qemu_iovec_init_external(&data->qiov, &data->iov, 1); 1752 r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk, 1753 data->sector << BDRV_SECTOR_BITS, 1754 &data->qiov, 0, 1755 scsi_write_same_complete, data); 1756 aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); 1757 return; 1758 } 1759 1760 scsi_req_complete(&r->req, GOOD); 1761 1762 done: 1763 scsi_req_unref(&r->req); 1764 qemu_vfree(data->iov.iov_base); 1765 g_free(data); 1766 aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); 1767 } 1768 1769 static void scsi_disk_emulate_write_same(SCSIDiskReq *r, uint8_t *inbuf) 1770 { 1771 SCSIRequest *req = &r->req; 1772 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 1773 uint32_t nb_sectors = scsi_data_cdb_xfer(r->req.cmd.buf); 1774 WriteSameCBData *data; 1775 uint8_t *buf; 1776 int i; 1777 1778 /* Fail if PBDATA=1 or LBDATA=1 or ANCHOR=1. */ 1779 if (nb_sectors == 0 || (req->cmd.buf[1] & 0x16)) { 1780 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 1781 return; 1782 } 1783 1784 if (!blk_is_writable(s->qdev.conf.blk)) { 1785 scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED)); 1786 return; 1787 } 1788 if (!check_lba_range(s, r->req.cmd.lba, nb_sectors)) { 1789 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE)); 1790 return; 1791 } 1792 1793 if ((req->cmd.buf[1] & 0x1) || buffer_is_zero(inbuf, s->qdev.blocksize)) { 1794 int flags = (req->cmd.buf[1] & 0x8) ? BDRV_REQ_MAY_UNMAP : 0; 1795 1796 /* The request is used as the AIO opaque value, so add a ref. 
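The reference is dropped by scsi_aio_complete() once the zero-write finishes.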
*/ 1797 scsi_req_ref(&r->req); 1798 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 1799 nb_sectors * s->qdev.blocksize, 1800 BLOCK_ACCT_WRITE); 1801 r->req.aiocb = blk_aio_pwrite_zeroes(s->qdev.conf.blk, 1802 r->req.cmd.lba * s->qdev.blocksize, 1803 nb_sectors * s->qdev.blocksize, 1804 flags, scsi_aio_complete, r); 1805 return; 1806 } 1807 1808 data = g_new0(WriteSameCBData, 1); 1809 data->r = r; 1810 data->sector = r->req.cmd.lba * (s->qdev.blocksize / BDRV_SECTOR_SIZE); 1811 data->nb_sectors = nb_sectors * (s->qdev.blocksize / BDRV_SECTOR_SIZE); 1812 data->iov.iov_len = MIN(data->nb_sectors * BDRV_SECTOR_SIZE, 1813 SCSI_WRITE_SAME_MAX); 1814 data->iov.iov_base = buf = blk_blockalign(s->qdev.conf.blk, 1815 data->iov.iov_len); 1816 qemu_iovec_init_external(&data->qiov, &data->iov, 1); 1817 1818 for (i = 0; i < data->iov.iov_len; i += s->qdev.blocksize) { 1819 memcpy(&buf[i], inbuf, s->qdev.blocksize); 1820 } 1821 1822 scsi_req_ref(&r->req); 1823 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 1824 data->iov.iov_len, BLOCK_ACCT_WRITE); 1825 r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk, 1826 data->sector << BDRV_SECTOR_BITS, 1827 &data->qiov, 0, 1828 scsi_write_same_complete, data); 1829 } 1830 1831 static void scsi_disk_emulate_write_data(SCSIRequest *req) 1832 { 1833 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 1834 1835 if (r->iov.iov_len) { 1836 int buflen = r->iov.iov_len; 1837 trace_scsi_disk_emulate_write_data(buflen); 1838 r->iov.iov_len = 0; 1839 scsi_req_data(&r->req, buflen); 1840 return; 1841 } 1842 1843 switch (req->cmd.buf[0]) { 1844 case MODE_SELECT: 1845 case MODE_SELECT_10: 1846 /* This also clears the sense buffer for REQUEST SENSE. */ 1847 scsi_disk_emulate_mode_select(r, r->iov.iov_base); 1848 break; 1849 1850 case UNMAP: 1851 scsi_disk_emulate_unmap(r, r->iov.iov_base); 1852 break; 1853 1854 case VERIFY_10: 1855 case VERIFY_12: 1856 case VERIFY_16: 1857 if (r->req.status == -1) { 1858 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 1859 } 1860 break; 1861 1862 case WRITE_SAME_10: 1863 case WRITE_SAME_16: 1864 scsi_disk_emulate_write_same(r, r->iov.iov_base); 1865 break; 1866 1867 default: 1868 abort(); 1869 } 1870 } 1871 1872 static int32_t scsi_disk_emulate_command(SCSIRequest *req, uint8_t *buf) 1873 { 1874 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 1875 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 1876 uint64_t nb_sectors; 1877 uint8_t *outbuf; 1878 int buflen; 1879 1880 switch (req->cmd.buf[0]) { 1881 case INQUIRY: 1882 case MODE_SENSE: 1883 case MODE_SENSE_10: 1884 case RESERVE: 1885 case RESERVE_10: 1886 case RELEASE: 1887 case RELEASE_10: 1888 case START_STOP: 1889 case ALLOW_MEDIUM_REMOVAL: 1890 case GET_CONFIGURATION: 1891 case GET_EVENT_STATUS_NOTIFICATION: 1892 case MECHANISM_STATUS: 1893 case REQUEST_SENSE: 1894 break; 1895 1896 default: 1897 if (!blk_is_available(s->qdev.conf.blk)) { 1898 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM)); 1899 return 0; 1900 } 1901 break; 1902 } 1903 1904 /* 1905 * FIXME: we shouldn't return anything bigger than 4k, but the code 1906 * requires the buffer to be as big as req->cmd.xfer in several 1907 * places. So, do not allow CDBs with a very large ALLOCATION 1908 * LENGTH. The real fix would be to modify scsi_read_data and 1909 * dma_buf_read, so that they return data beyond the buflen 1910 * as all zeros. 
1911 */ 1912 if (req->cmd.xfer > 65536) { 1913 goto illegal_request; 1914 } 1915 r->buflen = MAX(4096, req->cmd.xfer); 1916 1917 if (!r->iov.iov_base) { 1918 r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen); 1919 } 1920 1921 outbuf = r->iov.iov_base; 1922 memset(outbuf, 0, r->buflen); 1923 switch (req->cmd.buf[0]) { 1924 case TEST_UNIT_READY: 1925 assert(blk_is_available(s->qdev.conf.blk)); 1926 break; 1927 case INQUIRY: 1928 buflen = scsi_disk_emulate_inquiry(req, outbuf); 1929 if (buflen < 0) { 1930 goto illegal_request; 1931 } 1932 break; 1933 case MODE_SENSE: 1934 case MODE_SENSE_10: 1935 buflen = scsi_disk_emulate_mode_sense(r, outbuf); 1936 if (buflen < 0) { 1937 goto illegal_request; 1938 } 1939 break; 1940 case READ_TOC: 1941 buflen = scsi_disk_emulate_read_toc(req, outbuf); 1942 if (buflen < 0) { 1943 goto illegal_request; 1944 } 1945 break; 1946 case RESERVE: 1947 if (req->cmd.buf[1] & 1) { 1948 goto illegal_request; 1949 } 1950 break; 1951 case RESERVE_10: 1952 if (req->cmd.buf[1] & 3) { 1953 goto illegal_request; 1954 } 1955 break; 1956 case RELEASE: 1957 if (req->cmd.buf[1] & 1) { 1958 goto illegal_request; 1959 } 1960 break; 1961 case RELEASE_10: 1962 if (req->cmd.buf[1] & 3) { 1963 goto illegal_request; 1964 } 1965 break; 1966 case START_STOP: 1967 if (scsi_disk_emulate_start_stop(r) < 0) { 1968 return 0; 1969 } 1970 break; 1971 case ALLOW_MEDIUM_REMOVAL: 1972 s->tray_locked = req->cmd.buf[4] & 1; 1973 blk_lock_medium(s->qdev.conf.blk, req->cmd.buf[4] & 1); 1974 break; 1975 case READ_CAPACITY_10: 1976 /* The normal LEN field for this command is zero. */ 1977 memset(outbuf, 0, 8); 1978 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 1979 if (!nb_sectors) { 1980 scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY)); 1981 return 0; 1982 } 1983 if ((req->cmd.buf[8] & 1) == 0 && req->cmd.lba) { 1984 goto illegal_request; 1985 } 1986 nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE; 1987 /* Returned value is the address of the last sector. */ 1988 nb_sectors--; 1989 /* Remember the new size for read/write sanity checking. */ 1990 s->qdev.max_lba = nb_sectors; 1991 /* Clip to 2TB, instead of returning capacity modulo 2TB. */ 1992 if (nb_sectors > UINT32_MAX) { 1993 nb_sectors = UINT32_MAX; 1994 } 1995 outbuf[0] = (nb_sectors >> 24) & 0xff; 1996 outbuf[1] = (nb_sectors >> 16) & 0xff; 1997 outbuf[2] = (nb_sectors >> 8) & 0xff; 1998 outbuf[3] = nb_sectors & 0xff; 1999 outbuf[4] = 0; 2000 outbuf[5] = 0; 2001 outbuf[6] = s->qdev.blocksize >> 8; 2002 outbuf[7] = 0; 2003 break; 2004 case REQUEST_SENSE: 2005 /* Just return "NO SENSE". 
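scsi_convert_sense() is called with no input sense, so it synthesizes a NO SENSE response; the DESC bit in CDB byte 1 selects descriptor versus fixed format.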
*/ 2006 buflen = scsi_convert_sense(NULL, 0, outbuf, r->buflen, 2007 (req->cmd.buf[1] & 1) == 0); 2008 if (buflen < 0) { 2009 goto illegal_request; 2010 } 2011 break; 2012 case MECHANISM_STATUS: 2013 buflen = scsi_emulate_mechanism_status(s, outbuf); 2014 if (buflen < 0) { 2015 goto illegal_request; 2016 } 2017 break; 2018 case GET_CONFIGURATION: 2019 buflen = scsi_get_configuration(s, outbuf); 2020 if (buflen < 0) { 2021 goto illegal_request; 2022 } 2023 break; 2024 case GET_EVENT_STATUS_NOTIFICATION: 2025 buflen = scsi_get_event_status_notification(s, r, outbuf); 2026 if (buflen < 0) { 2027 goto illegal_request; 2028 } 2029 break; 2030 case READ_DISC_INFORMATION: 2031 buflen = scsi_read_disc_information(s, r, outbuf); 2032 if (buflen < 0) { 2033 goto illegal_request; 2034 } 2035 break; 2036 case READ_DVD_STRUCTURE: 2037 buflen = scsi_read_dvd_structure(s, r, outbuf); 2038 if (buflen < 0) { 2039 goto illegal_request; 2040 } 2041 break; 2042 case SERVICE_ACTION_IN_16: 2043 /* Service Action In subcommands. */ 2044 if ((req->cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) { 2045 trace_scsi_disk_emulate_command_SAI_16(); 2046 memset(outbuf, 0, req->cmd.xfer); 2047 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 2048 if (!nb_sectors) { 2049 scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY)); 2050 return 0; 2051 } 2052 if ((req->cmd.buf[14] & 1) == 0 && req->cmd.lba) { 2053 goto illegal_request; 2054 } 2055 nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE; 2056 /* Returned value is the address of the last sector. */ 2057 nb_sectors--; 2058 /* Remember the new size for read/write sanity checking. */ 2059 s->qdev.max_lba = nb_sectors; 2060 outbuf[0] = (nb_sectors >> 56) & 0xff; 2061 outbuf[1] = (nb_sectors >> 48) & 0xff; 2062 outbuf[2] = (nb_sectors >> 40) & 0xff; 2063 outbuf[3] = (nb_sectors >> 32) & 0xff; 2064 outbuf[4] = (nb_sectors >> 24) & 0xff; 2065 outbuf[5] = (nb_sectors >> 16) & 0xff; 2066 outbuf[6] = (nb_sectors >> 8) & 0xff; 2067 outbuf[7] = nb_sectors & 0xff; 2068 outbuf[8] = 0; 2069 outbuf[9] = 0; 2070 outbuf[10] = s->qdev.blocksize >> 8; 2071 outbuf[11] = 0; 2072 outbuf[12] = 0; 2073 outbuf[13] = get_physical_block_exp(&s->qdev.conf); 2074 2075 /* set TPE bit if the format supports discard */ 2076 if (s->qdev.conf.discard_granularity) { 2077 outbuf[14] = 0x80; 2078 } 2079 2080 /* Protection, exponent and lowest lba field left blank. */ 2081 break; 2082 } 2083 trace_scsi_disk_emulate_command_SAI_unsupported(); 2084 goto illegal_request; 2085 case SYNCHRONIZE_CACHE: 2086 /* The request is used as the AIO opaque value, so add a ref. 
*/ 2087 scsi_req_ref(&r->req); 2088 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0, 2089 BLOCK_ACCT_FLUSH); 2090 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r); 2091 return 0; 2092 case SEEK_10: 2093 trace_scsi_disk_emulate_command_SEEK_10(r->req.cmd.lba); 2094 if (r->req.cmd.lba > s->qdev.max_lba) { 2095 goto illegal_lba; 2096 } 2097 break; 2098 case MODE_SELECT: 2099 trace_scsi_disk_emulate_command_MODE_SELECT(r->req.cmd.xfer); 2100 break; 2101 case MODE_SELECT_10: 2102 trace_scsi_disk_emulate_command_MODE_SELECT_10(r->req.cmd.xfer); 2103 break; 2104 case UNMAP: 2105 trace_scsi_disk_emulate_command_UNMAP(r->req.cmd.xfer); 2106 break; 2107 case VERIFY_10: 2108 case VERIFY_12: 2109 case VERIFY_16: 2110 trace_scsi_disk_emulate_command_VERIFY((req->cmd.buf[1] >> 1) & 3); 2111 if (req->cmd.buf[1] & 6) { 2112 goto illegal_request; 2113 } 2114 break; 2115 case WRITE_SAME_10: 2116 case WRITE_SAME_16: 2117 trace_scsi_disk_emulate_command_WRITE_SAME( 2118 req->cmd.buf[0] == WRITE_SAME_10 ? 10 : 16, r->req.cmd.xfer); 2119 break; 2120 default: 2121 trace_scsi_disk_emulate_command_UNKNOWN(buf[0], 2122 scsi_command_name(buf[0])); 2123 scsi_check_condition(r, SENSE_CODE(INVALID_OPCODE)); 2124 return 0; 2125 } 2126 assert(!r->req.aiocb); 2127 r->iov.iov_len = MIN(r->buflen, req->cmd.xfer); 2128 if (r->iov.iov_len == 0) { 2129 scsi_req_complete(&r->req, GOOD); 2130 } 2131 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { 2132 assert(r->iov.iov_len == req->cmd.xfer); 2133 return -r->iov.iov_len; 2134 } else { 2135 return r->iov.iov_len; 2136 } 2137 2138 illegal_request: 2139 if (r->req.status == -1) { 2140 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 2141 } 2142 return 0; 2143 2144 illegal_lba: 2145 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE)); 2146 return 0; 2147 } 2148 2149 /* Execute a scsi command. Returns the length of the data expected by the 2150 command. This will be Positive for data transfers from the device 2151 (eg. disk reads), negative for transfers to the device (eg. disk writes), 2152 and zero if the command does not transfer any data. */ 2153 2154 static int32_t scsi_disk_dma_command(SCSIRequest *req, uint8_t *buf) 2155 { 2156 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 2157 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 2158 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s)); 2159 uint32_t len; 2160 uint8_t command; 2161 2162 command = buf[0]; 2163 2164 if (!blk_is_available(s->qdev.conf.blk)) { 2165 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM)); 2166 return 0; 2167 } 2168 2169 len = scsi_data_cdb_xfer(r->req.cmd.buf); 2170 switch (command) { 2171 case READ_6: 2172 case READ_10: 2173 case READ_12: 2174 case READ_16: 2175 trace_scsi_disk_dma_command_READ(r->req.cmd.lba, len); 2176 /* Protection information is not supported. For SCSI versions 2 and 2177 * older (as determined by snooping the guest's INQUIRY commands), 2178 * there is no RD/WR/VRPROTECT, so skip this check in these versions. 
2179 */ 2180 if (s->qdev.scsi_version > 2 && (r->req.cmd.buf[1] & 0xe0)) { 2181 goto illegal_request; 2182 } 2183 if (!check_lba_range(s, r->req.cmd.lba, len)) { 2184 goto illegal_lba; 2185 } 2186 r->sector = r->req.cmd.lba * (s->qdev.blocksize / BDRV_SECTOR_SIZE); 2187 r->sector_count = len * (s->qdev.blocksize / BDRV_SECTOR_SIZE); 2188 break; 2189 case WRITE_6: 2190 case WRITE_10: 2191 case WRITE_12: 2192 case WRITE_16: 2193 case WRITE_VERIFY_10: 2194 case WRITE_VERIFY_12: 2195 case WRITE_VERIFY_16: 2196 if (!blk_is_writable(s->qdev.conf.blk)) { 2197 scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED)); 2198 return 0; 2199 } 2200 trace_scsi_disk_dma_command_WRITE( 2201 (command & 0xe) == 0xe ? "And Verify " : "", 2202 r->req.cmd.lba, len); 2203 /* fall through */ 2204 case VERIFY_10: 2205 case VERIFY_12: 2206 case VERIFY_16: 2207 /* We get here only for BYTCHK == 0x01 and only for scsi-block. 2208 * As far as DMA is concerned, we can treat it the same as a write; 2209 * scsi_block_do_sgio will send VERIFY commands. 2210 */ 2211 if (s->qdev.scsi_version > 2 && (r->req.cmd.buf[1] & 0xe0)) { 2212 goto illegal_request; 2213 } 2214 if (!check_lba_range(s, r->req.cmd.lba, len)) { 2215 goto illegal_lba; 2216 } 2217 r->sector = r->req.cmd.lba * (s->qdev.blocksize / BDRV_SECTOR_SIZE); 2218 r->sector_count = len * (s->qdev.blocksize / BDRV_SECTOR_SIZE); 2219 break; 2220 default: 2221 abort(); 2222 illegal_request: 2223 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 2224 return 0; 2225 illegal_lba: 2226 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE)); 2227 return 0; 2228 } 2229 r->need_fua_emulation = sdc->need_fua_emulation(&r->req.cmd); 2230 if (r->sector_count == 0) { 2231 scsi_req_complete(&r->req, GOOD); 2232 } 2233 assert(r->iov.iov_len == 0); 2234 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { 2235 return -r->sector_count * BDRV_SECTOR_SIZE; 2236 } else { 2237 return r->sector_count * BDRV_SECTOR_SIZE; 2238 } 2239 } 2240 2241 static void scsi_disk_reset(DeviceState *dev) 2242 { 2243 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev.qdev, dev); 2244 uint64_t nb_sectors; 2245 2246 scsi_device_purge_requests(&s->qdev, SENSE_CODE(RESET)); 2247 2248 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 2249 nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE; 2250 if (nb_sectors) { 2251 nb_sectors--; 2252 } 2253 s->qdev.max_lba = nb_sectors; 2254 /* reset tray statuses */ 2255 s->tray_locked = 0; 2256 s->tray_open = 0; 2257 2258 s->qdev.scsi_version = s->qdev.default_scsi_version; 2259 } 2260 2261 static void scsi_disk_resize_cb(void *opaque) 2262 { 2263 SCSIDiskState *s = opaque; 2264 2265 /* SPC lists this sense code as available only for 2266 * direct-access devices. 2267 */ 2268 if (s->qdev.type == TYPE_DISK) { 2269 scsi_device_report_change(&s->qdev, SENSE_CODE(CAPACITY_CHANGED)); 2270 } 2271 } 2272 2273 static void scsi_cd_change_media_cb(void *opaque, bool load, Error **errp) 2274 { 2275 SCSIDiskState *s = opaque; 2276 2277 /* 2278 * When a CD gets changed, we have to report an ejected state and 2279 * then a loaded state to guests so that they detect tray 2280 * open/close and media change events. Guests that do not use 2281 * GET_EVENT_STATUS_NOTIFICATION to detect such tray open/close 2282 * states rely on this behavior. 2283 * 2284 * media_changed governs the state machine used for unit attention 2285 * report. media_event is used by GET EVENT STATUS NOTIFICATION. 
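 * Concretely: an eject (load == false) leaves the tray reported as open,
 * while a load first raises the NO MEDIUM unit attention set below and
 * then, once that has been reported, MEDIUM CHANGED via
 * scsi_disk_unit_attention_reported(), so the guest observes both
 * transitions.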
2286 */ 2287 s->media_changed = load; 2288 s->tray_open = !load; 2289 scsi_device_set_ua(&s->qdev, SENSE_CODE(UNIT_ATTENTION_NO_MEDIUM)); 2290 s->media_event = true; 2291 s->eject_request = false; 2292 } 2293 2294 static void scsi_cd_eject_request_cb(void *opaque, bool force) 2295 { 2296 SCSIDiskState *s = opaque; 2297 2298 s->eject_request = true; 2299 if (force) { 2300 s->tray_locked = false; 2301 } 2302 } 2303 2304 static bool scsi_cd_is_tray_open(void *opaque) 2305 { 2306 return ((SCSIDiskState *)opaque)->tray_open; 2307 } 2308 2309 static bool scsi_cd_is_medium_locked(void *opaque) 2310 { 2311 return ((SCSIDiskState *)opaque)->tray_locked; 2312 } 2313 2314 static const BlockDevOps scsi_disk_removable_block_ops = { 2315 .change_media_cb = scsi_cd_change_media_cb, 2316 .eject_request_cb = scsi_cd_eject_request_cb, 2317 .is_tray_open = scsi_cd_is_tray_open, 2318 .is_medium_locked = scsi_cd_is_medium_locked, 2319 2320 .resize_cb = scsi_disk_resize_cb, 2321 }; 2322 2323 static const BlockDevOps scsi_disk_block_ops = { 2324 .resize_cb = scsi_disk_resize_cb, 2325 }; 2326 2327 static void scsi_disk_unit_attention_reported(SCSIDevice *dev) 2328 { 2329 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2330 if (s->media_changed) { 2331 s->media_changed = false; 2332 scsi_device_set_ua(&s->qdev, SENSE_CODE(MEDIUM_CHANGED)); 2333 } 2334 } 2335 2336 static void scsi_realize(SCSIDevice *dev, Error **errp) 2337 { 2338 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2339 bool read_only; 2340 2341 if (!s->qdev.conf.blk) { 2342 error_setg(errp, "drive property not set"); 2343 return; 2344 } 2345 2346 if (!(s->features & (1 << SCSI_DISK_F_REMOVABLE)) && 2347 !blk_is_inserted(s->qdev.conf.blk)) { 2348 error_setg(errp, "Device needs media, but drive is empty"); 2349 return; 2350 } 2351 2352 if (!blkconf_blocksizes(&s->qdev.conf, errp)) { 2353 return; 2354 } 2355 2356 if (blk_get_aio_context(s->qdev.conf.blk) != qemu_get_aio_context() && 2357 !s->qdev.hba_supports_iothread) 2358 { 2359 error_setg(errp, "HBA does not support iothreads"); 2360 return; 2361 } 2362 2363 if (dev->type == TYPE_DISK) { 2364 if (!blkconf_geometry(&dev->conf, NULL, 65535, 255, 255, errp)) { 2365 return; 2366 } 2367 } 2368 2369 read_only = !blk_supports_write_perm(s->qdev.conf.blk); 2370 if (dev->type == TYPE_ROM) { 2371 read_only = true; 2372 } 2373 2374 if (!blkconf_apply_backend_options(&dev->conf, read_only, 2375 dev->type == TYPE_DISK, errp)) { 2376 return; 2377 } 2378 2379 if (s->qdev.conf.discard_granularity == -1) { 2380 s->qdev.conf.discard_granularity = 2381 MAX(s->qdev.conf.logical_block_size, DEFAULT_DISCARD_GRANULARITY); 2382 } 2383 2384 if (!s->version) { 2385 s->version = g_strdup(qemu_hw_version()); 2386 } 2387 if (!s->vendor) { 2388 s->vendor = g_strdup("QEMU"); 2389 } 2390 if (!s->device_id) { 2391 if (s->serial) { 2392 s->device_id = g_strdup_printf("%.20s", s->serial); 2393 } else { 2394 const char *str = blk_name(s->qdev.conf.blk); 2395 if (str && *str) { 2396 s->device_id = g_strdup(str); 2397 } 2398 } 2399 } 2400 2401 if (blk_is_sg(s->qdev.conf.blk)) { 2402 error_setg(errp, "unwanted /dev/sg*"); 2403 return; 2404 } 2405 2406 if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) && 2407 !(s->features & (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS))) { 2408 blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_removable_block_ops, s); 2409 } else { 2410 blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_block_ops, s); 2411 } 2412 blk_set_guest_block_size(s->qdev.conf.blk, s->qdev.blocksize); 2413 2414 
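/* Track an I/O status for this backend; the monitor reports it (query-block)
 * and it is used together with the rerror=/werror= error policies. */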
blk_iostatus_enable(s->qdev.conf.blk); 2415 2416 add_boot_device_lchs(&dev->qdev, NULL, 2417 dev->conf.lcyls, 2418 dev->conf.lheads, 2419 dev->conf.lsecs); 2420 } 2421 2422 static void scsi_unrealize(SCSIDevice *dev) 2423 { 2424 del_boot_device_lchs(&dev->qdev, NULL); 2425 } 2426 2427 static void scsi_hd_realize(SCSIDevice *dev, Error **errp) 2428 { 2429 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2430 AioContext *ctx = NULL; 2431 /* can happen for devices without drive. The error message for missing 2432 * backend will be issued in scsi_realize 2433 */ 2434 if (s->qdev.conf.blk) { 2435 ctx = blk_get_aio_context(s->qdev.conf.blk); 2436 aio_context_acquire(ctx); 2437 if (!blkconf_blocksizes(&s->qdev.conf, errp)) { 2438 goto out; 2439 } 2440 } 2441 s->qdev.blocksize = s->qdev.conf.logical_block_size; 2442 s->qdev.type = TYPE_DISK; 2443 if (!s->product) { 2444 s->product = g_strdup("QEMU HARDDISK"); 2445 } 2446 scsi_realize(&s->qdev, errp); 2447 out: 2448 if (ctx) { 2449 aio_context_release(ctx); 2450 } 2451 } 2452 2453 static void scsi_cd_realize(SCSIDevice *dev, Error **errp) 2454 { 2455 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2456 AioContext *ctx; 2457 int ret; 2458 2459 if (!dev->conf.blk) { 2460 /* Anonymous BlockBackend for an empty drive. As we put it into 2461 * dev->conf, qdev takes care of detaching on unplug. */ 2462 dev->conf.blk = blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL); 2463 ret = blk_attach_dev(dev->conf.blk, &dev->qdev); 2464 assert(ret == 0); 2465 } 2466 2467 ctx = blk_get_aio_context(dev->conf.blk); 2468 aio_context_acquire(ctx); 2469 s->qdev.blocksize = 2048; 2470 s->qdev.type = TYPE_ROM; 2471 s->features |= 1 << SCSI_DISK_F_REMOVABLE; 2472 if (!s->product) { 2473 s->product = g_strdup("QEMU CD-ROM"); 2474 } 2475 scsi_realize(&s->qdev, errp); 2476 aio_context_release(ctx); 2477 } 2478 2479 static void scsi_disk_realize(SCSIDevice *dev, Error **errp) 2480 { 2481 DriveInfo *dinfo; 2482 Error *local_err = NULL; 2483 2484 warn_report("'scsi-disk' is deprecated, " 2485 "please use 'scsi-hd' or 'scsi-cd' instead"); 2486 2487 if (!dev->conf.blk) { 2488 scsi_realize(dev, &local_err); 2489 assert(local_err); 2490 error_propagate(errp, local_err); 2491 return; 2492 } 2493 2494 dinfo = blk_legacy_dinfo(dev->conf.blk); 2495 if (dinfo && dinfo->media_cd) { 2496 scsi_cd_realize(dev, errp); 2497 } else { 2498 scsi_hd_realize(dev, errp); 2499 } 2500 } 2501 2502 static const SCSIReqOps scsi_disk_emulate_reqops = { 2503 .size = sizeof(SCSIDiskReq), 2504 .free_req = scsi_free_request, 2505 .send_command = scsi_disk_emulate_command, 2506 .read_data = scsi_disk_emulate_read_data, 2507 .write_data = scsi_disk_emulate_write_data, 2508 .get_buf = scsi_get_buf, 2509 }; 2510 2511 static const SCSIReqOps scsi_disk_dma_reqops = { 2512 .size = sizeof(SCSIDiskReq), 2513 .free_req = scsi_free_request, 2514 .send_command = scsi_disk_dma_command, 2515 .read_data = scsi_read_data, 2516 .write_data = scsi_write_data, 2517 .get_buf = scsi_get_buf, 2518 .load_request = scsi_disk_load_request, 2519 .save_request = scsi_disk_save_request, 2520 }; 2521 2522 static const SCSIReqOps *const scsi_disk_reqops_dispatch[256] = { 2523 [TEST_UNIT_READY] = &scsi_disk_emulate_reqops, 2524 [INQUIRY] = &scsi_disk_emulate_reqops, 2525 [MODE_SENSE] = &scsi_disk_emulate_reqops, 2526 [MODE_SENSE_10] = &scsi_disk_emulate_reqops, 2527 [START_STOP] = &scsi_disk_emulate_reqops, 2528 [ALLOW_MEDIUM_REMOVAL] = &scsi_disk_emulate_reqops, 2529 [READ_CAPACITY_10] = &scsi_disk_emulate_reqops, 2530 
[READ_TOC] = &scsi_disk_emulate_reqops, 2531 [READ_DVD_STRUCTURE] = &scsi_disk_emulate_reqops, 2532 [READ_DISC_INFORMATION] = &scsi_disk_emulate_reqops, 2533 [GET_CONFIGURATION] = &scsi_disk_emulate_reqops, 2534 [GET_EVENT_STATUS_NOTIFICATION] = &scsi_disk_emulate_reqops, 2535 [MECHANISM_STATUS] = &scsi_disk_emulate_reqops, 2536 [SERVICE_ACTION_IN_16] = &scsi_disk_emulate_reqops, 2537 [REQUEST_SENSE] = &scsi_disk_emulate_reqops, 2538 [SYNCHRONIZE_CACHE] = &scsi_disk_emulate_reqops, 2539 [SEEK_10] = &scsi_disk_emulate_reqops, 2540 [MODE_SELECT] = &scsi_disk_emulate_reqops, 2541 [MODE_SELECT_10] = &scsi_disk_emulate_reqops, 2542 [UNMAP] = &scsi_disk_emulate_reqops, 2543 [WRITE_SAME_10] = &scsi_disk_emulate_reqops, 2544 [WRITE_SAME_16] = &scsi_disk_emulate_reqops, 2545 [VERIFY_10] = &scsi_disk_emulate_reqops, 2546 [VERIFY_12] = &scsi_disk_emulate_reqops, 2547 [VERIFY_16] = &scsi_disk_emulate_reqops, 2548 2549 [READ_6] = &scsi_disk_dma_reqops, 2550 [READ_10] = &scsi_disk_dma_reqops, 2551 [READ_12] = &scsi_disk_dma_reqops, 2552 [READ_16] = &scsi_disk_dma_reqops, 2553 [WRITE_6] = &scsi_disk_dma_reqops, 2554 [WRITE_10] = &scsi_disk_dma_reqops, 2555 [WRITE_12] = &scsi_disk_dma_reqops, 2556 [WRITE_16] = &scsi_disk_dma_reqops, 2557 [WRITE_VERIFY_10] = &scsi_disk_dma_reqops, 2558 [WRITE_VERIFY_12] = &scsi_disk_dma_reqops, 2559 [WRITE_VERIFY_16] = &scsi_disk_dma_reqops, 2560 }; 2561 2562 static void scsi_disk_new_request_dump(uint32_t lun, uint32_t tag, uint8_t *buf) 2563 { 2564 int i; 2565 int len = scsi_cdb_length(buf); 2566 char *line_buffer, *p; 2567 2568 assert(len > 0 && len <= 16); 2569 line_buffer = g_malloc(len * 5 + 1); 2570 2571 for (i = 0, p = line_buffer; i < len; i++) { 2572 p += sprintf(p, " 0x%02x", buf[i]); 2573 } 2574 trace_scsi_disk_new_request(lun, tag, line_buffer); 2575 2576 g_free(line_buffer); 2577 } 2578 2579 static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun, 2580 uint8_t *buf, void *hba_private) 2581 { 2582 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d); 2583 SCSIRequest *req; 2584 const SCSIReqOps *ops; 2585 uint8_t command; 2586 2587 command = buf[0]; 2588 ops = scsi_disk_reqops_dispatch[command]; 2589 if (!ops) { 2590 ops = &scsi_disk_emulate_reqops; 2591 } 2592 req = scsi_req_alloc(ops, &s->qdev, tag, lun, hba_private); 2593 2594 if (trace_event_get_state_backends(TRACE_SCSI_DISK_NEW_REQUEST)) { 2595 scsi_disk_new_request_dump(lun, tag, buf); 2596 } 2597 2598 return req; 2599 } 2600 2601 #ifdef __linux__ 2602 static int get_device_type(SCSIDiskState *s) 2603 { 2604 uint8_t cmd[16]; 2605 uint8_t buf[36]; 2606 int ret; 2607 2608 memset(cmd, 0, sizeof(cmd)); 2609 memset(buf, 0, sizeof(buf)); 2610 cmd[0] = INQUIRY; 2611 cmd[4] = sizeof(buf); 2612 2613 ret = scsi_SG_IO_FROM_DEV(s->qdev.conf.blk, cmd, sizeof(cmd), 2614 buf, sizeof(buf), s->qdev.io_timeout); 2615 if (ret < 0) { 2616 return -1; 2617 } 2618 s->qdev.type = buf[0]; 2619 if (buf[1] & 0x80) { 2620 s->features |= 1 << SCSI_DISK_F_REMOVABLE; 2621 } 2622 return 0; 2623 } 2624 2625 static void scsi_block_realize(SCSIDevice *dev, Error **errp) 2626 { 2627 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2628 AioContext *ctx; 2629 int sg_version; 2630 int rc; 2631 2632 if (!s->qdev.conf.blk) { 2633 error_setg(errp, "drive property not set"); 2634 return; 2635 } 2636 2637 if (s->rotation_rate) { 2638 error_report_once("rotation_rate is specified for scsi-block but is " 2639 "not implemented. 
This option is deprecated and will " 2640 "be removed in a future version"); 2641 } 2642 2643 ctx = blk_get_aio_context(s->qdev.conf.blk); 2644 aio_context_acquire(ctx); 2645 2646 /* Check that we are using a driver that supports SG_IO (version 3 and later). */ 2647 rc = blk_ioctl(s->qdev.conf.blk, SG_GET_VERSION_NUM, &sg_version); 2648 if (rc < 0) { 2649 error_setg_errno(errp, -rc, "cannot get SG_IO version number"); 2650 if (rc != -EPERM) { 2651 error_append_hint(errp, "Is this a SCSI device?\n"); 2652 } 2653 goto out; 2654 } 2655 if (sg_version < 30000) { 2656 error_setg(errp, "scsi generic interface too old"); 2657 goto out; 2658 } 2659 2660 /* Get the device type from INQUIRY data. */ 2661 rc = get_device_type(s); 2662 if (rc < 0) { 2663 error_setg(errp, "INQUIRY failed"); 2664 goto out; 2665 } 2666 2667 /* Make a guess for the block size; we'll fix it when the guest sends 2668 * READ CAPACITY. If it doesn't, it would likely assume these sizes 2669 * anyway. (TODO: check in /sys). 2670 */ 2671 if (s->qdev.type == TYPE_ROM || s->qdev.type == TYPE_WORM) { 2672 s->qdev.blocksize = 2048; 2673 } else { 2674 s->qdev.blocksize = 512; 2675 } 2676 2677 /* Make the scsi-block device not removable via the HMP and QMP eject 2678 * commands. 2679 */ 2680 s->features |= (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS); 2681 2682 scsi_realize(&s->qdev, errp); 2683 scsi_generic_read_device_inquiry(&s->qdev); 2684 2685 out: 2686 aio_context_release(ctx); 2687 } 2688 2689 typedef struct SCSIBlockReq { 2690 SCSIDiskReq req; 2691 sg_io_hdr_t io_header; 2692 2693 /* Selected bytes of the original CDB, copied into our own CDB. */ 2694 uint8_t cmd, cdb1, group_number; 2695 2696 /* CDB passed to SG_IO. */ 2697 uint8_t cdb[16]; 2698 BlockCompletionFunc *cb; 2699 void *cb_opaque; 2700 } SCSIBlockReq; 2701 2702 static void scsi_block_sgio_complete(void *opaque, int ret) 2703 { 2704 SCSIBlockReq *req = (SCSIBlockReq *)opaque; 2705 SCSIDiskReq *r = &req->req; 2706 SCSIDevice *s = r->req.dev; 2707 sg_io_hdr_t *io_hdr = &req->io_header; 2708 2709 if (ret == 0) { 2710 if (io_hdr->host_status != SCSI_HOST_OK) { 2711 scsi_req_complete_failed(&r->req, io_hdr->host_status); 2712 scsi_req_unref(&r->req); 2713 return; 2714 } 2715 2716 if (io_hdr->driver_status & SG_ERR_DRIVER_TIMEOUT) { 2717 ret = BUSY; 2718 } else { 2719 ret = io_hdr->status; 2720 } 2721 2722 if (ret > 0) { 2723 aio_context_acquire(blk_get_aio_context(s->conf.blk)); 2724 if (scsi_handle_rw_error(r, ret, true)) { 2725 aio_context_release(blk_get_aio_context(s->conf.blk)); 2726 scsi_req_unref(&r->req); 2727 return; 2728 } 2729 aio_context_release(blk_get_aio_context(s->conf.blk)); 2730 2731 /* Ignore error. */ 2732 ret = 0; 2733 } 2734 } 2735 2736 req->cb(req->cb_opaque, ret); 2737 } 2738 2739 static BlockAIOCB *scsi_block_do_sgio(SCSIBlockReq *req, 2740 int64_t offset, QEMUIOVector *iov, 2741 int direction, 2742 BlockCompletionFunc *cb, void *opaque) 2743 { 2744 sg_io_hdr_t *io_header = &req->io_header; 2745 SCSIDiskReq *r = &req->req; 2746 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 2747 int nb_logical_blocks; 2748 uint64_t lba; 2749 BlockAIOCB *aiocb; 2750 2751 /* This is not supported yet. It can only happen if the guest does 2752 * reads and writes that are not aligned to a single logical sector 2753 * _and_ cover multiple MemoryRegions. 2754 */ 2755 assert(offset % s->qdev.blocksize == 0); 2756 assert(iov->size % s->qdev.blocksize == 0); 2757 2758 io_header->interface_id = 'S'; 2759 2760 /* The data transfer comes from the QEMUIOVector. 
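 * SG_IO accepts the scatter/gather list directly: dxferp points at the
 * iovec array and iovec_count gives the number of elements, so no bounce
 * buffer is needed for the data itself.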
*/ 2761 io_header->dxfer_direction = direction; 2762 io_header->dxfer_len = iov->size; 2763 io_header->dxferp = (void *)iov->iov; 2764 io_header->iovec_count = iov->niov; 2765 assert(io_header->iovec_count == iov->niov); /* no overflow! */ 2766 2767 /* Build a new CDB with the LBA and length patched in, in case 2768 * DMA helpers split the transfer in multiple segments. Do not 2769 * build a CDB smaller than what the guest wanted, and only build 2770 * a larger one if strictly necessary. 2771 */ 2772 io_header->cmdp = req->cdb; 2773 lba = offset / s->qdev.blocksize; 2774 nb_logical_blocks = io_header->dxfer_len / s->qdev.blocksize; 2775 2776 if ((req->cmd >> 5) == 0 && lba <= 0x1ffff) { 2777 /* 6-byte CDB */ 2778 stl_be_p(&req->cdb[0], lba | (req->cmd << 24)); 2779 req->cdb[4] = nb_logical_blocks; 2780 req->cdb[5] = 0; 2781 io_header->cmd_len = 6; 2782 } else if ((req->cmd >> 5) <= 1 && lba <= 0xffffffffULL) { 2783 /* 10-byte CDB */ 2784 req->cdb[0] = (req->cmd & 0x1f) | 0x20; 2785 req->cdb[1] = req->cdb1; 2786 stl_be_p(&req->cdb[2], lba); 2787 req->cdb[6] = req->group_number; 2788 stw_be_p(&req->cdb[7], nb_logical_blocks); 2789 req->cdb[9] = 0; 2790 io_header->cmd_len = 10; 2791 } else if ((req->cmd >> 5) != 4 && lba <= 0xffffffffULL) { 2792 /* 12-byte CDB */ 2793 req->cdb[0] = (req->cmd & 0x1f) | 0xA0; 2794 req->cdb[1] = req->cdb1; 2795 stl_be_p(&req->cdb[2], lba); 2796 stl_be_p(&req->cdb[6], nb_logical_blocks); 2797 req->cdb[10] = req->group_number; 2798 req->cdb[11] = 0; 2799 io_header->cmd_len = 12; 2800 } else { 2801 /* 16-byte CDB */ 2802 req->cdb[0] = (req->cmd & 0x1f) | 0x80; 2803 req->cdb[1] = req->cdb1; 2804 stq_be_p(&req->cdb[2], lba); 2805 stl_be_p(&req->cdb[10], nb_logical_blocks); 2806 req->cdb[14] = req->group_number; 2807 req->cdb[15] = 0; 2808 io_header->cmd_len = 16; 2809 } 2810 2811 /* The rest is as in scsi-generic.c. */ 2812 io_header->mx_sb_len = sizeof(r->req.sense); 2813 io_header->sbp = r->req.sense; 2814 io_header->timeout = s->qdev.io_timeout * 1000; 2815 io_header->usr_ptr = r; 2816 io_header->flags |= SG_FLAG_DIRECT_IO; 2817 req->cb = cb; 2818 req->cb_opaque = opaque; 2819 trace_scsi_disk_aio_sgio_command(r->req.tag, req->cdb[0], lba, 2820 nb_logical_blocks, io_header->timeout); 2821 aiocb = blk_aio_ioctl(s->qdev.conf.blk, SG_IO, io_header, scsi_block_sgio_complete, req); 2822 assert(aiocb != NULL); 2823 return aiocb; 2824 } 2825 2826 static bool scsi_block_no_fua(SCSICommand *cmd) 2827 { 2828 return false; 2829 } 2830 2831 static BlockAIOCB *scsi_block_dma_readv(int64_t offset, 2832 QEMUIOVector *iov, 2833 BlockCompletionFunc *cb, void *cb_opaque, 2834 void *opaque) 2835 { 2836 SCSIBlockReq *r = opaque; 2837 return scsi_block_do_sgio(r, offset, iov, 2838 SG_DXFER_FROM_DEV, cb, cb_opaque); 2839 } 2840 2841 static BlockAIOCB *scsi_block_dma_writev(int64_t offset, 2842 QEMUIOVector *iov, 2843 BlockCompletionFunc *cb, void *cb_opaque, 2844 void *opaque) 2845 { 2846 SCSIBlockReq *r = opaque; 2847 return scsi_block_do_sgio(r, offset, iov, 2848 SG_DXFER_TO_DEV, cb, cb_opaque); 2849 } 2850 2851 static bool scsi_block_is_passthrough(SCSIDiskState *s, uint8_t *buf) 2852 { 2853 switch (buf[0]) { 2854 case VERIFY_10: 2855 case VERIFY_12: 2856 case VERIFY_16: 2857 /* Check if BYTCHK == 0x01 (data-out buffer contains data 2858 * for the number of logical blocks specified in the length 2859 * field). For other modes, do not use scatter/gather operation. 
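 * (buf[1] & 6) extracts the two-bit BYTCHK field from bits 2..1 of CDB
 * byte 1: for example VERIFY(10) with byte 1 == 0x02 (BYTCHK=01b) returns
 * false here and is handled via scsi_block_dma_reqops, while 0x00
 * (BYTCHK=00b) keeps the plain passthrough path.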
2860 */ 2861 if ((buf[1] & 6) == 2) { 2862 return false; 2863 } 2864 break; 2865 2866 case READ_6: 2867 case READ_10: 2868 case READ_12: 2869 case READ_16: 2870 case WRITE_6: 2871 case WRITE_10: 2872 case WRITE_12: 2873 case WRITE_16: 2874 case WRITE_VERIFY_10: 2875 case WRITE_VERIFY_12: 2876 case WRITE_VERIFY_16: 2877 /* MMC writing cannot be done via DMA helpers, because it sometimes 2878 * involves writing beyond the maximum LBA or to negative LBA (lead-in). 2879 * We might use scsi_block_dma_reqops as long as no writing commands are 2880 * seen, but performance usually isn't paramount on optical media. So, 2881 * just make scsi-block operate the same as scsi-generic for them. 2882 */ 2883 if (s->qdev.type != TYPE_ROM) { 2884 return false; 2885 } 2886 break; 2887 2888 default: 2889 break; 2890 } 2891 2892 return true; 2893 } 2894 2895 2896 static int32_t scsi_block_dma_command(SCSIRequest *req, uint8_t *buf) 2897 { 2898 SCSIBlockReq *r = (SCSIBlockReq *)req; 2899 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 2900 2901 r->cmd = req->cmd.buf[0]; 2902 switch (r->cmd >> 5) { 2903 case 0: 2904 /* 6-byte CDB. */ 2905 r->cdb1 = r->group_number = 0; 2906 break; 2907 case 1: 2908 /* 10-byte CDB. */ 2909 r->cdb1 = req->cmd.buf[1]; 2910 r->group_number = req->cmd.buf[6]; 2911 break; 2912 case 4: 2913 /* 12-byte CDB. */ 2914 r->cdb1 = req->cmd.buf[1]; 2915 r->group_number = req->cmd.buf[10]; 2916 break; 2917 case 5: 2918 /* 16-byte CDB. */ 2919 r->cdb1 = req->cmd.buf[1]; 2920 r->group_number = req->cmd.buf[14]; 2921 break; 2922 default: 2923 abort(); 2924 } 2925 2926 /* Protection information is not supported. For SCSI versions 2 and 2927 * older (as determined by snooping the guest's INQUIRY commands), 2928 * there is no RD/WR/VRPROTECT, so skip this check in these versions. 
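 * (This mirrors the check in scsi_disk_dma_command(): the 0xe0 mask covers
 * the WRPROTECT/VRPROTECT bits in byte 1, which we cannot honour.)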
2929 */ 2930 if (s->qdev.scsi_version > 2 && (req->cmd.buf[1] & 0xe0)) { 2931 scsi_check_condition(&r->req, SENSE_CODE(INVALID_FIELD)); 2932 return 0; 2933 } 2934 2935 return scsi_disk_dma_command(req, buf); 2936 } 2937 2938 static const SCSIReqOps scsi_block_dma_reqops = { 2939 .size = sizeof(SCSIBlockReq), 2940 .free_req = scsi_free_request, 2941 .send_command = scsi_block_dma_command, 2942 .read_data = scsi_read_data, 2943 .write_data = scsi_write_data, 2944 .get_buf = scsi_get_buf, 2945 .load_request = scsi_disk_load_request, 2946 .save_request = scsi_disk_save_request, 2947 }; 2948 2949 static SCSIRequest *scsi_block_new_request(SCSIDevice *d, uint32_t tag, 2950 uint32_t lun, uint8_t *buf, 2951 void *hba_private) 2952 { 2953 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d); 2954 2955 if (scsi_block_is_passthrough(s, buf)) { 2956 return scsi_req_alloc(&scsi_generic_req_ops, &s->qdev, tag, lun, 2957 hba_private); 2958 } else { 2959 return scsi_req_alloc(&scsi_block_dma_reqops, &s->qdev, tag, lun, 2960 hba_private); 2961 } 2962 } 2963 2964 static int scsi_block_parse_cdb(SCSIDevice *d, SCSICommand *cmd, 2965 uint8_t *buf, void *hba_private) 2966 { 2967 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d); 2968 2969 if (scsi_block_is_passthrough(s, buf)) { 2970 return scsi_bus_parse_cdb(&s->qdev, cmd, buf, hba_private); 2971 } else { 2972 return scsi_req_parse_cdb(&s->qdev, cmd, buf); 2973 } 2974 } 2975 2976 static void scsi_block_update_sense(SCSIRequest *req) 2977 { 2978 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 2979 SCSIBlockReq *br = DO_UPCAST(SCSIBlockReq, req, r); 2980 r->req.sense_len = MIN(br->io_header.sb_len_wr, sizeof(r->req.sense)); 2981 } 2982 #endif 2983 2984 static 2985 BlockAIOCB *scsi_dma_readv(int64_t offset, QEMUIOVector *iov, 2986 BlockCompletionFunc *cb, void *cb_opaque, 2987 void *opaque) 2988 { 2989 SCSIDiskReq *r = opaque; 2990 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 2991 return blk_aio_preadv(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque); 2992 } 2993 2994 static 2995 BlockAIOCB *scsi_dma_writev(int64_t offset, QEMUIOVector *iov, 2996 BlockCompletionFunc *cb, void *cb_opaque, 2997 void *opaque) 2998 { 2999 SCSIDiskReq *r = opaque; 3000 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 3001 return blk_aio_pwritev(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque); 3002 } 3003 3004 static void scsi_disk_base_class_initfn(ObjectClass *klass, void *data) 3005 { 3006 DeviceClass *dc = DEVICE_CLASS(klass); 3007 SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass); 3008 3009 dc->fw_name = "disk"; 3010 dc->reset = scsi_disk_reset; 3011 sdc->dma_readv = scsi_dma_readv; 3012 sdc->dma_writev = scsi_dma_writev; 3013 sdc->need_fua_emulation = scsi_is_cmd_fua; 3014 } 3015 3016 static const TypeInfo scsi_disk_base_info = { 3017 .name = TYPE_SCSI_DISK_BASE, 3018 .parent = TYPE_SCSI_DEVICE, 3019 .class_init = scsi_disk_base_class_initfn, 3020 .instance_size = sizeof(SCSIDiskState), 3021 .class_size = sizeof(SCSIDiskClass), 3022 .abstract = true, 3023 }; 3024 3025 #define DEFINE_SCSI_DISK_PROPERTIES() \ 3026 DEFINE_PROP_DRIVE_IOTHREAD("drive", SCSIDiskState, qdev.conf.blk), \ 3027 DEFINE_BLOCK_PROPERTIES_BASE(SCSIDiskState, qdev.conf), \ 3028 DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf), \ 3029 DEFINE_PROP_STRING("ver", SCSIDiskState, version), \ 3030 DEFINE_PROP_STRING("serial", SCSIDiskState, serial), \ 3031 DEFINE_PROP_STRING("vendor", SCSIDiskState, vendor), \ 3032 DEFINE_PROP_STRING("product", SCSIDiskState, product), \ 
3033 DEFINE_PROP_STRING("device_id", SCSIDiskState, device_id) 3034 3035 3036 static Property scsi_hd_properties[] = { 3037 DEFINE_SCSI_DISK_PROPERTIES(), 3038 DEFINE_PROP_BIT("removable", SCSIDiskState, features, 3039 SCSI_DISK_F_REMOVABLE, false), 3040 DEFINE_PROP_BIT("dpofua", SCSIDiskState, features, 3041 SCSI_DISK_F_DPOFUA, false), 3042 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0), 3043 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0), 3044 DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0), 3045 DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size, 3046 DEFAULT_MAX_UNMAP_SIZE), 3047 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size, 3048 DEFAULT_MAX_IO_SIZE), 3049 DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0), 3050 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version, 3051 5), 3052 DEFINE_BLOCK_CHS_PROPERTIES(SCSIDiskState, qdev.conf), 3053 DEFINE_PROP_END_OF_LIST(), 3054 }; 3055 3056 static const VMStateDescription vmstate_scsi_disk_state = { 3057 .name = "scsi-disk", 3058 .version_id = 1, 3059 .minimum_version_id = 1, 3060 .fields = (VMStateField[]) { 3061 VMSTATE_SCSI_DEVICE(qdev, SCSIDiskState), 3062 VMSTATE_BOOL(media_changed, SCSIDiskState), 3063 VMSTATE_BOOL(media_event, SCSIDiskState), 3064 VMSTATE_BOOL(eject_request, SCSIDiskState), 3065 VMSTATE_BOOL(tray_open, SCSIDiskState), 3066 VMSTATE_BOOL(tray_locked, SCSIDiskState), 3067 VMSTATE_END_OF_LIST() 3068 } 3069 }; 3070 3071 static void scsi_hd_class_initfn(ObjectClass *klass, void *data) 3072 { 3073 DeviceClass *dc = DEVICE_CLASS(klass); 3074 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); 3075 3076 sc->realize = scsi_hd_realize; 3077 sc->unrealize = scsi_unrealize; 3078 sc->alloc_req = scsi_new_request; 3079 sc->unit_attention_reported = scsi_disk_unit_attention_reported; 3080 dc->desc = "virtual SCSI disk"; 3081 device_class_set_props(dc, scsi_hd_properties); 3082 dc->vmsd = &vmstate_scsi_disk_state; 3083 } 3084 3085 static const TypeInfo scsi_hd_info = { 3086 .name = "scsi-hd", 3087 .parent = TYPE_SCSI_DISK_BASE, 3088 .class_init = scsi_hd_class_initfn, 3089 }; 3090 3091 static Property scsi_cd_properties[] = { 3092 DEFINE_SCSI_DISK_PROPERTIES(), 3093 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0), 3094 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0), 3095 DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0), 3096 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size, 3097 DEFAULT_MAX_IO_SIZE), 3098 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version, 3099 5), 3100 DEFINE_PROP_END_OF_LIST(), 3101 }; 3102 3103 static void scsi_cd_class_initfn(ObjectClass *klass, void *data) 3104 { 3105 DeviceClass *dc = DEVICE_CLASS(klass); 3106 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); 3107 3108 sc->realize = scsi_cd_realize; 3109 sc->alloc_req = scsi_new_request; 3110 sc->unit_attention_reported = scsi_disk_unit_attention_reported; 3111 dc->desc = "virtual SCSI CD-ROM"; 3112 device_class_set_props(dc, scsi_cd_properties); 3113 dc->vmsd = &vmstate_scsi_disk_state; 3114 } 3115 3116 static const TypeInfo scsi_cd_info = { 3117 .name = "scsi-cd", 3118 .parent = TYPE_SCSI_DISK_BASE, 3119 .class_init = scsi_cd_class_initfn, 3120 }; 3121 3122 #ifdef __linux__ 3123 static Property scsi_block_properties[] = { 3124 DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf), 3125 DEFINE_PROP_DRIVE("drive", SCSIDiskState, qdev.conf.blk), 3126 DEFINE_PROP_BOOL("share-rw", 
SCSIDiskState, qdev.conf.share_rw, false), 3127 DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0), 3128 DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size, 3129 DEFAULT_MAX_UNMAP_SIZE), 3130 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size, 3131 DEFAULT_MAX_IO_SIZE), 3132 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version, 3133 -1), 3134 DEFINE_PROP_UINT32("io_timeout", SCSIDiskState, qdev.io_timeout, 3135 DEFAULT_IO_TIMEOUT), 3136 DEFINE_PROP_END_OF_LIST(), 3137 }; 3138 3139 static void scsi_block_class_initfn(ObjectClass *klass, void *data) 3140 { 3141 DeviceClass *dc = DEVICE_CLASS(klass); 3142 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); 3143 SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass); 3144 3145 sc->realize = scsi_block_realize; 3146 sc->alloc_req = scsi_block_new_request; 3147 sc->parse_cdb = scsi_block_parse_cdb; 3148 sdc->dma_readv = scsi_block_dma_readv; 3149 sdc->dma_writev = scsi_block_dma_writev; 3150 sdc->update_sense = scsi_block_update_sense; 3151 sdc->need_fua_emulation = scsi_block_no_fua; 3152 dc->desc = "SCSI block device passthrough"; 3153 device_class_set_props(dc, scsi_block_properties); 3154 dc->vmsd = &vmstate_scsi_disk_state; 3155 } 3156 3157 static const TypeInfo scsi_block_info = { 3158 .name = "scsi-block", 3159 .parent = TYPE_SCSI_DISK_BASE, 3160 .class_init = scsi_block_class_initfn, 3161 }; 3162 #endif 3163 3164 static Property scsi_disk_properties[] = { 3165 DEFINE_SCSI_DISK_PROPERTIES(), 3166 DEFINE_PROP_BIT("removable", SCSIDiskState, features, 3167 SCSI_DISK_F_REMOVABLE, false), 3168 DEFINE_PROP_BIT("dpofua", SCSIDiskState, features, 3169 SCSI_DISK_F_DPOFUA, false), 3170 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0), 3171 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0), 3172 DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0), 3173 DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size, 3174 DEFAULT_MAX_UNMAP_SIZE), 3175 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size, 3176 DEFAULT_MAX_IO_SIZE), 3177 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version, 3178 5), 3179 DEFINE_PROP_END_OF_LIST(), 3180 }; 3181 3182 static void scsi_disk_class_initfn(ObjectClass *klass, void *data) 3183 { 3184 DeviceClass *dc = DEVICE_CLASS(klass); 3185 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); 3186 3187 sc->realize = scsi_disk_realize; 3188 sc->alloc_req = scsi_new_request; 3189 sc->unit_attention_reported = scsi_disk_unit_attention_reported; 3190 dc->fw_name = "disk"; 3191 dc->desc = "virtual SCSI disk or CD-ROM (legacy)"; 3192 dc->reset = scsi_disk_reset; 3193 device_class_set_props(dc, scsi_disk_properties); 3194 dc->vmsd = &vmstate_scsi_disk_state; 3195 } 3196 3197 static const TypeInfo scsi_disk_info = { 3198 .name = "scsi-disk", 3199 .parent = TYPE_SCSI_DISK_BASE, 3200 .class_init = scsi_disk_class_initfn, 3201 }; 3202 3203 static void scsi_disk_register_types(void) 3204 { 3205 type_register_static(&scsi_disk_base_info); 3206 type_register_static(&scsi_hd_info); 3207 type_register_static(&scsi_cd_info); 3208 #ifdef __linux__ 3209 type_register_static(&scsi_block_info); 3210 #endif 3211 type_register_static(&scsi_disk_info); 3212 } 3213 3214 type_init(scsi_disk_register_types) 3215