1 /* 2 * SCSI Device emulation 3 * 4 * Copyright (c) 2006 CodeSourcery. 5 * Based on code by Fabrice Bellard 6 * 7 * Written by Paul Brook 8 * Modifications: 9 * 2009-Dec-12 Artyom Tarasenko : implemented stamdard inquiry for the case 10 * when the allocation length of CDB is smaller 11 * than 36. 12 * 2009-Oct-13 Artyom Tarasenko : implemented the block descriptor in the 13 * MODE SENSE response. 14 * 15 * This code is licensed under the LGPL. 16 * 17 * Note that this file only handles the SCSI architecture model and device 18 * commands. Emulation of interface/link layer protocols is handled by 19 * the host adapter emulator. 20 */ 21 22 #include "qemu/osdep.h" 23 #include "qemu/units.h" 24 #include "qapi/error.h" 25 #include "qemu/error-report.h" 26 #include "qemu/main-loop.h" 27 #include "qemu/module.h" 28 #include "hw/scsi/scsi.h" 29 #include "migration/qemu-file-types.h" 30 #include "migration/vmstate.h" 31 #include "hw/scsi/emulation.h" 32 #include "scsi/constants.h" 33 #include "sysemu/block-backend.h" 34 #include "sysemu/blockdev.h" 35 #include "hw/block/block.h" 36 #include "hw/qdev-properties.h" 37 #include "sysemu/dma.h" 38 #include "sysemu/sysemu.h" 39 #include "qemu/cutils.h" 40 #include "trace.h" 41 #include "qom/object.h" 42 43 #ifdef __linux 44 #include <scsi/sg.h> 45 #endif 46 47 #define SCSI_WRITE_SAME_MAX (512 * KiB) 48 #define SCSI_DMA_BUF_SIZE (128 * KiB) 49 #define SCSI_MAX_INQUIRY_LEN 256 50 #define SCSI_MAX_MODE_LEN 256 51 52 #define DEFAULT_DISCARD_GRANULARITY (4 * KiB) 53 #define DEFAULT_MAX_UNMAP_SIZE (1 * GiB) 54 #define DEFAULT_MAX_IO_SIZE INT_MAX /* 2 GB - 1 block */ 55 56 #define TYPE_SCSI_DISK_BASE "scsi-disk-base" 57 58 OBJECT_DECLARE_TYPE(SCSIDiskState, SCSIDiskClass, SCSI_DISK_BASE) 59 60 struct SCSIDiskClass { 61 SCSIDeviceClass parent_class; 62 DMAIOFunc *dma_readv; 63 DMAIOFunc *dma_writev; 64 bool (*need_fua_emulation)(SCSICommand *cmd); 65 void (*update_sense)(SCSIRequest *r); 66 }; 67 68 typedef struct SCSIDiskReq { 69 SCSIRequest req; 70 /* Both sector and sector_count are in terms of BDRV_SECTOR_SIZE bytes. */ 71 uint64_t sector; 72 uint32_t sector_count; 73 uint32_t buflen; 74 bool started; 75 bool need_fua_emulation; 76 struct iovec iov; 77 QEMUIOVector qiov; 78 BlockAcctCookie acct; 79 unsigned char *status; 80 } SCSIDiskReq; 81 82 #define SCSI_DISK_F_REMOVABLE 0 83 #define SCSI_DISK_F_DPOFUA 1 84 #define SCSI_DISK_F_NO_REMOVABLE_DEVOPS 2 85 86 struct SCSIDiskState { 87 SCSIDevice qdev; 88 uint32_t features; 89 bool media_changed; 90 bool media_event; 91 bool eject_request; 92 uint16_t port_index; 93 uint64_t max_unmap_size; 94 uint64_t max_io_size; 95 QEMUBH *bh; 96 char *version; 97 char *serial; 98 char *vendor; 99 char *product; 100 char *device_id; 101 bool tray_open; 102 bool tray_locked; 103 /* 104 * 0x0000 - rotation rate not reported 105 * 0x0001 - non-rotating medium (SSD) 106 * 0x0002-0x0400 - reserved 107 * 0x0401-0xffe - rotations per minute 108 * 0xffff - reserved 109 */ 110 uint16_t rotation_rate; 111 }; 112 113 static bool scsi_handle_rw_error(SCSIDiskReq *r, int error, bool acct_failed); 114 115 static void scsi_free_request(SCSIRequest *req) 116 { 117 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 118 119 qemu_vfree(r->iov.iov_base); 120 } 121 122 /* Helper function for command completion with sense. 
*/ 123 static void scsi_check_condition(SCSIDiskReq *r, SCSISense sense) 124 { 125 trace_scsi_disk_check_condition(r->req.tag, sense.key, sense.asc, 126 sense.ascq); 127 scsi_req_build_sense(&r->req, sense); 128 scsi_req_complete(&r->req, CHECK_CONDITION); 129 } 130 131 static void scsi_init_iovec(SCSIDiskReq *r, size_t size) 132 { 133 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 134 135 if (!r->iov.iov_base) { 136 r->buflen = size; 137 r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen); 138 } 139 r->iov.iov_len = MIN(r->sector_count * BDRV_SECTOR_SIZE, r->buflen); 140 qemu_iovec_init_external(&r->qiov, &r->iov, 1); 141 } 142 143 static void scsi_disk_save_request(QEMUFile *f, SCSIRequest *req) 144 { 145 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 146 147 qemu_put_be64s(f, &r->sector); 148 qemu_put_be32s(f, &r->sector_count); 149 qemu_put_be32s(f, &r->buflen); 150 if (r->buflen) { 151 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { 152 qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len); 153 } else if (!req->retry) { 154 uint32_t len = r->iov.iov_len; 155 qemu_put_be32s(f, &len); 156 qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len); 157 } 158 } 159 } 160 161 static void scsi_disk_load_request(QEMUFile *f, SCSIRequest *req) 162 { 163 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 164 165 qemu_get_be64s(f, &r->sector); 166 qemu_get_be32s(f, &r->sector_count); 167 qemu_get_be32s(f, &r->buflen); 168 if (r->buflen) { 169 scsi_init_iovec(r, r->buflen); 170 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { 171 qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len); 172 } else if (!r->req.retry) { 173 uint32_t len; 174 qemu_get_be32s(f, &len); 175 r->iov.iov_len = len; 176 assert(r->iov.iov_len <= r->buflen); 177 qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len); 178 } 179 } 180 181 qemu_iovec_init_external(&r->qiov, &r->iov, 1); 182 } 183 184 static bool scsi_disk_req_check_error(SCSIDiskReq *r, int ret, bool acct_failed) 185 { 186 if (r->req.io_canceled) { 187 scsi_req_cancel_complete(&r->req); 188 return true; 189 } 190 191 if (ret < 0 || (r->status && *r->status)) { 192 return scsi_handle_rw_error(r, -ret, acct_failed); 193 } 194 195 return false; 196 } 197 198 static void scsi_aio_complete(void *opaque, int ret) 199 { 200 SCSIDiskReq *r = (SCSIDiskReq *)opaque; 201 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 202 203 assert(r->req.aiocb != NULL); 204 r->req.aiocb = NULL; 205 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); 206 if (scsi_disk_req_check_error(r, ret, true)) { 207 goto done; 208 } 209 210 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); 211 scsi_req_complete(&r->req, GOOD); 212 213 done: 214 aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); 215 scsi_req_unref(&r->req); 216 } 217 218 static bool scsi_is_cmd_fua(SCSICommand *cmd) 219 { 220 switch (cmd->buf[0]) { 221 case READ_10: 222 case READ_12: 223 case READ_16: 224 case WRITE_10: 225 case WRITE_12: 226 case WRITE_16: 227 return (cmd->buf[1] & 8) != 0; 228 229 case VERIFY_10: 230 case VERIFY_12: 231 case VERIFY_16: 232 case WRITE_VERIFY_10: 233 case WRITE_VERIFY_12: 234 case WRITE_VERIFY_16: 235 return true; 236 237 case READ_6: 238 case WRITE_6: 239 default: 240 return false; 241 } 242 } 243 244 static void scsi_write_do_fua(SCSIDiskReq *r) 245 { 246 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 247 248 assert(r->req.aiocb == NULL); 249 assert(!r->req.io_canceled); 250 251 if (r->need_fua_emulation) { 252 
block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0, 253 BLOCK_ACCT_FLUSH); 254 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r); 255 return; 256 } 257 258 scsi_req_complete(&r->req, GOOD); 259 scsi_req_unref(&r->req); 260 } 261 262 static void scsi_dma_complete_noio(SCSIDiskReq *r, int ret) 263 { 264 assert(r->req.aiocb == NULL); 265 if (scsi_disk_req_check_error(r, ret, false)) { 266 goto done; 267 } 268 269 r->sector += r->sector_count; 270 r->sector_count = 0; 271 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { 272 scsi_write_do_fua(r); 273 return; 274 } else { 275 scsi_req_complete(&r->req, GOOD); 276 } 277 278 done: 279 scsi_req_unref(&r->req); 280 } 281 282 static void scsi_dma_complete(void *opaque, int ret) 283 { 284 SCSIDiskReq *r = (SCSIDiskReq *)opaque; 285 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 286 287 assert(r->req.aiocb != NULL); 288 r->req.aiocb = NULL; 289 290 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); 291 if (ret < 0) { 292 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct); 293 } else { 294 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); 295 } 296 scsi_dma_complete_noio(r, ret); 297 aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); 298 } 299 300 static void scsi_read_complete_noio(SCSIDiskReq *r, int ret) 301 { 302 uint32_t n; 303 304 assert(r->req.aiocb == NULL); 305 if (scsi_disk_req_check_error(r, ret, false)) { 306 goto done; 307 } 308 309 n = r->qiov.size / BDRV_SECTOR_SIZE; 310 r->sector += n; 311 r->sector_count -= n; 312 scsi_req_data(&r->req, r->qiov.size); 313 314 done: 315 scsi_req_unref(&r->req); 316 } 317 318 static void scsi_read_complete(void *opaque, int ret) 319 { 320 SCSIDiskReq *r = (SCSIDiskReq *)opaque; 321 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 322 323 assert(r->req.aiocb != NULL); 324 r->req.aiocb = NULL; 325 326 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); 327 if (ret < 0) { 328 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct); 329 } else { 330 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); 331 trace_scsi_disk_read_complete(r->req.tag, r->qiov.size); 332 } 333 scsi_read_complete_noio(r, ret); 334 aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); 335 } 336 337 /* Actually issue a read to the block device. */ 338 static void scsi_do_read(SCSIDiskReq *r, int ret) 339 { 340 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 341 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s)); 342 343 assert (r->req.aiocb == NULL); 344 if (scsi_disk_req_check_error(r, ret, false)) { 345 goto done; 346 } 347 348 /* The request is used as the AIO opaque value, so add a ref. 
*/ 349 scsi_req_ref(&r->req); 350 351 if (r->req.sg) { 352 dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_READ); 353 r->req.resid -= r->req.sg->size; 354 r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk), 355 r->req.sg, r->sector << BDRV_SECTOR_BITS, 356 BDRV_SECTOR_SIZE, 357 sdc->dma_readv, r, scsi_dma_complete, r, 358 DMA_DIRECTION_FROM_DEVICE); 359 } else { 360 scsi_init_iovec(r, SCSI_DMA_BUF_SIZE); 361 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 362 r->qiov.size, BLOCK_ACCT_READ); 363 r->req.aiocb = sdc->dma_readv(r->sector << BDRV_SECTOR_BITS, &r->qiov, 364 scsi_read_complete, r, r); 365 } 366 367 done: 368 scsi_req_unref(&r->req); 369 } 370 371 static void scsi_do_read_cb(void *opaque, int ret) 372 { 373 SCSIDiskReq *r = (SCSIDiskReq *)opaque; 374 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 375 376 assert (r->req.aiocb != NULL); 377 r->req.aiocb = NULL; 378 379 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); 380 if (ret < 0) { 381 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct); 382 } else { 383 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); 384 } 385 scsi_do_read(opaque, ret); 386 aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); 387 } 388 389 /* Read more data from scsi device into buffer. */ 390 static void scsi_read_data(SCSIRequest *req) 391 { 392 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 393 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 394 bool first; 395 396 trace_scsi_disk_read_data_count(r->sector_count); 397 if (r->sector_count == 0) { 398 /* This also clears the sense buffer for REQUEST SENSE. */ 399 scsi_req_complete(&r->req, GOOD); 400 return; 401 } 402 403 /* No data transfer may already be in progress */ 404 assert(r->req.aiocb == NULL); 405 406 /* The request is used as the AIO opaque value, so add a ref. */ 407 scsi_req_ref(&r->req); 408 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { 409 trace_scsi_disk_read_data_invalid(); 410 scsi_read_complete_noio(r, -EINVAL); 411 return; 412 } 413 414 if (!blk_is_available(req->dev->conf.blk)) { 415 scsi_read_complete_noio(r, -ENOMEDIUM); 416 return; 417 } 418 419 first = !r->started; 420 r->started = true; 421 if (first && r->need_fua_emulation) { 422 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0, 423 BLOCK_ACCT_FLUSH); 424 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_do_read_cb, r); 425 } else { 426 scsi_do_read(r, 0); 427 } 428 } 429 430 /* 431 * scsi_handle_rw_error has two return values. False means that the error 432 * must be ignored, true means that the error has been processed and the 433 * caller should not do anything else for this request. Note that 434 * scsi_handle_rw_error always manages its reference counts, independent 435 * of the return value. 
436 */ 437 static bool scsi_handle_rw_error(SCSIDiskReq *r, int error, bool acct_failed) 438 { 439 bool is_read = (r->req.cmd.mode == SCSI_XFER_FROM_DEV); 440 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 441 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s)); 442 BlockErrorAction action = blk_get_error_action(s->qdev.conf.blk, 443 is_read, error); 444 445 if (action == BLOCK_ERROR_ACTION_REPORT) { 446 if (acct_failed) { 447 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct); 448 } 449 switch (error) { 450 case 0: 451 /* A passthrough command has run and has produced sense data; check 452 * whether the error has to be handled by the guest or should rather 453 * pause the host. 454 */ 455 assert(r->status && *r->status); 456 if (scsi_sense_buf_is_guest_recoverable(r->req.sense, sizeof(r->req.sense))) { 457 /* These errors are handled by guest. */ 458 sdc->update_sense(&r->req); 459 scsi_req_complete(&r->req, *r->status); 460 return true; 461 } 462 error = scsi_sense_buf_to_errno(r->req.sense, sizeof(r->req.sense)); 463 break; 464 #ifdef CONFIG_LINUX 465 /* These errno mapping are specific to Linux. For more information: 466 * - scsi_decide_disposition in drivers/scsi/scsi_error.c 467 * - scsi_result_to_blk_status in drivers/scsi/scsi_lib.c 468 * - blk_errors[] in block/blk-core.c 469 */ 470 case EBADE: 471 /* DID_NEXUS_FAILURE -> BLK_STS_NEXUS. */ 472 scsi_req_complete(&r->req, RESERVATION_CONFLICT); 473 break; 474 case ENODATA: 475 /* DID_MEDIUM_ERROR -> BLK_STS_MEDIUM. */ 476 scsi_check_condition(r, SENSE_CODE(READ_ERROR)); 477 break; 478 case EREMOTEIO: 479 /* DID_TARGET_FAILURE -> BLK_STS_TARGET. */ 480 scsi_req_complete(&r->req, HARDWARE_ERROR); 481 break; 482 #endif 483 case ENOMEDIUM: 484 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM)); 485 break; 486 case ENOMEM: 487 scsi_check_condition(r, SENSE_CODE(TARGET_FAILURE)); 488 break; 489 case EINVAL: 490 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 491 break; 492 case ENOSPC: 493 scsi_check_condition(r, SENSE_CODE(SPACE_ALLOC_FAILED)); 494 break; 495 default: 496 scsi_check_condition(r, SENSE_CODE(IO_ERROR)); 497 break; 498 } 499 } 500 501 blk_error_action(s->qdev.conf.blk, action, is_read, error); 502 if (action == BLOCK_ERROR_ACTION_IGNORE) { 503 scsi_req_complete(&r->req, 0); 504 return true; 505 } 506 507 if (action == BLOCK_ERROR_ACTION_STOP) { 508 scsi_req_retry(&r->req); 509 } 510 return true; 511 } 512 513 static void scsi_write_complete_noio(SCSIDiskReq *r, int ret) 514 { 515 uint32_t n; 516 517 assert (r->req.aiocb == NULL); 518 if (scsi_disk_req_check_error(r, ret, false)) { 519 goto done; 520 } 521 522 n = r->qiov.size / BDRV_SECTOR_SIZE; 523 r->sector += n; 524 r->sector_count -= n; 525 if (r->sector_count == 0) { 526 scsi_write_do_fua(r); 527 return; 528 } else { 529 scsi_init_iovec(r, SCSI_DMA_BUF_SIZE); 530 trace_scsi_disk_write_complete_noio(r->req.tag, r->qiov.size); 531 scsi_req_data(&r->req, r->qiov.size); 532 } 533 534 done: 535 scsi_req_unref(&r->req); 536 } 537 538 static void scsi_write_complete(void * opaque, int ret) 539 { 540 SCSIDiskReq *r = (SCSIDiskReq *)opaque; 541 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 542 543 assert (r->req.aiocb != NULL); 544 r->req.aiocb = NULL; 545 546 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); 547 if (ret < 0) { 548 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct); 549 } else { 550 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); 551 } 552 scsi_write_complete_noio(r, ret); 
553 aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); 554 } 555 556 static void scsi_write_data(SCSIRequest *req) 557 { 558 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 559 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 560 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s)); 561 562 /* No data transfer may already be in progress */ 563 assert(r->req.aiocb == NULL); 564 565 /* The request is used as the AIO opaque value, so add a ref. */ 566 scsi_req_ref(&r->req); 567 if (r->req.cmd.mode != SCSI_XFER_TO_DEV) { 568 trace_scsi_disk_write_data_invalid(); 569 scsi_write_complete_noio(r, -EINVAL); 570 return; 571 } 572 573 if (!r->req.sg && !r->qiov.size) { 574 /* Called for the first time. Ask the driver to send us more data. */ 575 r->started = true; 576 scsi_write_complete_noio(r, 0); 577 return; 578 } 579 if (!blk_is_available(req->dev->conf.blk)) { 580 scsi_write_complete_noio(r, -ENOMEDIUM); 581 return; 582 } 583 584 if (r->req.cmd.buf[0] == VERIFY_10 || r->req.cmd.buf[0] == VERIFY_12 || 585 r->req.cmd.buf[0] == VERIFY_16) { 586 if (r->req.sg) { 587 scsi_dma_complete_noio(r, 0); 588 } else { 589 scsi_write_complete_noio(r, 0); 590 } 591 return; 592 } 593 594 if (r->req.sg) { 595 dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_WRITE); 596 r->req.resid -= r->req.sg->size; 597 r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk), 598 r->req.sg, r->sector << BDRV_SECTOR_BITS, 599 BDRV_SECTOR_SIZE, 600 sdc->dma_writev, r, scsi_dma_complete, r, 601 DMA_DIRECTION_TO_DEVICE); 602 } else { 603 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 604 r->qiov.size, BLOCK_ACCT_WRITE); 605 r->req.aiocb = sdc->dma_writev(r->sector << BDRV_SECTOR_BITS, &r->qiov, 606 scsi_write_complete, r, r); 607 } 608 } 609 610 /* Return a pointer to the data buffer. */ 611 static uint8_t *scsi_get_buf(SCSIRequest *req) 612 { 613 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 614 615 return (uint8_t *)r->iov.iov_base; 616 } 617 618 static int scsi_disk_emulate_vpd_page(SCSIRequest *req, uint8_t *outbuf) 619 { 620 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 621 uint8_t page_code = req->cmd.buf[2]; 622 int start, buflen = 0; 623 624 outbuf[buflen++] = s->qdev.type & 0x1f; 625 outbuf[buflen++] = page_code; 626 outbuf[buflen++] = 0x00; 627 outbuf[buflen++] = 0x00; 628 start = buflen; 629 630 switch (page_code) { 631 case 0x00: /* Supported page codes, mandatory */ 632 { 633 trace_scsi_disk_emulate_vpd_page_00(req->cmd.xfer); 634 outbuf[buflen++] = 0x00; /* list of supported pages (this page) */ 635 if (s->serial) { 636 outbuf[buflen++] = 0x80; /* unit serial number */ 637 } 638 outbuf[buflen++] = 0x83; /* device identification */ 639 if (s->qdev.type == TYPE_DISK) { 640 outbuf[buflen++] = 0xb0; /* block limits */ 641 outbuf[buflen++] = 0xb1; /* block device characteristics */ 642 outbuf[buflen++] = 0xb2; /* thin provisioning */ 643 } 644 break; 645 } 646 case 0x80: /* Device serial number, optional */ 647 { 648 int l; 649 650 if (!s->serial) { 651 trace_scsi_disk_emulate_vpd_page_80_not_supported(); 652 return -1; 653 } 654 655 l = strlen(s->serial); 656 if (l > 36) { 657 l = 36; 658 } 659 660 trace_scsi_disk_emulate_vpd_page_80(req->cmd.xfer); 661 memcpy(outbuf + buflen, s->serial, l); 662 buflen += l; 663 break; 664 } 665 666 case 0x83: /* Device identification page, mandatory */ 667 { 668 int id_len = s->device_id ? 
MIN(strlen(s->device_id), 255 - 8) : 0; 669 670 trace_scsi_disk_emulate_vpd_page_83(req->cmd.xfer); 671 672 if (id_len) { 673 outbuf[buflen++] = 0x2; /* ASCII */ 674 outbuf[buflen++] = 0; /* not officially assigned */ 675 outbuf[buflen++] = 0; /* reserved */ 676 outbuf[buflen++] = id_len; /* length of data following */ 677 memcpy(outbuf + buflen, s->device_id, id_len); 678 buflen += id_len; 679 } 680 681 if (s->qdev.wwn) { 682 outbuf[buflen++] = 0x1; /* Binary */ 683 outbuf[buflen++] = 0x3; /* NAA */ 684 outbuf[buflen++] = 0; /* reserved */ 685 outbuf[buflen++] = 8; 686 stq_be_p(&outbuf[buflen], s->qdev.wwn); 687 buflen += 8; 688 } 689 690 if (s->qdev.port_wwn) { 691 outbuf[buflen++] = 0x61; /* SAS / Binary */ 692 outbuf[buflen++] = 0x93; /* PIV / Target port / NAA */ 693 outbuf[buflen++] = 0; /* reserved */ 694 outbuf[buflen++] = 8; 695 stq_be_p(&outbuf[buflen], s->qdev.port_wwn); 696 buflen += 8; 697 } 698 699 if (s->port_index) { 700 outbuf[buflen++] = 0x61; /* SAS / Binary */ 701 702 /* PIV/Target port/relative target port */ 703 outbuf[buflen++] = 0x94; 704 705 outbuf[buflen++] = 0; /* reserved */ 706 outbuf[buflen++] = 4; 707 stw_be_p(&outbuf[buflen + 2], s->port_index); 708 buflen += 4; 709 } 710 break; 711 } 712 case 0xb0: /* block limits */ 713 { 714 SCSIBlockLimits bl = {}; 715 716 if (s->qdev.type == TYPE_ROM) { 717 trace_scsi_disk_emulate_vpd_page_b0_not_supported(); 718 return -1; 719 } 720 bl.wsnz = 1; 721 bl.unmap_sectors = 722 s->qdev.conf.discard_granularity / s->qdev.blocksize; 723 bl.min_io_size = 724 s->qdev.conf.min_io_size / s->qdev.blocksize; 725 bl.opt_io_size = 726 s->qdev.conf.opt_io_size / s->qdev.blocksize; 727 bl.max_unmap_sectors = 728 s->max_unmap_size / s->qdev.blocksize; 729 bl.max_io_sectors = 730 s->max_io_size / s->qdev.blocksize; 731 /* 255 descriptors fit in 4 KiB with an 8-byte header */ 732 bl.max_unmap_descr = 255; 733 734 if (s->qdev.type == TYPE_DISK) { 735 int max_transfer_blk = blk_get_max_transfer(s->qdev.conf.blk); 736 int max_io_sectors_blk = 737 max_transfer_blk / s->qdev.blocksize; 738 739 bl.max_io_sectors = 740 MIN_NON_ZERO(max_io_sectors_blk, bl.max_io_sectors); 741 } 742 buflen += scsi_emulate_block_limits(outbuf + buflen, &bl); 743 break; 744 } 745 case 0xb1: /* block device characteristics */ 746 { 747 buflen = 0x40; 748 outbuf[4] = (s->rotation_rate >> 8) & 0xff; 749 outbuf[5] = s->rotation_rate & 0xff; 750 outbuf[6] = 0; /* PRODUCT TYPE */ 751 outbuf[7] = 0; /* WABEREQ | WACEREQ | NOMINAL FORM FACTOR */ 752 outbuf[8] = 0; /* VBULS */ 753 break; 754 } 755 case 0xb2: /* thin provisioning */ 756 { 757 buflen = 8; 758 outbuf[4] = 0; 759 outbuf[5] = 0xe0; /* unmap & write_same 10/16 all supported */ 760 outbuf[6] = s->qdev.conf.discard_granularity ? 
2 : 1; 761 outbuf[7] = 0; 762 break; 763 } 764 default: 765 return -1; 766 } 767 /* done with EVPD */ 768 assert(buflen - start <= 255); 769 outbuf[start - 1] = buflen - start; 770 return buflen; 771 } 772 773 static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf) 774 { 775 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 776 int buflen = 0; 777 778 if (req->cmd.buf[1] & 0x1) { 779 /* Vital product data */ 780 return scsi_disk_emulate_vpd_page(req, outbuf); 781 } 782 783 /* Standard INQUIRY data */ 784 if (req->cmd.buf[2] != 0) { 785 return -1; 786 } 787 788 /* PAGE CODE == 0 */ 789 buflen = req->cmd.xfer; 790 if (buflen > SCSI_MAX_INQUIRY_LEN) { 791 buflen = SCSI_MAX_INQUIRY_LEN; 792 } 793 794 outbuf[0] = s->qdev.type & 0x1f; 795 outbuf[1] = (s->features & (1 << SCSI_DISK_F_REMOVABLE)) ? 0x80 : 0; 796 797 strpadcpy((char *) &outbuf[16], 16, s->product, ' '); 798 strpadcpy((char *) &outbuf[8], 8, s->vendor, ' '); 799 800 memset(&outbuf[32], 0, 4); 801 memcpy(&outbuf[32], s->version, MIN(4, strlen(s->version))); 802 /* 803 * We claim conformance to SPC-3, which is required for guests 804 * to ask for modern features like READ CAPACITY(16) or the 805 * block characteristics VPD page by default. Not all of SPC-3 806 * is actually implemented, but we're good enough. 807 */ 808 outbuf[2] = s->qdev.default_scsi_version; 809 outbuf[3] = 2 | 0x10; /* Format 2, HiSup */ 810 811 if (buflen > 36) { 812 outbuf[4] = buflen - 5; /* Additional Length = (Len - 1) - 4 */ 813 } else { 814 /* If the allocation length of CDB is too small, 815 the additional length is not adjusted */ 816 outbuf[4] = 36 - 5; 817 } 818 819 /* Sync data transfer and TCQ. */ 820 outbuf[7] = 0x10 | (req->bus->info->tcq ? 0x02 : 0); 821 return buflen; 822 } 823 824 static inline bool media_is_dvd(SCSIDiskState *s) 825 { 826 uint64_t nb_sectors; 827 if (s->qdev.type != TYPE_ROM) { 828 return false; 829 } 830 if (!blk_is_available(s->qdev.conf.blk)) { 831 return false; 832 } 833 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 834 return nb_sectors > CD_MAX_SECTORS; 835 } 836 837 static inline bool media_is_cd(SCSIDiskState *s) 838 { 839 uint64_t nb_sectors; 840 if (s->qdev.type != TYPE_ROM) { 841 return false; 842 } 843 if (!blk_is_available(s->qdev.conf.blk)) { 844 return false; 845 } 846 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 847 return nb_sectors <= CD_MAX_SECTORS; 848 } 849 850 static int scsi_read_disc_information(SCSIDiskState *s, SCSIDiskReq *r, 851 uint8_t *outbuf) 852 { 853 uint8_t type = r->req.cmd.buf[1] & 7; 854 855 if (s->qdev.type != TYPE_ROM) { 856 return -1; 857 } 858 859 /* Types 1/2 are only defined for Blu-Ray. 
*/ 860 if (type != 0) { 861 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 862 return -1; 863 } 864 865 memset(outbuf, 0, 34); 866 outbuf[1] = 32; 867 outbuf[2] = 0xe; /* last session complete, disc finalized */ 868 outbuf[3] = 1; /* first track on disc */ 869 outbuf[4] = 1; /* # of sessions */ 870 outbuf[5] = 1; /* first track of last session */ 871 outbuf[6] = 1; /* last track of last session */ 872 outbuf[7] = 0x20; /* unrestricted use */ 873 outbuf[8] = 0x00; /* CD-ROM or DVD-ROM */ 874 /* 9-10-11: most significant byte corresponding bytes 4-5-6 */ 875 /* 12-23: not meaningful for CD-ROM or DVD-ROM */ 876 /* 24-31: disc bar code */ 877 /* 32: disc application code */ 878 /* 33: number of OPC tables */ 879 880 return 34; 881 } 882 883 static int scsi_read_dvd_structure(SCSIDiskState *s, SCSIDiskReq *r, 884 uint8_t *outbuf) 885 { 886 static const int rds_caps_size[5] = { 887 [0] = 2048 + 4, 888 [1] = 4 + 4, 889 [3] = 188 + 4, 890 [4] = 2048 + 4, 891 }; 892 893 uint8_t media = r->req.cmd.buf[1]; 894 uint8_t layer = r->req.cmd.buf[6]; 895 uint8_t format = r->req.cmd.buf[7]; 896 int size = -1; 897 898 if (s->qdev.type != TYPE_ROM) { 899 return -1; 900 } 901 if (media != 0) { 902 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 903 return -1; 904 } 905 906 if (format != 0xff) { 907 if (!blk_is_available(s->qdev.conf.blk)) { 908 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM)); 909 return -1; 910 } 911 if (media_is_cd(s)) { 912 scsi_check_condition(r, SENSE_CODE(INCOMPATIBLE_FORMAT)); 913 return -1; 914 } 915 if (format >= ARRAY_SIZE(rds_caps_size)) { 916 return -1; 917 } 918 size = rds_caps_size[format]; 919 memset(outbuf, 0, size); 920 } 921 922 switch (format) { 923 case 0x00: { 924 /* Physical format information */ 925 uint64_t nb_sectors; 926 if (layer != 0) { 927 goto fail; 928 } 929 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 930 931 outbuf[4] = 1; /* DVD-ROM, part version 1 */ 932 outbuf[5] = 0xf; /* 120mm disc, minimum rate unspecified */ 933 outbuf[6] = 1; /* one layer, read-only (per MMC-2 spec) */ 934 outbuf[7] = 0; /* default densities */ 935 936 stl_be_p(&outbuf[12], (nb_sectors >> 2) - 1); /* end sector */ 937 stl_be_p(&outbuf[16], (nb_sectors >> 2) - 1); /* l0 end sector */ 938 break; 939 } 940 941 case 0x01: /* DVD copyright information, all zeros */ 942 break; 943 944 case 0x03: /* BCA information - invalid field for no BCA info */ 945 return -1; 946 947 case 0x04: /* DVD disc manufacturing information, all zeros */ 948 break; 949 950 case 0xff: { /* List capabilities */ 951 int i; 952 size = 4; 953 for (i = 0; i < ARRAY_SIZE(rds_caps_size); i++) { 954 if (!rds_caps_size[i]) { 955 continue; 956 } 957 outbuf[size] = i; 958 outbuf[size + 1] = 0x40; /* Not writable, readable */ 959 stw_be_p(&outbuf[size + 2], rds_caps_size[i]); 960 size += 4; 961 } 962 break; 963 } 964 965 default: 966 return -1; 967 } 968 969 /* Size of buffer, not including 2 byte size field */ 970 stw_be_p(outbuf, size - 2); 971 return size; 972 973 fail: 974 return -1; 975 } 976 977 static int scsi_event_status_media(SCSIDiskState *s, uint8_t *outbuf) 978 { 979 uint8_t event_code, media_status; 980 981 media_status = 0; 982 if (s->tray_open) { 983 media_status = MS_TRAY_OPEN; 984 } else if (blk_is_inserted(s->qdev.conf.blk)) { 985 media_status = MS_MEDIA_PRESENT; 986 } 987 988 /* Event notification descriptor */ 989 event_code = MEC_NO_CHANGE; 990 if (media_status != MS_TRAY_OPEN) { 991 if (s->media_event) { 992 event_code = MEC_NEW_MEDIA; 993 s->media_event = false; 994 } else if 
(s->eject_request) { 995 event_code = MEC_EJECT_REQUESTED; 996 s->eject_request = false; 997 } 998 } 999 1000 outbuf[0] = event_code; 1001 outbuf[1] = media_status; 1002 1003 /* These fields are reserved, just clear them. */ 1004 outbuf[2] = 0; 1005 outbuf[3] = 0; 1006 return 4; 1007 } 1008 1009 static int scsi_get_event_status_notification(SCSIDiskState *s, SCSIDiskReq *r, 1010 uint8_t *outbuf) 1011 { 1012 int size; 1013 uint8_t *buf = r->req.cmd.buf; 1014 uint8_t notification_class_request = buf[4]; 1015 if (s->qdev.type != TYPE_ROM) { 1016 return -1; 1017 } 1018 if ((buf[1] & 1) == 0) { 1019 /* asynchronous */ 1020 return -1; 1021 } 1022 1023 size = 4; 1024 outbuf[0] = outbuf[1] = 0; 1025 outbuf[3] = 1 << GESN_MEDIA; /* supported events */ 1026 if (notification_class_request & (1 << GESN_MEDIA)) { 1027 outbuf[2] = GESN_MEDIA; 1028 size += scsi_event_status_media(s, &outbuf[size]); 1029 } else { 1030 outbuf[2] = 0x80; 1031 } 1032 stw_be_p(outbuf, size - 4); 1033 return size; 1034 } 1035 1036 static int scsi_get_configuration(SCSIDiskState *s, uint8_t *outbuf) 1037 { 1038 int current; 1039 1040 if (s->qdev.type != TYPE_ROM) { 1041 return -1; 1042 } 1043 1044 if (media_is_dvd(s)) { 1045 current = MMC_PROFILE_DVD_ROM; 1046 } else if (media_is_cd(s)) { 1047 current = MMC_PROFILE_CD_ROM; 1048 } else { 1049 current = MMC_PROFILE_NONE; 1050 } 1051 1052 memset(outbuf, 0, 40); 1053 stl_be_p(&outbuf[0], 36); /* Bytes after the data length field */ 1054 stw_be_p(&outbuf[6], current); 1055 /* outbuf[8] - outbuf[19]: Feature 0 - Profile list */ 1056 outbuf[10] = 0x03; /* persistent, current */ 1057 outbuf[11] = 8; /* two profiles */ 1058 stw_be_p(&outbuf[12], MMC_PROFILE_DVD_ROM); 1059 outbuf[14] = (current == MMC_PROFILE_DVD_ROM); 1060 stw_be_p(&outbuf[16], MMC_PROFILE_CD_ROM); 1061 outbuf[18] = (current == MMC_PROFILE_CD_ROM); 1062 /* outbuf[20] - outbuf[31]: Feature 1 - Core feature */ 1063 stw_be_p(&outbuf[20], 1); 1064 outbuf[22] = 0x08 | 0x03; /* version 2, persistent, current */ 1065 outbuf[23] = 8; 1066 stl_be_p(&outbuf[24], 1); /* SCSI */ 1067 outbuf[28] = 1; /* DBE = 1, mandatory */ 1068 /* outbuf[32] - outbuf[39]: Feature 3 - Removable media feature */ 1069 stw_be_p(&outbuf[32], 3); 1070 outbuf[34] = 0x08 | 0x03; /* version 2, persistent, current */ 1071 outbuf[35] = 4; 1072 outbuf[36] = 0x39; /* tray, load=1, eject=1, unlocked at powerup, lock=1 */ 1073 /* TODO: Random readable, CD read, DVD read, drive serial number, 1074 power management */ 1075 return 40; 1076 } 1077 1078 static int scsi_emulate_mechanism_status(SCSIDiskState *s, uint8_t *outbuf) 1079 { 1080 if (s->qdev.type != TYPE_ROM) { 1081 return -1; 1082 } 1083 memset(outbuf, 0, 8); 1084 outbuf[5] = 1; /* CD-ROM */ 1085 return 8; 1086 } 1087 1088 static int mode_sense_page(SCSIDiskState *s, int page, uint8_t **p_outbuf, 1089 int page_control) 1090 { 1091 static const int mode_sense_valid[0x3f] = { 1092 [MODE_PAGE_HD_GEOMETRY] = (1 << TYPE_DISK), 1093 [MODE_PAGE_FLEXIBLE_DISK_GEOMETRY] = (1 << TYPE_DISK), 1094 [MODE_PAGE_CACHING] = (1 << TYPE_DISK) | (1 << TYPE_ROM), 1095 [MODE_PAGE_R_W_ERROR] = (1 << TYPE_DISK) | (1 << TYPE_ROM), 1096 [MODE_PAGE_AUDIO_CTL] = (1 << TYPE_ROM), 1097 [MODE_PAGE_CAPABILITIES] = (1 << TYPE_ROM), 1098 }; 1099 1100 uint8_t *p = *p_outbuf + 2; 1101 int length; 1102 1103 if ((mode_sense_valid[page] & (1 << s->qdev.type)) == 0) { 1104 return -1; 1105 } 1106 1107 /* 1108 * If Changeable Values are requested, a mask denoting those mode parameters 1109 * that are changeable shall be returned. 
As we currently don't support 1110 * parameter changes via MODE_SELECT all bits are returned set to zero. 1111 * The buffer was already menset to zero by the caller of this function. 1112 * 1113 * The offsets here are off by two compared to the descriptions in the 1114 * SCSI specs, because those include a 2-byte header. This is unfortunate, 1115 * but it is done so that offsets are consistent within our implementation 1116 * of MODE SENSE and MODE SELECT. MODE SELECT has to deal with both 1117 * 2-byte and 4-byte headers. 1118 */ 1119 switch (page) { 1120 case MODE_PAGE_HD_GEOMETRY: 1121 length = 0x16; 1122 if (page_control == 1) { /* Changeable Values */ 1123 break; 1124 } 1125 /* if a geometry hint is available, use it */ 1126 p[0] = (s->qdev.conf.cyls >> 16) & 0xff; 1127 p[1] = (s->qdev.conf.cyls >> 8) & 0xff; 1128 p[2] = s->qdev.conf.cyls & 0xff; 1129 p[3] = s->qdev.conf.heads & 0xff; 1130 /* Write precomp start cylinder, disabled */ 1131 p[4] = (s->qdev.conf.cyls >> 16) & 0xff; 1132 p[5] = (s->qdev.conf.cyls >> 8) & 0xff; 1133 p[6] = s->qdev.conf.cyls & 0xff; 1134 /* Reduced current start cylinder, disabled */ 1135 p[7] = (s->qdev.conf.cyls >> 16) & 0xff; 1136 p[8] = (s->qdev.conf.cyls >> 8) & 0xff; 1137 p[9] = s->qdev.conf.cyls & 0xff; 1138 /* Device step rate [ns], 200ns */ 1139 p[10] = 0; 1140 p[11] = 200; 1141 /* Landing zone cylinder */ 1142 p[12] = 0xff; 1143 p[13] = 0xff; 1144 p[14] = 0xff; 1145 /* Medium rotation rate [rpm], 5400 rpm */ 1146 p[18] = (5400 >> 8) & 0xff; 1147 p[19] = 5400 & 0xff; 1148 break; 1149 1150 case MODE_PAGE_FLEXIBLE_DISK_GEOMETRY: 1151 length = 0x1e; 1152 if (page_control == 1) { /* Changeable Values */ 1153 break; 1154 } 1155 /* Transfer rate [kbit/s], 5Mbit/s */ 1156 p[0] = 5000 >> 8; 1157 p[1] = 5000 & 0xff; 1158 /* if a geometry hint is available, use it */ 1159 p[2] = s->qdev.conf.heads & 0xff; 1160 p[3] = s->qdev.conf.secs & 0xff; 1161 p[4] = s->qdev.blocksize >> 8; 1162 p[6] = (s->qdev.conf.cyls >> 8) & 0xff; 1163 p[7] = s->qdev.conf.cyls & 0xff; 1164 /* Write precomp start cylinder, disabled */ 1165 p[8] = (s->qdev.conf.cyls >> 8) & 0xff; 1166 p[9] = s->qdev.conf.cyls & 0xff; 1167 /* Reduced current start cylinder, disabled */ 1168 p[10] = (s->qdev.conf.cyls >> 8) & 0xff; 1169 p[11] = s->qdev.conf.cyls & 0xff; 1170 /* Device step rate [100us], 100us */ 1171 p[12] = 0; 1172 p[13] = 1; 1173 /* Device step pulse width [us], 1us */ 1174 p[14] = 1; 1175 /* Device head settle delay [100us], 100us */ 1176 p[15] = 0; 1177 p[16] = 1; 1178 /* Motor on delay [0.1s], 0.1s */ 1179 p[17] = 1; 1180 /* Motor off delay [0.1s], 0.1s */ 1181 p[18] = 1; 1182 /* Medium rotation rate [rpm], 5400 rpm */ 1183 p[26] = (5400 >> 8) & 0xff; 1184 p[27] = 5400 & 0xff; 1185 break; 1186 1187 case MODE_PAGE_CACHING: 1188 length = 0x12; 1189 if (page_control == 1 || /* Changeable Values */ 1190 blk_enable_write_cache(s->qdev.conf.blk)) { 1191 p[0] = 4; /* WCE */ 1192 } 1193 break; 1194 1195 case MODE_PAGE_R_W_ERROR: 1196 length = 10; 1197 if (page_control == 1) { /* Changeable Values */ 1198 break; 1199 } 1200 p[0] = 0x80; /* Automatic Write Reallocation Enabled */ 1201 if (s->qdev.type == TYPE_ROM) { 1202 p[1] = 0x20; /* Read Retry Count */ 1203 } 1204 break; 1205 1206 case MODE_PAGE_AUDIO_CTL: 1207 length = 14; 1208 break; 1209 1210 case MODE_PAGE_CAPABILITIES: 1211 length = 0x14; 1212 if (page_control == 1) { /* Changeable Values */ 1213 break; 1214 } 1215 1216 p[0] = 0x3b; /* CD-R & CD-RW read */ 1217 p[1] = 0; /* Writing not supported */ 1218 p[2] = 0x7f; /* Audio, 
composite, digital out, 1219 mode 2 form 1&2, multi session */ 1220 p[3] = 0xff; /* CD DA, DA accurate, RW supported, 1221 RW corrected, C2 errors, ISRC, 1222 UPC, Bar code */ 1223 p[4] = 0x2d | (s->tray_locked ? 2 : 0); 1224 /* Locking supported, jumper present, eject, tray */ 1225 p[5] = 0; /* no volume & mute control, no 1226 changer */ 1227 p[6] = (50 * 176) >> 8; /* 50x read speed */ 1228 p[7] = (50 * 176) & 0xff; 1229 p[8] = 2 >> 8; /* Two volume levels */ 1230 p[9] = 2 & 0xff; 1231 p[10] = 2048 >> 8; /* 2M buffer */ 1232 p[11] = 2048 & 0xff; 1233 p[12] = (16 * 176) >> 8; /* 16x read speed current */ 1234 p[13] = (16 * 176) & 0xff; 1235 p[16] = (16 * 176) >> 8; /* 16x write speed */ 1236 p[17] = (16 * 176) & 0xff; 1237 p[18] = (16 * 176) >> 8; /* 16x write speed current */ 1238 p[19] = (16 * 176) & 0xff; 1239 break; 1240 1241 default: 1242 return -1; 1243 } 1244 1245 assert(length < 256); 1246 (*p_outbuf)[0] = page; 1247 (*p_outbuf)[1] = length; 1248 *p_outbuf += length + 2; 1249 return length + 2; 1250 } 1251 1252 static int scsi_disk_emulate_mode_sense(SCSIDiskReq *r, uint8_t *outbuf) 1253 { 1254 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1255 uint64_t nb_sectors; 1256 bool dbd; 1257 int page, buflen, ret, page_control; 1258 uint8_t *p; 1259 uint8_t dev_specific_param; 1260 1261 dbd = (r->req.cmd.buf[1] & 0x8) != 0; 1262 page = r->req.cmd.buf[2] & 0x3f; 1263 page_control = (r->req.cmd.buf[2] & 0xc0) >> 6; 1264 1265 trace_scsi_disk_emulate_mode_sense((r->req.cmd.buf[0] == MODE_SENSE) ? 6 : 1266 10, page, r->req.cmd.xfer, page_control); 1267 memset(outbuf, 0, r->req.cmd.xfer); 1268 p = outbuf; 1269 1270 if (s->qdev.type == TYPE_DISK) { 1271 dev_specific_param = s->features & (1 << SCSI_DISK_F_DPOFUA) ? 0x10 : 0; 1272 if (blk_is_read_only(s->qdev.conf.blk)) { 1273 dev_specific_param |= 0x80; /* Readonly. */ 1274 } 1275 } else { 1276 /* MMC prescribes that CD/DVD drives have no block descriptors, 1277 * and defines no device-specific parameter. */ 1278 dev_specific_param = 0x00; 1279 dbd = true; 1280 } 1281 1282 if (r->req.cmd.buf[0] == MODE_SENSE) { 1283 p[1] = 0; /* Default media type. */ 1284 p[2] = dev_specific_param; 1285 p[3] = 0; /* Block descriptor length. */ 1286 p += 4; 1287 } else { /* MODE_SENSE_10 */ 1288 p[2] = 0; /* Default media type. */ 1289 p[3] = dev_specific_param; 1290 p[6] = p[7] = 0; /* Block descriptor length. 
*/ 1291 p += 8; 1292 } 1293 1294 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 1295 if (!dbd && nb_sectors) { 1296 if (r->req.cmd.buf[0] == MODE_SENSE) { 1297 outbuf[3] = 8; /* Block descriptor length */ 1298 } else { /* MODE_SENSE_10 */ 1299 outbuf[7] = 8; /* Block descriptor length */ 1300 } 1301 nb_sectors /= (s->qdev.blocksize / BDRV_SECTOR_SIZE); 1302 if (nb_sectors > 0xffffff) { 1303 nb_sectors = 0; 1304 } 1305 p[0] = 0; /* media density code */ 1306 p[1] = (nb_sectors >> 16) & 0xff; 1307 p[2] = (nb_sectors >> 8) & 0xff; 1308 p[3] = nb_sectors & 0xff; 1309 p[4] = 0; /* reserved */ 1310 p[5] = 0; /* bytes 5-7 are the sector size in bytes */ 1311 p[6] = s->qdev.blocksize >> 8; 1312 p[7] = 0; 1313 p += 8; 1314 } 1315 1316 if (page_control == 3) { 1317 /* Saved Values */ 1318 scsi_check_condition(r, SENSE_CODE(SAVING_PARAMS_NOT_SUPPORTED)); 1319 return -1; 1320 } 1321 1322 if (page == 0x3f) { 1323 for (page = 0; page <= 0x3e; page++) { 1324 mode_sense_page(s, page, &p, page_control); 1325 } 1326 } else { 1327 ret = mode_sense_page(s, page, &p, page_control); 1328 if (ret == -1) { 1329 return -1; 1330 } 1331 } 1332 1333 buflen = p - outbuf; 1334 /* 1335 * The mode data length field specifies the length in bytes of the 1336 * following data that is available to be transferred. The mode data 1337 * length does not include itself. 1338 */ 1339 if (r->req.cmd.buf[0] == MODE_SENSE) { 1340 outbuf[0] = buflen - 1; 1341 } else { /* MODE_SENSE_10 */ 1342 outbuf[0] = ((buflen - 2) >> 8) & 0xff; 1343 outbuf[1] = (buflen - 2) & 0xff; 1344 } 1345 return buflen; 1346 } 1347 1348 static int scsi_disk_emulate_read_toc(SCSIRequest *req, uint8_t *outbuf) 1349 { 1350 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 1351 int start_track, format, msf, toclen; 1352 uint64_t nb_sectors; 1353 1354 msf = req->cmd.buf[1] & 2; 1355 format = req->cmd.buf[2] & 0xf; 1356 start_track = req->cmd.buf[6]; 1357 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 1358 trace_scsi_disk_emulate_read_toc(start_track, format, msf >> 1); 1359 nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE; 1360 switch (format) { 1361 case 0: 1362 toclen = cdrom_read_toc(nb_sectors, outbuf, msf, start_track); 1363 break; 1364 case 1: 1365 /* multi session : only a single session defined */ 1366 toclen = 12; 1367 memset(outbuf, 0, 12); 1368 outbuf[1] = 0x0a; 1369 outbuf[2] = 0x01; 1370 outbuf[3] = 0x01; 1371 break; 1372 case 2: 1373 toclen = cdrom_read_toc_raw(nb_sectors, outbuf, msf, start_track); 1374 break; 1375 default: 1376 return -1; 1377 } 1378 return toclen; 1379 } 1380 1381 static int scsi_disk_emulate_start_stop(SCSIDiskReq *r) 1382 { 1383 SCSIRequest *req = &r->req; 1384 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 1385 bool start = req->cmd.buf[4] & 1; 1386 bool loej = req->cmd.buf[4] & 2; /* load on start, eject on !start */ 1387 int pwrcnd = req->cmd.buf[4] & 0xf0; 1388 1389 if (pwrcnd) { 1390 /* eject/load only happens for power condition == 0 */ 1391 return 0; 1392 } 1393 1394 if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) && loej) { 1395 if (!start && !s->tray_open && s->tray_locked) { 1396 scsi_check_condition(r, 1397 blk_is_inserted(s->qdev.conf.blk) 1398 ? 
SENSE_CODE(ILLEGAL_REQ_REMOVAL_PREVENTED) 1399 : SENSE_CODE(NOT_READY_REMOVAL_PREVENTED)); 1400 return -1; 1401 } 1402 1403 if (s->tray_open != !start) { 1404 blk_eject(s->qdev.conf.blk, !start); 1405 s->tray_open = !start; 1406 } 1407 } 1408 return 0; 1409 } 1410 1411 static void scsi_disk_emulate_read_data(SCSIRequest *req) 1412 { 1413 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 1414 int buflen = r->iov.iov_len; 1415 1416 if (buflen) { 1417 trace_scsi_disk_emulate_read_data(buflen); 1418 r->iov.iov_len = 0; 1419 r->started = true; 1420 scsi_req_data(&r->req, buflen); 1421 return; 1422 } 1423 1424 /* This also clears the sense buffer for REQUEST SENSE. */ 1425 scsi_req_complete(&r->req, GOOD); 1426 } 1427 1428 static int scsi_disk_check_mode_select(SCSIDiskState *s, int page, 1429 uint8_t *inbuf, int inlen) 1430 { 1431 uint8_t mode_current[SCSI_MAX_MODE_LEN]; 1432 uint8_t mode_changeable[SCSI_MAX_MODE_LEN]; 1433 uint8_t *p; 1434 int len, expected_len, changeable_len, i; 1435 1436 /* The input buffer does not include the page header, so it is 1437 * off by 2 bytes. 1438 */ 1439 expected_len = inlen + 2; 1440 if (expected_len > SCSI_MAX_MODE_LEN) { 1441 return -1; 1442 } 1443 1444 p = mode_current; 1445 memset(mode_current, 0, inlen + 2); 1446 len = mode_sense_page(s, page, &p, 0); 1447 if (len < 0 || len != expected_len) { 1448 return -1; 1449 } 1450 1451 p = mode_changeable; 1452 memset(mode_changeable, 0, inlen + 2); 1453 changeable_len = mode_sense_page(s, page, &p, 1); 1454 assert(changeable_len == len); 1455 1456 /* Check that unchangeable bits are the same as what MODE SENSE 1457 * would return. 1458 */ 1459 for (i = 2; i < len; i++) { 1460 if (((mode_current[i] ^ inbuf[i - 2]) & ~mode_changeable[i]) != 0) { 1461 return -1; 1462 } 1463 } 1464 return 0; 1465 } 1466 1467 static void scsi_disk_apply_mode_select(SCSIDiskState *s, int page, uint8_t *p) 1468 { 1469 switch (page) { 1470 case MODE_PAGE_CACHING: 1471 blk_set_enable_write_cache(s->qdev.conf.blk, (p[0] & 4) != 0); 1472 break; 1473 1474 default: 1475 break; 1476 } 1477 } 1478 1479 static int mode_select_pages(SCSIDiskReq *r, uint8_t *p, int len, bool change) 1480 { 1481 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1482 1483 while (len > 0) { 1484 int page, subpage, page_len; 1485 1486 /* Parse both possible formats for the mode page headers. 
*/ 1487 page = p[0] & 0x3f; 1488 if (p[0] & 0x40) { 1489 if (len < 4) { 1490 goto invalid_param_len; 1491 } 1492 subpage = p[1]; 1493 page_len = lduw_be_p(&p[2]); 1494 p += 4; 1495 len -= 4; 1496 } else { 1497 if (len < 2) { 1498 goto invalid_param_len; 1499 } 1500 subpage = 0; 1501 page_len = p[1]; 1502 p += 2; 1503 len -= 2; 1504 } 1505 1506 if (subpage) { 1507 goto invalid_param; 1508 } 1509 if (page_len > len) { 1510 goto invalid_param_len; 1511 } 1512 1513 if (!change) { 1514 if (scsi_disk_check_mode_select(s, page, p, page_len) < 0) { 1515 goto invalid_param; 1516 } 1517 } else { 1518 scsi_disk_apply_mode_select(s, page, p); 1519 } 1520 1521 p += page_len; 1522 len -= page_len; 1523 } 1524 return 0; 1525 1526 invalid_param: 1527 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM)); 1528 return -1; 1529 1530 invalid_param_len: 1531 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN)); 1532 return -1; 1533 } 1534 1535 static void scsi_disk_emulate_mode_select(SCSIDiskReq *r, uint8_t *inbuf) 1536 { 1537 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1538 uint8_t *p = inbuf; 1539 int cmd = r->req.cmd.buf[0]; 1540 int len = r->req.cmd.xfer; 1541 int hdr_len = (cmd == MODE_SELECT ? 4 : 8); 1542 int bd_len; 1543 int pass; 1544 1545 /* We only support PF=1, SP=0. */ 1546 if ((r->req.cmd.buf[1] & 0x11) != 0x10) { 1547 goto invalid_field; 1548 } 1549 1550 if (len < hdr_len) { 1551 goto invalid_param_len; 1552 } 1553 1554 bd_len = (cmd == MODE_SELECT ? p[3] : lduw_be_p(&p[6])); 1555 len -= hdr_len; 1556 p += hdr_len; 1557 if (len < bd_len) { 1558 goto invalid_param_len; 1559 } 1560 if (bd_len != 0 && bd_len != 8) { 1561 goto invalid_param; 1562 } 1563 1564 len -= bd_len; 1565 p += bd_len; 1566 1567 /* Ensure no change is made if there is an error! */ 1568 for (pass = 0; pass < 2; pass++) { 1569 if (mode_select_pages(r, p, len, pass == 1) < 0) { 1570 assert(pass == 0); 1571 return; 1572 } 1573 } 1574 if (!blk_enable_write_cache(s->qdev.conf.blk)) { 1575 /* The request is used as the AIO opaque value, so add a ref. */ 1576 scsi_req_ref(&r->req); 1577 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0, 1578 BLOCK_ACCT_FLUSH); 1579 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r); 1580 return; 1581 } 1582 1583 scsi_req_complete(&r->req, GOOD); 1584 return; 1585 1586 invalid_param: 1587 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM)); 1588 return; 1589 1590 invalid_param_len: 1591 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN)); 1592 return; 1593 1594 invalid_field: 1595 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 1596 } 1597 1598 static inline bool check_lba_range(SCSIDiskState *s, 1599 uint64_t sector_num, uint32_t nb_sectors) 1600 { 1601 /* 1602 * The first line tests that no overflow happens when computing the last 1603 * sector. The second line tests that the last accessed sector is in 1604 * range. 1605 * 1606 * Careful, the computations should not underflow for nb_sectors == 0, 1607 * and a 0-block read to the first LBA beyond the end of device is 1608 * valid. 
1609 */ 1610 return (sector_num <= sector_num + nb_sectors && 1611 sector_num + nb_sectors <= s->qdev.max_lba + 1); 1612 } 1613 1614 typedef struct UnmapCBData { 1615 SCSIDiskReq *r; 1616 uint8_t *inbuf; 1617 int count; 1618 } UnmapCBData; 1619 1620 static void scsi_unmap_complete(void *opaque, int ret); 1621 1622 static void scsi_unmap_complete_noio(UnmapCBData *data, int ret) 1623 { 1624 SCSIDiskReq *r = data->r; 1625 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1626 1627 assert(r->req.aiocb == NULL); 1628 1629 if (data->count > 0) { 1630 r->sector = ldq_be_p(&data->inbuf[0]) 1631 * (s->qdev.blocksize / BDRV_SECTOR_SIZE); 1632 r->sector_count = (ldl_be_p(&data->inbuf[8]) & 0xffffffffULL) 1633 * (s->qdev.blocksize / BDRV_SECTOR_SIZE); 1634 if (!check_lba_range(s, r->sector, r->sector_count)) { 1635 block_acct_invalid(blk_get_stats(s->qdev.conf.blk), 1636 BLOCK_ACCT_UNMAP); 1637 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE)); 1638 goto done; 1639 } 1640 1641 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 1642 r->sector_count * BDRV_SECTOR_SIZE, 1643 BLOCK_ACCT_UNMAP); 1644 1645 r->req.aiocb = blk_aio_pdiscard(s->qdev.conf.blk, 1646 r->sector * BDRV_SECTOR_SIZE, 1647 r->sector_count * BDRV_SECTOR_SIZE, 1648 scsi_unmap_complete, data); 1649 data->count--; 1650 data->inbuf += 16; 1651 return; 1652 } 1653 1654 scsi_req_complete(&r->req, GOOD); 1655 1656 done: 1657 scsi_req_unref(&r->req); 1658 g_free(data); 1659 } 1660 1661 static void scsi_unmap_complete(void *opaque, int ret) 1662 { 1663 UnmapCBData *data = opaque; 1664 SCSIDiskReq *r = data->r; 1665 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1666 1667 assert(r->req.aiocb != NULL); 1668 r->req.aiocb = NULL; 1669 1670 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); 1671 if (scsi_disk_req_check_error(r, ret, true)) { 1672 scsi_req_unref(&r->req); 1673 g_free(data); 1674 } else { 1675 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); 1676 scsi_unmap_complete_noio(data, ret); 1677 } 1678 aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); 1679 } 1680 1681 static void scsi_disk_emulate_unmap(SCSIDiskReq *r, uint8_t *inbuf) 1682 { 1683 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1684 uint8_t *p = inbuf; 1685 int len = r->req.cmd.xfer; 1686 UnmapCBData *data; 1687 1688 /* Reject ANCHOR=1. */ 1689 if (r->req.cmd.buf[1] & 0x1) { 1690 goto invalid_field; 1691 } 1692 1693 if (len < 8) { 1694 goto invalid_param_len; 1695 } 1696 if (len < lduw_be_p(&p[0]) + 2) { 1697 goto invalid_param_len; 1698 } 1699 if (len < lduw_be_p(&p[2]) + 8) { 1700 goto invalid_param_len; 1701 } 1702 if (lduw_be_p(&p[2]) & 15) { 1703 goto invalid_param_len; 1704 } 1705 1706 if (blk_is_read_only(s->qdev.conf.blk)) { 1707 block_acct_invalid(blk_get_stats(s->qdev.conf.blk), BLOCK_ACCT_UNMAP); 1708 scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED)); 1709 return; 1710 } 1711 1712 data = g_new0(UnmapCBData, 1); 1713 data->r = r; 1714 data->inbuf = &p[8]; 1715 data->count = lduw_be_p(&p[2]) >> 4; 1716 1717 /* The matching unref is in scsi_unmap_complete, before data is freed. 
*/ 1718 scsi_req_ref(&r->req); 1719 scsi_unmap_complete_noio(data, 0); 1720 return; 1721 1722 invalid_param_len: 1723 block_acct_invalid(blk_get_stats(s->qdev.conf.blk), BLOCK_ACCT_UNMAP); 1724 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN)); 1725 return; 1726 1727 invalid_field: 1728 block_acct_invalid(blk_get_stats(s->qdev.conf.blk), BLOCK_ACCT_UNMAP); 1729 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 1730 } 1731 1732 typedef struct WriteSameCBData { 1733 SCSIDiskReq *r; 1734 int64_t sector; 1735 int nb_sectors; 1736 QEMUIOVector qiov; 1737 struct iovec iov; 1738 } WriteSameCBData; 1739 1740 static void scsi_write_same_complete(void *opaque, int ret) 1741 { 1742 WriteSameCBData *data = opaque; 1743 SCSIDiskReq *r = data->r; 1744 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1745 1746 assert(r->req.aiocb != NULL); 1747 r->req.aiocb = NULL; 1748 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); 1749 if (scsi_disk_req_check_error(r, ret, true)) { 1750 goto done; 1751 } 1752 1753 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); 1754 1755 data->nb_sectors -= data->iov.iov_len / BDRV_SECTOR_SIZE; 1756 data->sector += data->iov.iov_len / BDRV_SECTOR_SIZE; 1757 data->iov.iov_len = MIN(data->nb_sectors * BDRV_SECTOR_SIZE, 1758 data->iov.iov_len); 1759 if (data->iov.iov_len) { 1760 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 1761 data->iov.iov_len, BLOCK_ACCT_WRITE); 1762 /* Reinitialize qiov, to handle unaligned WRITE SAME request 1763 * where final qiov may need smaller size */ 1764 qemu_iovec_init_external(&data->qiov, &data->iov, 1); 1765 r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk, 1766 data->sector << BDRV_SECTOR_BITS, 1767 &data->qiov, 0, 1768 scsi_write_same_complete, data); 1769 aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); 1770 return; 1771 } 1772 1773 scsi_req_complete(&r->req, GOOD); 1774 1775 done: 1776 scsi_req_unref(&r->req); 1777 qemu_vfree(data->iov.iov_base); 1778 g_free(data); 1779 aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); 1780 } 1781 1782 static void scsi_disk_emulate_write_same(SCSIDiskReq *r, uint8_t *inbuf) 1783 { 1784 SCSIRequest *req = &r->req; 1785 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 1786 uint32_t nb_sectors = scsi_data_cdb_xfer(r->req.cmd.buf); 1787 WriteSameCBData *data; 1788 uint8_t *buf; 1789 int i; 1790 1791 /* Fail if PBDATA=1 or LBDATA=1 or ANCHOR=1. */ 1792 if (nb_sectors == 0 || (req->cmd.buf[1] & 0x16)) { 1793 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 1794 return; 1795 } 1796 1797 if (blk_is_read_only(s->qdev.conf.blk)) { 1798 scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED)); 1799 return; 1800 } 1801 if (!check_lba_range(s, r->req.cmd.lba, nb_sectors)) { 1802 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE)); 1803 return; 1804 } 1805 1806 if ((req->cmd.buf[1] & 0x1) || buffer_is_zero(inbuf, s->qdev.blocksize)) { 1807 int flags = (req->cmd.buf[1] & 0x8) ? BDRV_REQ_MAY_UNMAP : 0; 1808 1809 /* The request is used as the AIO opaque value, so add a ref. 
*/ 1810 scsi_req_ref(&r->req); 1811 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 1812 nb_sectors * s->qdev.blocksize, 1813 BLOCK_ACCT_WRITE); 1814 r->req.aiocb = blk_aio_pwrite_zeroes(s->qdev.conf.blk, 1815 r->req.cmd.lba * s->qdev.blocksize, 1816 nb_sectors * s->qdev.blocksize, 1817 flags, scsi_aio_complete, r); 1818 return; 1819 } 1820 1821 data = g_new0(WriteSameCBData, 1); 1822 data->r = r; 1823 data->sector = r->req.cmd.lba * (s->qdev.blocksize / BDRV_SECTOR_SIZE); 1824 data->nb_sectors = nb_sectors * (s->qdev.blocksize / BDRV_SECTOR_SIZE); 1825 data->iov.iov_len = MIN(data->nb_sectors * BDRV_SECTOR_SIZE, 1826 SCSI_WRITE_SAME_MAX); 1827 data->iov.iov_base = buf = blk_blockalign(s->qdev.conf.blk, 1828 data->iov.iov_len); 1829 qemu_iovec_init_external(&data->qiov, &data->iov, 1); 1830 1831 for (i = 0; i < data->iov.iov_len; i += s->qdev.blocksize) { 1832 memcpy(&buf[i], inbuf, s->qdev.blocksize); 1833 } 1834 1835 scsi_req_ref(&r->req); 1836 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 1837 data->iov.iov_len, BLOCK_ACCT_WRITE); 1838 r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk, 1839 data->sector << BDRV_SECTOR_BITS, 1840 &data->qiov, 0, 1841 scsi_write_same_complete, data); 1842 } 1843 1844 static void scsi_disk_emulate_write_data(SCSIRequest *req) 1845 { 1846 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 1847 1848 if (r->iov.iov_len) { 1849 int buflen = r->iov.iov_len; 1850 trace_scsi_disk_emulate_write_data(buflen); 1851 r->iov.iov_len = 0; 1852 scsi_req_data(&r->req, buflen); 1853 return; 1854 } 1855 1856 switch (req->cmd.buf[0]) { 1857 case MODE_SELECT: 1858 case MODE_SELECT_10: 1859 /* This also clears the sense buffer for REQUEST SENSE. */ 1860 scsi_disk_emulate_mode_select(r, r->iov.iov_base); 1861 break; 1862 1863 case UNMAP: 1864 scsi_disk_emulate_unmap(r, r->iov.iov_base); 1865 break; 1866 1867 case VERIFY_10: 1868 case VERIFY_12: 1869 case VERIFY_16: 1870 if (r->req.status == -1) { 1871 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 1872 } 1873 break; 1874 1875 case WRITE_SAME_10: 1876 case WRITE_SAME_16: 1877 scsi_disk_emulate_write_same(r, r->iov.iov_base); 1878 break; 1879 1880 default: 1881 abort(); 1882 } 1883 } 1884 1885 static int32_t scsi_disk_emulate_command(SCSIRequest *req, uint8_t *buf) 1886 { 1887 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 1888 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 1889 uint64_t nb_sectors; 1890 uint8_t *outbuf; 1891 int buflen; 1892 1893 switch (req->cmd.buf[0]) { 1894 case INQUIRY: 1895 case MODE_SENSE: 1896 case MODE_SENSE_10: 1897 case RESERVE: 1898 case RESERVE_10: 1899 case RELEASE: 1900 case RELEASE_10: 1901 case START_STOP: 1902 case ALLOW_MEDIUM_REMOVAL: 1903 case GET_CONFIGURATION: 1904 case GET_EVENT_STATUS_NOTIFICATION: 1905 case MECHANISM_STATUS: 1906 case REQUEST_SENSE: 1907 break; 1908 1909 default: 1910 if (!blk_is_available(s->qdev.conf.blk)) { 1911 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM)); 1912 return 0; 1913 } 1914 break; 1915 } 1916 1917 /* 1918 * FIXME: we shouldn't return anything bigger than 4k, but the code 1919 * requires the buffer to be as big as req->cmd.xfer in several 1920 * places. So, do not allow CDBs with a very large ALLOCATION 1921 * LENGTH. The real fix would be to modify scsi_read_data and 1922 * dma_buf_read, so that they return data beyond the buflen 1923 * as all zeros. 
1924 */ 1925 if (req->cmd.xfer > 65536) { 1926 goto illegal_request; 1927 } 1928 r->buflen = MAX(4096, req->cmd.xfer); 1929 1930 if (!r->iov.iov_base) { 1931 r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen); 1932 } 1933 1934 outbuf = r->iov.iov_base; 1935 memset(outbuf, 0, r->buflen); 1936 switch (req->cmd.buf[0]) { 1937 case TEST_UNIT_READY: 1938 assert(blk_is_available(s->qdev.conf.blk)); 1939 break; 1940 case INQUIRY: 1941 buflen = scsi_disk_emulate_inquiry(req, outbuf); 1942 if (buflen < 0) { 1943 goto illegal_request; 1944 } 1945 break; 1946 case MODE_SENSE: 1947 case MODE_SENSE_10: 1948 buflen = scsi_disk_emulate_mode_sense(r, outbuf); 1949 if (buflen < 0) { 1950 goto illegal_request; 1951 } 1952 break; 1953 case READ_TOC: 1954 buflen = scsi_disk_emulate_read_toc(req, outbuf); 1955 if (buflen < 0) { 1956 goto illegal_request; 1957 } 1958 break; 1959 case RESERVE: 1960 if (req->cmd.buf[1] & 1) { 1961 goto illegal_request; 1962 } 1963 break; 1964 case RESERVE_10: 1965 if (req->cmd.buf[1] & 3) { 1966 goto illegal_request; 1967 } 1968 break; 1969 case RELEASE: 1970 if (req->cmd.buf[1] & 1) { 1971 goto illegal_request; 1972 } 1973 break; 1974 case RELEASE_10: 1975 if (req->cmd.buf[1] & 3) { 1976 goto illegal_request; 1977 } 1978 break; 1979 case START_STOP: 1980 if (scsi_disk_emulate_start_stop(r) < 0) { 1981 return 0; 1982 } 1983 break; 1984 case ALLOW_MEDIUM_REMOVAL: 1985 s->tray_locked = req->cmd.buf[4] & 1; 1986 blk_lock_medium(s->qdev.conf.blk, req->cmd.buf[4] & 1); 1987 break; 1988 case READ_CAPACITY_10: 1989 /* The normal LEN field for this command is zero. */ 1990 memset(outbuf, 0, 8); 1991 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 1992 if (!nb_sectors) { 1993 scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY)); 1994 return 0; 1995 } 1996 if ((req->cmd.buf[8] & 1) == 0 && req->cmd.lba) { 1997 goto illegal_request; 1998 } 1999 nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE; 2000 /* Returned value is the address of the last sector. */ 2001 nb_sectors--; 2002 /* Remember the new size for read/write sanity checking. */ 2003 s->qdev.max_lba = nb_sectors; 2004 /* Clip to 2TB, instead of returning capacity modulo 2TB. */ 2005 if (nb_sectors > UINT32_MAX) { 2006 nb_sectors = UINT32_MAX; 2007 } 2008 outbuf[0] = (nb_sectors >> 24) & 0xff; 2009 outbuf[1] = (nb_sectors >> 16) & 0xff; 2010 outbuf[2] = (nb_sectors >> 8) & 0xff; 2011 outbuf[3] = nb_sectors & 0xff; 2012 outbuf[4] = 0; 2013 outbuf[5] = 0; 2014 outbuf[6] = s->qdev.blocksize >> 8; 2015 outbuf[7] = 0; 2016 break; 2017 case REQUEST_SENSE: 2018 /* Just return "NO SENSE". 
*/ 2019 buflen = scsi_convert_sense(NULL, 0, outbuf, r->buflen, 2020 (req->cmd.buf[1] & 1) == 0); 2021 if (buflen < 0) { 2022 goto illegal_request; 2023 } 2024 break; 2025 case MECHANISM_STATUS: 2026 buflen = scsi_emulate_mechanism_status(s, outbuf); 2027 if (buflen < 0) { 2028 goto illegal_request; 2029 } 2030 break; 2031 case GET_CONFIGURATION: 2032 buflen = scsi_get_configuration(s, outbuf); 2033 if (buflen < 0) { 2034 goto illegal_request; 2035 } 2036 break; 2037 case GET_EVENT_STATUS_NOTIFICATION: 2038 buflen = scsi_get_event_status_notification(s, r, outbuf); 2039 if (buflen < 0) { 2040 goto illegal_request; 2041 } 2042 break; 2043 case READ_DISC_INFORMATION: 2044 buflen = scsi_read_disc_information(s, r, outbuf); 2045 if (buflen < 0) { 2046 goto illegal_request; 2047 } 2048 break; 2049 case READ_DVD_STRUCTURE: 2050 buflen = scsi_read_dvd_structure(s, r, outbuf); 2051 if (buflen < 0) { 2052 goto illegal_request; 2053 } 2054 break; 2055 case SERVICE_ACTION_IN_16: 2056 /* Service Action In subcommands. */ 2057 if ((req->cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) { 2058 trace_scsi_disk_emulate_command_SAI_16(); 2059 memset(outbuf, 0, req->cmd.xfer); 2060 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 2061 if (!nb_sectors) { 2062 scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY)); 2063 return 0; 2064 } 2065 if ((req->cmd.buf[14] & 1) == 0 && req->cmd.lba) { 2066 goto illegal_request; 2067 } 2068 nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE; 2069 /* Returned value is the address of the last sector. */ 2070 nb_sectors--; 2071 /* Remember the new size for read/write sanity checking. */ 2072 s->qdev.max_lba = nb_sectors; 2073 outbuf[0] = (nb_sectors >> 56) & 0xff; 2074 outbuf[1] = (nb_sectors >> 48) & 0xff; 2075 outbuf[2] = (nb_sectors >> 40) & 0xff; 2076 outbuf[3] = (nb_sectors >> 32) & 0xff; 2077 outbuf[4] = (nb_sectors >> 24) & 0xff; 2078 outbuf[5] = (nb_sectors >> 16) & 0xff; 2079 outbuf[6] = (nb_sectors >> 8) & 0xff; 2080 outbuf[7] = nb_sectors & 0xff; 2081 outbuf[8] = 0; 2082 outbuf[9] = 0; 2083 outbuf[10] = s->qdev.blocksize >> 8; 2084 outbuf[11] = 0; 2085 outbuf[12] = 0; 2086 outbuf[13] = get_physical_block_exp(&s->qdev.conf); 2087 2088 /* set TPE bit if the format supports discard */ 2089 if (s->qdev.conf.discard_granularity) { 2090 outbuf[14] = 0x80; 2091 } 2092 2093 /* Protection, exponent and lowest lba field left blank. */ 2094 break; 2095 } 2096 trace_scsi_disk_emulate_command_SAI_unsupported(); 2097 goto illegal_request; 2098 case SYNCHRONIZE_CACHE: 2099 /* The request is used as the AIO opaque value, so add a ref. 
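* The reference is dropped again in scsi_aio_complete() once the flush completes.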
*/ 2100 scsi_req_ref(&r->req); 2101 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0, 2102 BLOCK_ACCT_FLUSH); 2103 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r); 2104 return 0; 2105 case SEEK_10: 2106 trace_scsi_disk_emulate_command_SEEK_10(r->req.cmd.lba); 2107 if (r->req.cmd.lba > s->qdev.max_lba) { 2108 goto illegal_lba; 2109 } 2110 break; 2111 case MODE_SELECT: 2112 trace_scsi_disk_emulate_command_MODE_SELECT(r->req.cmd.xfer); 2113 break; 2114 case MODE_SELECT_10: 2115 trace_scsi_disk_emulate_command_MODE_SELECT_10(r->req.cmd.xfer); 2116 break; 2117 case UNMAP: 2118 trace_scsi_disk_emulate_command_UNMAP(r->req.cmd.xfer); 2119 break; 2120 case VERIFY_10: 2121 case VERIFY_12: 2122 case VERIFY_16: 2123 trace_scsi_disk_emulate_command_VERIFY((req->cmd.buf[1] >> 1) & 3); 2124 if (req->cmd.buf[1] & 6) { 2125 goto illegal_request; 2126 } 2127 break; 2128 case WRITE_SAME_10: 2129 case WRITE_SAME_16: 2130 trace_scsi_disk_emulate_command_WRITE_SAME( 2131 req->cmd.buf[0] == WRITE_SAME_10 ? 10 : 16, r->req.cmd.xfer); 2132 break; 2133 default: 2134 trace_scsi_disk_emulate_command_UNKNOWN(buf[0], 2135 scsi_command_name(buf[0])); 2136 scsi_check_condition(r, SENSE_CODE(INVALID_OPCODE)); 2137 return 0; 2138 } 2139 assert(!r->req.aiocb); 2140 r->iov.iov_len = MIN(r->buflen, req->cmd.xfer); 2141 if (r->iov.iov_len == 0) { 2142 scsi_req_complete(&r->req, GOOD); 2143 } 2144 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { 2145 assert(r->iov.iov_len == req->cmd.xfer); 2146 return -r->iov.iov_len; 2147 } else { 2148 return r->iov.iov_len; 2149 } 2150 2151 illegal_request: 2152 if (r->req.status == -1) { 2153 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 2154 } 2155 return 0; 2156 2157 illegal_lba: 2158 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE)); 2159 return 0; 2160 } 2161 2162 /* Execute a SCSI command. Returns the length of the data expected by the 2163 command. This will be positive for data transfers from the device 2164 (e.g. disk reads), negative for transfers to the device (e.g. disk writes), 2165 and zero if the command does not transfer any data. */ 2166 2167 static int32_t scsi_disk_dma_command(SCSIRequest *req, uint8_t *buf) 2168 { 2169 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 2170 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 2171 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s)); 2172 uint32_t len; 2173 uint8_t command; 2174 2175 command = buf[0]; 2176 2177 if (!blk_is_available(s->qdev.conf.blk)) { 2178 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM)); 2179 return 0; 2180 } 2181 2182 len = scsi_data_cdb_xfer(r->req.cmd.buf); 2183 switch (command) { 2184 case READ_6: 2185 case READ_10: 2186 case READ_12: 2187 case READ_16: 2188 trace_scsi_disk_dma_command_READ(r->req.cmd.lba, len); 2189 /* Protection information is not supported. For SCSI versions 2 and 2190 * older (as determined by snooping the guest's INQUIRY commands), 2191 * there is no RD/WR/VRPROTECT, so skip this check in these versions.
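* (RDPROTECT/WRPROTECT/VRPROTECT occupy bits 7-5 of CDB byte 1, hence the 0xe0 mask below.)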
2192 */ 2193 if (s->qdev.scsi_version > 2 && (r->req.cmd.buf[1] & 0xe0)) { 2194 goto illegal_request; 2195 } 2196 if (!check_lba_range(s, r->req.cmd.lba, len)) { 2197 goto illegal_lba; 2198 } 2199 r->sector = r->req.cmd.lba * (s->qdev.blocksize / BDRV_SECTOR_SIZE); 2200 r->sector_count = len * (s->qdev.blocksize / BDRV_SECTOR_SIZE); 2201 break; 2202 case WRITE_6: 2203 case WRITE_10: 2204 case WRITE_12: 2205 case WRITE_16: 2206 case WRITE_VERIFY_10: 2207 case WRITE_VERIFY_12: 2208 case WRITE_VERIFY_16: 2209 if (blk_is_read_only(s->qdev.conf.blk)) { 2210 scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED)); 2211 return 0; 2212 } 2213 trace_scsi_disk_dma_command_WRITE( 2214 (command & 0xe) == 0xe ? "And Verify " : "", 2215 r->req.cmd.lba, len); 2216 /* fall through */ 2217 case VERIFY_10: 2218 case VERIFY_12: 2219 case VERIFY_16: 2220 /* We get here only for BYTCHK == 0x01 and only for scsi-block. 2221 * As far as DMA is concerned, we can treat it the same as a write; 2222 * scsi_block_do_sgio will send VERIFY commands. 2223 */ 2224 if (s->qdev.scsi_version > 2 && (r->req.cmd.buf[1] & 0xe0)) { 2225 goto illegal_request; 2226 } 2227 if (!check_lba_range(s, r->req.cmd.lba, len)) { 2228 goto illegal_lba; 2229 } 2230 r->sector = r->req.cmd.lba * (s->qdev.blocksize / BDRV_SECTOR_SIZE); 2231 r->sector_count = len * (s->qdev.blocksize / BDRV_SECTOR_SIZE); 2232 break; 2233 default: 2234 abort(); 2235 illegal_request: 2236 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 2237 return 0; 2238 illegal_lba: 2239 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE)); 2240 return 0; 2241 } 2242 r->need_fua_emulation = sdc->need_fua_emulation(&r->req.cmd); 2243 if (r->sector_count == 0) { 2244 scsi_req_complete(&r->req, GOOD); 2245 } 2246 assert(r->iov.iov_len == 0); 2247 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { 2248 return -r->sector_count * BDRV_SECTOR_SIZE; 2249 } else { 2250 return r->sector_count * BDRV_SECTOR_SIZE; 2251 } 2252 } 2253 2254 static void scsi_disk_reset(DeviceState *dev) 2255 { 2256 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev.qdev, dev); 2257 uint64_t nb_sectors; 2258 2259 scsi_device_purge_requests(&s->qdev, SENSE_CODE(RESET)); 2260 2261 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 2262 nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE; 2263 if (nb_sectors) { 2264 nb_sectors--; 2265 } 2266 s->qdev.max_lba = nb_sectors; 2267 /* reset tray statuses */ 2268 s->tray_locked = 0; 2269 s->tray_open = 0; 2270 2271 s->qdev.scsi_version = s->qdev.default_scsi_version; 2272 } 2273 2274 static void scsi_disk_resize_cb(void *opaque) 2275 { 2276 SCSIDiskState *s = opaque; 2277 2278 /* SPC lists this sense code as available only for 2279 * direct-access devices. 2280 */ 2281 if (s->qdev.type == TYPE_DISK) { 2282 scsi_device_report_change(&s->qdev, SENSE_CODE(CAPACITY_CHANGED)); 2283 } 2284 } 2285 2286 static void scsi_cd_change_media_cb(void *opaque, bool load, Error **errp) 2287 { 2288 SCSIDiskState *s = opaque; 2289 2290 /* 2291 * When a CD gets changed, we have to report an ejected state and 2292 * then a loaded state to guests so that they detect tray 2293 * open/close and media change events. Guests that do not use 2294 * GET_EVENT_STATUS_NOTIFICATION to detect such tray open/close 2295 * states rely on this behavior. 2296 * 2297 * media_changed governs the state machine used for unit attention 2298 * report. media_event is used by GET EVENT STATUS NOTIFICATION. 
2299 */ 2300 s->media_changed = load; 2301 s->tray_open = !load; 2302 scsi_device_set_ua(&s->qdev, SENSE_CODE(UNIT_ATTENTION_NO_MEDIUM)); 2303 s->media_event = true; 2304 s->eject_request = false; 2305 } 2306 2307 static void scsi_cd_eject_request_cb(void *opaque, bool force) 2308 { 2309 SCSIDiskState *s = opaque; 2310 2311 s->eject_request = true; 2312 if (force) { 2313 s->tray_locked = false; 2314 } 2315 } 2316 2317 static bool scsi_cd_is_tray_open(void *opaque) 2318 { 2319 return ((SCSIDiskState *)opaque)->tray_open; 2320 } 2321 2322 static bool scsi_cd_is_medium_locked(void *opaque) 2323 { 2324 return ((SCSIDiskState *)opaque)->tray_locked; 2325 } 2326 2327 static const BlockDevOps scsi_disk_removable_block_ops = { 2328 .change_media_cb = scsi_cd_change_media_cb, 2329 .eject_request_cb = scsi_cd_eject_request_cb, 2330 .is_tray_open = scsi_cd_is_tray_open, 2331 .is_medium_locked = scsi_cd_is_medium_locked, 2332 2333 .resize_cb = scsi_disk_resize_cb, 2334 }; 2335 2336 static const BlockDevOps scsi_disk_block_ops = { 2337 .resize_cb = scsi_disk_resize_cb, 2338 }; 2339 2340 static void scsi_disk_unit_attention_reported(SCSIDevice *dev) 2341 { 2342 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2343 if (s->media_changed) { 2344 s->media_changed = false; 2345 scsi_device_set_ua(&s->qdev, SENSE_CODE(MEDIUM_CHANGED)); 2346 } 2347 } 2348 2349 static void scsi_realize(SCSIDevice *dev, Error **errp) 2350 { 2351 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2352 bool read_only; 2353 2354 if (!s->qdev.conf.blk) { 2355 error_setg(errp, "drive property not set"); 2356 return; 2357 } 2358 2359 if (!(s->features & (1 << SCSI_DISK_F_REMOVABLE)) && 2360 !blk_is_inserted(s->qdev.conf.blk)) { 2361 error_setg(errp, "Device needs media, but drive is empty"); 2362 return; 2363 } 2364 2365 if (!blkconf_blocksizes(&s->qdev.conf, errp)) { 2366 return; 2367 } 2368 2369 if (blk_get_aio_context(s->qdev.conf.blk) != qemu_get_aio_context() && 2370 !s->qdev.hba_supports_iothread) 2371 { 2372 error_setg(errp, "HBA does not support iothreads"); 2373 return; 2374 } 2375 2376 if (dev->type == TYPE_DISK) { 2377 if (!blkconf_geometry(&dev->conf, NULL, 65535, 255, 255, errp)) { 2378 return; 2379 } 2380 } 2381 2382 read_only = blk_is_read_only(s->qdev.conf.blk); 2383 if (dev->type == TYPE_ROM) { 2384 read_only = true; 2385 } 2386 2387 if (!blkconf_apply_backend_options(&dev->conf, read_only, 2388 dev->type == TYPE_DISK, errp)) { 2389 return; 2390 } 2391 2392 if (s->qdev.conf.discard_granularity == -1) { 2393 s->qdev.conf.discard_granularity = 2394 MAX(s->qdev.conf.logical_block_size, DEFAULT_DISCARD_GRANULARITY); 2395 } 2396 2397 if (!s->version) { 2398 s->version = g_strdup(qemu_hw_version()); 2399 } 2400 if (!s->vendor) { 2401 s->vendor = g_strdup("QEMU"); 2402 } 2403 if (!s->device_id) { 2404 if (s->serial) { 2405 s->device_id = g_strdup_printf("%.20s", s->serial); 2406 } else { 2407 const char *str = blk_name(s->qdev.conf.blk); 2408 if (str && *str) { 2409 s->device_id = g_strdup(str); 2410 } 2411 } 2412 } 2413 2414 if (blk_is_sg(s->qdev.conf.blk)) { 2415 error_setg(errp, "unwanted /dev/sg*"); 2416 return; 2417 } 2418 2419 if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) && 2420 !(s->features & (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS))) { 2421 blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_removable_block_ops, s); 2422 } else { 2423 blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_block_ops, s); 2424 } 2425 blk_set_guest_block_size(s->qdev.conf.blk, s->qdev.blocksize); 2426 2427 
blk_iostatus_enable(s->qdev.conf.blk); 2428 2429 add_boot_device_lchs(&dev->qdev, NULL, 2430 dev->conf.lcyls, 2431 dev->conf.lheads, 2432 dev->conf.lsecs); 2433 } 2434 2435 static void scsi_unrealize(SCSIDevice *dev) 2436 { 2437 del_boot_device_lchs(&dev->qdev, NULL); 2438 } 2439 2440 static void scsi_hd_realize(SCSIDevice *dev, Error **errp) 2441 { 2442 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2443 AioContext *ctx = NULL; 2444 /* can happen for devices without drive. The error message for missing 2445 * backend will be issued in scsi_realize 2446 */ 2447 if (s->qdev.conf.blk) { 2448 ctx = blk_get_aio_context(s->qdev.conf.blk); 2449 aio_context_acquire(ctx); 2450 if (!blkconf_blocksizes(&s->qdev.conf, errp)) { 2451 goto out; 2452 } 2453 } 2454 s->qdev.blocksize = s->qdev.conf.logical_block_size; 2455 s->qdev.type = TYPE_DISK; 2456 if (!s->product) { 2457 s->product = g_strdup("QEMU HARDDISK"); 2458 } 2459 scsi_realize(&s->qdev, errp); 2460 out: 2461 if (ctx) { 2462 aio_context_release(ctx); 2463 } 2464 } 2465 2466 static void scsi_cd_realize(SCSIDevice *dev, Error **errp) 2467 { 2468 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2469 AioContext *ctx; 2470 int ret; 2471 2472 if (!dev->conf.blk) { 2473 /* Anonymous BlockBackend for an empty drive. As we put it into 2474 * dev->conf, qdev takes care of detaching on unplug. */ 2475 dev->conf.blk = blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL); 2476 ret = blk_attach_dev(dev->conf.blk, &dev->qdev); 2477 assert(ret == 0); 2478 } 2479 2480 ctx = blk_get_aio_context(dev->conf.blk); 2481 aio_context_acquire(ctx); 2482 s->qdev.blocksize = 2048; 2483 s->qdev.type = TYPE_ROM; 2484 s->features |= 1 << SCSI_DISK_F_REMOVABLE; 2485 if (!s->product) { 2486 s->product = g_strdup("QEMU CD-ROM"); 2487 } 2488 scsi_realize(&s->qdev, errp); 2489 aio_context_release(ctx); 2490 } 2491 2492 static void scsi_disk_realize(SCSIDevice *dev, Error **errp) 2493 { 2494 DriveInfo *dinfo; 2495 Error *local_err = NULL; 2496 2497 warn_report("'scsi-disk' is deprecated, " 2498 "please use 'scsi-hd' or 'scsi-cd' instead"); 2499 2500 if (!dev->conf.blk) { 2501 scsi_realize(dev, &local_err); 2502 assert(local_err); 2503 error_propagate(errp, local_err); 2504 return; 2505 } 2506 2507 dinfo = blk_legacy_dinfo(dev->conf.blk); 2508 if (dinfo && dinfo->media_cd) { 2509 scsi_cd_realize(dev, errp); 2510 } else { 2511 scsi_hd_realize(dev, errp); 2512 } 2513 } 2514 2515 static const SCSIReqOps scsi_disk_emulate_reqops = { 2516 .size = sizeof(SCSIDiskReq), 2517 .free_req = scsi_free_request, 2518 .send_command = scsi_disk_emulate_command, 2519 .read_data = scsi_disk_emulate_read_data, 2520 .write_data = scsi_disk_emulate_write_data, 2521 .get_buf = scsi_get_buf, 2522 }; 2523 2524 static const SCSIReqOps scsi_disk_dma_reqops = { 2525 .size = sizeof(SCSIDiskReq), 2526 .free_req = scsi_free_request, 2527 .send_command = scsi_disk_dma_command, 2528 .read_data = scsi_read_data, 2529 .write_data = scsi_write_data, 2530 .get_buf = scsi_get_buf, 2531 .load_request = scsi_disk_load_request, 2532 .save_request = scsi_disk_save_request, 2533 }; 2534 2535 static const SCSIReqOps *const scsi_disk_reqops_dispatch[256] = { 2536 [TEST_UNIT_READY] = &scsi_disk_emulate_reqops, 2537 [INQUIRY] = &scsi_disk_emulate_reqops, 2538 [MODE_SENSE] = &scsi_disk_emulate_reqops, 2539 [MODE_SENSE_10] = &scsi_disk_emulate_reqops, 2540 [START_STOP] = &scsi_disk_emulate_reqops, 2541 [ALLOW_MEDIUM_REMOVAL] = &scsi_disk_emulate_reqops, 2542 [READ_CAPACITY_10] = &scsi_disk_emulate_reqops, 2543 
[READ_TOC] = &scsi_disk_emulate_reqops, 2544 [READ_DVD_STRUCTURE] = &scsi_disk_emulate_reqops, 2545 [READ_DISC_INFORMATION] = &scsi_disk_emulate_reqops, 2546 [GET_CONFIGURATION] = &scsi_disk_emulate_reqops, 2547 [GET_EVENT_STATUS_NOTIFICATION] = &scsi_disk_emulate_reqops, 2548 [MECHANISM_STATUS] = &scsi_disk_emulate_reqops, 2549 [SERVICE_ACTION_IN_16] = &scsi_disk_emulate_reqops, 2550 [REQUEST_SENSE] = &scsi_disk_emulate_reqops, 2551 [SYNCHRONIZE_CACHE] = &scsi_disk_emulate_reqops, 2552 [SEEK_10] = &scsi_disk_emulate_reqops, 2553 [MODE_SELECT] = &scsi_disk_emulate_reqops, 2554 [MODE_SELECT_10] = &scsi_disk_emulate_reqops, 2555 [UNMAP] = &scsi_disk_emulate_reqops, 2556 [WRITE_SAME_10] = &scsi_disk_emulate_reqops, 2557 [WRITE_SAME_16] = &scsi_disk_emulate_reqops, 2558 [VERIFY_10] = &scsi_disk_emulate_reqops, 2559 [VERIFY_12] = &scsi_disk_emulate_reqops, 2560 [VERIFY_16] = &scsi_disk_emulate_reqops, 2561 2562 [READ_6] = &scsi_disk_dma_reqops, 2563 [READ_10] = &scsi_disk_dma_reqops, 2564 [READ_12] = &scsi_disk_dma_reqops, 2565 [READ_16] = &scsi_disk_dma_reqops, 2566 [WRITE_6] = &scsi_disk_dma_reqops, 2567 [WRITE_10] = &scsi_disk_dma_reqops, 2568 [WRITE_12] = &scsi_disk_dma_reqops, 2569 [WRITE_16] = &scsi_disk_dma_reqops, 2570 [WRITE_VERIFY_10] = &scsi_disk_dma_reqops, 2571 [WRITE_VERIFY_12] = &scsi_disk_dma_reqops, 2572 [WRITE_VERIFY_16] = &scsi_disk_dma_reqops, 2573 }; 2574 2575 static void scsi_disk_new_request_dump(uint32_t lun, uint32_t tag, uint8_t *buf) 2576 { 2577 int i; 2578 int len = scsi_cdb_length(buf); 2579 char *line_buffer, *p; 2580 2581 line_buffer = g_malloc(len * 5 + 1); 2582 2583 for (i = 0, p = line_buffer; i < len; i++) { 2584 p += sprintf(p, " 0x%02x", buf[i]); 2585 } 2586 trace_scsi_disk_new_request(lun, tag, line_buffer); 2587 2588 g_free(line_buffer); 2589 } 2590 2591 static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun, 2592 uint8_t *buf, void *hba_private) 2593 { 2594 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d); 2595 SCSIRequest *req; 2596 const SCSIReqOps *ops; 2597 uint8_t command; 2598 2599 command = buf[0]; 2600 ops = scsi_disk_reqops_dispatch[command]; 2601 if (!ops) { 2602 ops = &scsi_disk_emulate_reqops; 2603 } 2604 req = scsi_req_alloc(ops, &s->qdev, tag, lun, hba_private); 2605 2606 if (trace_event_get_state_backends(TRACE_SCSI_DISK_NEW_REQUEST)) { 2607 scsi_disk_new_request_dump(lun, tag, buf); 2608 } 2609 2610 return req; 2611 } 2612 2613 #ifdef __linux__ 2614 static int get_device_type(SCSIDiskState *s) 2615 { 2616 uint8_t cmd[16]; 2617 uint8_t buf[36]; 2618 int ret; 2619 2620 memset(cmd, 0, sizeof(cmd)); 2621 memset(buf, 0, sizeof(buf)); 2622 cmd[0] = INQUIRY; 2623 cmd[4] = sizeof(buf); 2624 2625 ret = scsi_SG_IO_FROM_DEV(s->qdev.conf.blk, cmd, sizeof(cmd), 2626 buf, sizeof(buf)); 2627 if (ret < 0) { 2628 return -1; 2629 } 2630 s->qdev.type = buf[0]; 2631 if (buf[1] & 0x80) { 2632 s->features |= 1 << SCSI_DISK_F_REMOVABLE; 2633 } 2634 return 0; 2635 } 2636 2637 static void scsi_block_realize(SCSIDevice *dev, Error **errp) 2638 { 2639 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2640 AioContext *ctx; 2641 int sg_version; 2642 int rc; 2643 2644 if (!s->qdev.conf.blk) { 2645 error_setg(errp, "drive property not set"); 2646 return; 2647 } 2648 2649 if (s->rotation_rate) { 2650 error_report_once("rotation_rate is specified for scsi-block but is " 2651 "not implemented. 
This option is deprecated and will " 2652 "be removed in a future version"); 2653 } 2654 2655 ctx = blk_get_aio_context(s->qdev.conf.blk); 2656 aio_context_acquire(ctx); 2657 2658 /* Check that we are using a driver that supports SG_IO (version 3 and later). */ 2659 rc = blk_ioctl(s->qdev.conf.blk, SG_GET_VERSION_NUM, &sg_version); 2660 if (rc < 0) { 2661 error_setg_errno(errp, -rc, "cannot get SG_IO version number"); 2662 if (rc != -EPERM) { 2663 error_append_hint(errp, "Is this a SCSI device?\n"); 2664 } 2665 goto out; 2666 } 2667 if (sg_version < 30000) { 2668 error_setg(errp, "scsi generic interface too old"); 2669 goto out; 2670 } 2671 2672 /* get device type from INQUIRY data */ 2673 rc = get_device_type(s); 2674 if (rc < 0) { 2675 error_setg(errp, "INQUIRY failed"); 2676 goto out; 2677 } 2678 2679 /* Make a guess for the block size; we'll fix it when the guest sends 2680 * READ CAPACITY. If they don't, they likely would assume these sizes 2681 * anyway. (TODO: check in /sys). 2682 */ 2683 if (s->qdev.type == TYPE_ROM || s->qdev.type == TYPE_WORM) { 2684 s->qdev.blocksize = 2048; 2685 } else { 2686 s->qdev.blocksize = 512; 2687 } 2688 2689 /* Prevent the scsi-block device from being ejected with the HMP and QMP 2690 * eject commands. 2691 */ 2692 s->features |= (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS); 2693 2694 scsi_realize(&s->qdev, errp); 2695 scsi_generic_read_device_inquiry(&s->qdev); 2696 2697 out: 2698 aio_context_release(ctx); 2699 } 2700 2701 typedef struct SCSIBlockReq { 2702 SCSIDiskReq req; 2703 sg_io_hdr_t io_header; 2704 2705 /* Selected bytes of the original CDB, copied into our own CDB. */ 2706 uint8_t cmd, cdb1, group_number; 2707 2708 /* CDB passed to SG_IO. */ 2709 uint8_t cdb[16]; 2710 } SCSIBlockReq; 2711 2712 static BlockAIOCB *scsi_block_do_sgio(SCSIBlockReq *req, 2713 int64_t offset, QEMUIOVector *iov, 2714 int direction, 2715 BlockCompletionFunc *cb, void *opaque) 2716 { 2717 sg_io_hdr_t *io_header = &req->io_header; 2718 SCSIDiskReq *r = &req->req; 2719 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 2720 int nb_logical_blocks; 2721 uint64_t lba; 2722 BlockAIOCB *aiocb; 2723 2724 /* This is not supported yet. It can only happen if the guest does 2725 * reads and writes that are not aligned to the logical sector size 2726 * _and_ cover multiple MemoryRegions. 2727 */ 2728 assert(offset % s->qdev.blocksize == 0); 2729 assert(iov->size % s->qdev.blocksize == 0); 2730 2731 io_header->interface_id = 'S'; 2732 2733 /* The data transfer comes from the QEMUIOVector. */ 2734 io_header->dxfer_direction = direction; 2735 io_header->dxfer_len = iov->size; 2736 io_header->dxferp = (void *)iov->iov; 2737 io_header->iovec_count = iov->niov; 2738 assert(io_header->iovec_count == iov->niov); /* no overflow! */ 2739 2740 /* Build a new CDB with the LBA and length patched in, in case 2741 * DMA helpers split the transfer in multiple segments. Do not 2742 * build a CDB smaller than what the guest wanted, and only build 2743 * a larger one if strictly necessary.
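* For example, a READ(10) from the guest is only re-encoded as a READ(16) here when the patched LBA no longer fits in 32 bits.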
2744 */ 2745 io_header->cmdp = req->cdb; 2746 lba = offset / s->qdev.blocksize; 2747 nb_logical_blocks = io_header->dxfer_len / s->qdev.blocksize; 2748 2749 if ((req->cmd >> 5) == 0 && lba <= 0x1ffff) { 2750 /* 6-byte CDB */ 2751 stl_be_p(&req->cdb[0], lba | (req->cmd << 24)); 2752 req->cdb[4] = nb_logical_blocks; 2753 req->cdb[5] = 0; 2754 io_header->cmd_len = 6; 2755 } else if ((req->cmd >> 5) <= 1 && lba <= 0xffffffffULL) { 2756 /* 10-byte CDB */ 2757 req->cdb[0] = (req->cmd & 0x1f) | 0x20; 2758 req->cdb[1] = req->cdb1; 2759 stl_be_p(&req->cdb[2], lba); 2760 req->cdb[6] = req->group_number; 2761 stw_be_p(&req->cdb[7], nb_logical_blocks); 2762 req->cdb[9] = 0; 2763 io_header->cmd_len = 10; 2764 } else if ((req->cmd >> 5) != 4 && lba <= 0xffffffffULL) { 2765 /* 12-byte CDB */ 2766 req->cdb[0] = (req->cmd & 0x1f) | 0xA0; 2767 req->cdb[1] = req->cdb1; 2768 stl_be_p(&req->cdb[2], lba); 2769 stl_be_p(&req->cdb[6], nb_logical_blocks); 2770 req->cdb[10] = req->group_number; 2771 req->cdb[11] = 0; 2772 io_header->cmd_len = 12; 2773 } else { 2774 /* 16-byte CDB */ 2775 req->cdb[0] = (req->cmd & 0x1f) | 0x80; 2776 req->cdb[1] = req->cdb1; 2777 stq_be_p(&req->cdb[2], lba); 2778 stl_be_p(&req->cdb[10], nb_logical_blocks); 2779 req->cdb[14] = req->group_number; 2780 req->cdb[15] = 0; 2781 io_header->cmd_len = 16; 2782 } 2783 2784 /* The rest is as in scsi-generic.c. */ 2785 io_header->mx_sb_len = sizeof(r->req.sense); 2786 io_header->sbp = r->req.sense; 2787 io_header->timeout = UINT_MAX; 2788 io_header->usr_ptr = r; 2789 io_header->flags |= SG_FLAG_DIRECT_IO; 2790 2791 aiocb = blk_aio_ioctl(s->qdev.conf.blk, SG_IO, io_header, cb, opaque); 2792 assert(aiocb != NULL); 2793 return aiocb; 2794 } 2795 2796 static bool scsi_block_no_fua(SCSICommand *cmd) 2797 { 2798 return false; 2799 } 2800 2801 static BlockAIOCB *scsi_block_dma_readv(int64_t offset, 2802 QEMUIOVector *iov, 2803 BlockCompletionFunc *cb, void *cb_opaque, 2804 void *opaque) 2805 { 2806 SCSIBlockReq *r = opaque; 2807 return scsi_block_do_sgio(r, offset, iov, 2808 SG_DXFER_FROM_DEV, cb, cb_opaque); 2809 } 2810 2811 static BlockAIOCB *scsi_block_dma_writev(int64_t offset, 2812 QEMUIOVector *iov, 2813 BlockCompletionFunc *cb, void *cb_opaque, 2814 void *opaque) 2815 { 2816 SCSIBlockReq *r = opaque; 2817 return scsi_block_do_sgio(r, offset, iov, 2818 SG_DXFER_TO_DEV, cb, cb_opaque); 2819 } 2820 2821 static bool scsi_block_is_passthrough(SCSIDiskState *s, uint8_t *buf) 2822 { 2823 switch (buf[0]) { 2824 case VERIFY_10: 2825 case VERIFY_12: 2826 case VERIFY_16: 2827 /* Check if BYTCHK == 0x01 (data-out buffer contains data 2828 * for the number of logical blocks specified in the length 2829 * field). For other modes, do not use scatter/gather operation. 2830 */ 2831 if ((buf[1] & 6) == 2) { 2832 return false; 2833 } 2834 break; 2835 2836 case READ_6: 2837 case READ_10: 2838 case READ_12: 2839 case READ_16: 2840 case WRITE_6: 2841 case WRITE_10: 2842 case WRITE_12: 2843 case WRITE_16: 2844 case WRITE_VERIFY_10: 2845 case WRITE_VERIFY_12: 2846 case WRITE_VERIFY_16: 2847 /* MMC writing cannot be done via DMA helpers, because it sometimes 2848 * involves writing beyond the maximum LBA or to negative LBA (lead-in). 2849 * We might use scsi_block_dma_reqops as long as no writing commands are 2850 * seen, but performance usually isn't paramount on optical media. So, 2851 * just make scsi-block operate the same as scsi-generic for them. 
2852 */ 2853 if (s->qdev.type != TYPE_ROM) { 2854 return false; 2855 } 2856 break; 2857 2858 default: 2859 break; 2860 } 2861 2862 return true; 2863 } 2864 2865 2866 static int32_t scsi_block_dma_command(SCSIRequest *req, uint8_t *buf) 2867 { 2868 SCSIBlockReq *r = (SCSIBlockReq *)req; 2869 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 2870 2871 r->cmd = req->cmd.buf[0]; 2872 switch (r->cmd >> 5) { 2873 case 0: 2874 /* 6-byte CDB. */ 2875 r->cdb1 = r->group_number = 0; 2876 break; 2877 case 1: 2878 /* 10-byte CDB. */ 2879 r->cdb1 = req->cmd.buf[1]; 2880 r->group_number = req->cmd.buf[6]; 2881 break; 2882 case 4: 2883 /* 12-byte CDB. */ 2884 r->cdb1 = req->cmd.buf[1]; 2885 r->group_number = req->cmd.buf[10]; 2886 break; 2887 case 5: 2888 /* 16-byte CDB. */ 2889 r->cdb1 = req->cmd.buf[1]; 2890 r->group_number = req->cmd.buf[14]; 2891 break; 2892 default: 2893 abort(); 2894 } 2895 2896 /* Protection information is not supported. For SCSI versions 2 and 2897 * older (as determined by snooping the guest's INQUIRY commands), 2898 * there is no RD/WR/VRPROTECT, so skip this check in these versions. 2899 */ 2900 if (s->qdev.scsi_version > 2 && (req->cmd.buf[1] & 0xe0)) { 2901 scsi_check_condition(&r->req, SENSE_CODE(INVALID_FIELD)); 2902 return 0; 2903 } 2904 2905 r->req.status = &r->io_header.status; 2906 return scsi_disk_dma_command(req, buf); 2907 } 2908 2909 static const SCSIReqOps scsi_block_dma_reqops = { 2910 .size = sizeof(SCSIBlockReq), 2911 .free_req = scsi_free_request, 2912 .send_command = scsi_block_dma_command, 2913 .read_data = scsi_read_data, 2914 .write_data = scsi_write_data, 2915 .get_buf = scsi_get_buf, 2916 .load_request = scsi_disk_load_request, 2917 .save_request = scsi_disk_save_request, 2918 }; 2919 2920 static SCSIRequest *scsi_block_new_request(SCSIDevice *d, uint32_t tag, 2921 uint32_t lun, uint8_t *buf, 2922 void *hba_private) 2923 { 2924 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d); 2925 2926 if (scsi_block_is_passthrough(s, buf)) { 2927 return scsi_req_alloc(&scsi_generic_req_ops, &s->qdev, tag, lun, 2928 hba_private); 2929 } else { 2930 return scsi_req_alloc(&scsi_block_dma_reqops, &s->qdev, tag, lun, 2931 hba_private); 2932 } 2933 } 2934 2935 static int scsi_block_parse_cdb(SCSIDevice *d, SCSICommand *cmd, 2936 uint8_t *buf, void *hba_private) 2937 { 2938 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d); 2939 2940 if (scsi_block_is_passthrough(s, buf)) { 2941 return scsi_bus_parse_cdb(&s->qdev, cmd, buf, hba_private); 2942 } else { 2943 return scsi_req_parse_cdb(&s->qdev, cmd, buf); 2944 } 2945 } 2946 2947 static void scsi_block_update_sense(SCSIRequest *req) 2948 { 2949 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 2950 SCSIBlockReq *br = DO_UPCAST(SCSIBlockReq, req, r); 2951 r->req.sense_len = MIN(br->io_header.sb_len_wr, sizeof(r->req.sense)); 2952 } 2953 #endif 2954 2955 static 2956 BlockAIOCB *scsi_dma_readv(int64_t offset, QEMUIOVector *iov, 2957 BlockCompletionFunc *cb, void *cb_opaque, 2958 void *opaque) 2959 { 2960 SCSIDiskReq *r = opaque; 2961 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 2962 return blk_aio_preadv(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque); 2963 } 2964 2965 static 2966 BlockAIOCB *scsi_dma_writev(int64_t offset, QEMUIOVector *iov, 2967 BlockCompletionFunc *cb, void *cb_opaque, 2968 void *opaque) 2969 { 2970 SCSIDiskReq *r = opaque; 2971 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 2972 return blk_aio_pwritev(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque); 2973 } 
2974 2975 static void scsi_disk_base_class_initfn(ObjectClass *klass, void *data) 2976 { 2977 DeviceClass *dc = DEVICE_CLASS(klass); 2978 SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass); 2979 2980 dc->fw_name = "disk"; 2981 dc->reset = scsi_disk_reset; 2982 sdc->dma_readv = scsi_dma_readv; 2983 sdc->dma_writev = scsi_dma_writev; 2984 sdc->need_fua_emulation = scsi_is_cmd_fua; 2985 } 2986 2987 static const TypeInfo scsi_disk_base_info = { 2988 .name = TYPE_SCSI_DISK_BASE, 2989 .parent = TYPE_SCSI_DEVICE, 2990 .class_init = scsi_disk_base_class_initfn, 2991 .instance_size = sizeof(SCSIDiskState), 2992 .class_size = sizeof(SCSIDiskClass), 2993 .abstract = true, 2994 }; 2995 2996 #define DEFINE_SCSI_DISK_PROPERTIES() \ 2997 DEFINE_PROP_DRIVE_IOTHREAD("drive", SCSIDiskState, qdev.conf.blk), \ 2998 DEFINE_BLOCK_PROPERTIES_BASE(SCSIDiskState, qdev.conf), \ 2999 DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf), \ 3000 DEFINE_PROP_STRING("ver", SCSIDiskState, version), \ 3001 DEFINE_PROP_STRING("serial", SCSIDiskState, serial), \ 3002 DEFINE_PROP_STRING("vendor", SCSIDiskState, vendor), \ 3003 DEFINE_PROP_STRING("product", SCSIDiskState, product), \ 3004 DEFINE_PROP_STRING("device_id", SCSIDiskState, device_id) 3005 3006 3007 static Property scsi_hd_properties[] = { 3008 DEFINE_SCSI_DISK_PROPERTIES(), 3009 DEFINE_PROP_BIT("removable", SCSIDiskState, features, 3010 SCSI_DISK_F_REMOVABLE, false), 3011 DEFINE_PROP_BIT("dpofua", SCSIDiskState, features, 3012 SCSI_DISK_F_DPOFUA, false), 3013 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0), 3014 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0), 3015 DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0), 3016 DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size, 3017 DEFAULT_MAX_UNMAP_SIZE), 3018 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size, 3019 DEFAULT_MAX_IO_SIZE), 3020 DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0), 3021 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version, 3022 5), 3023 DEFINE_BLOCK_CHS_PROPERTIES(SCSIDiskState, qdev.conf), 3024 DEFINE_PROP_END_OF_LIST(), 3025 }; 3026 3027 static const VMStateDescription vmstate_scsi_disk_state = { 3028 .name = "scsi-disk", 3029 .version_id = 1, 3030 .minimum_version_id = 1, 3031 .fields = (VMStateField[]) { 3032 VMSTATE_SCSI_DEVICE(qdev, SCSIDiskState), 3033 VMSTATE_BOOL(media_changed, SCSIDiskState), 3034 VMSTATE_BOOL(media_event, SCSIDiskState), 3035 VMSTATE_BOOL(eject_request, SCSIDiskState), 3036 VMSTATE_BOOL(tray_open, SCSIDiskState), 3037 VMSTATE_BOOL(tray_locked, SCSIDiskState), 3038 VMSTATE_END_OF_LIST() 3039 } 3040 }; 3041 3042 static void scsi_hd_class_initfn(ObjectClass *klass, void *data) 3043 { 3044 DeviceClass *dc = DEVICE_CLASS(klass); 3045 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); 3046 3047 sc->realize = scsi_hd_realize; 3048 sc->unrealize = scsi_unrealize; 3049 sc->alloc_req = scsi_new_request; 3050 sc->unit_attention_reported = scsi_disk_unit_attention_reported; 3051 dc->desc = "virtual SCSI disk"; 3052 device_class_set_props(dc, scsi_hd_properties); 3053 dc->vmsd = &vmstate_scsi_disk_state; 3054 } 3055 3056 static const TypeInfo scsi_hd_info = { 3057 .name = "scsi-hd", 3058 .parent = TYPE_SCSI_DISK_BASE, 3059 .class_init = scsi_hd_class_initfn, 3060 }; 3061 3062 static Property scsi_cd_properties[] = { 3063 DEFINE_SCSI_DISK_PROPERTIES(), 3064 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0), 3065 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0), 
3066 DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0), 3067 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size, 3068 DEFAULT_MAX_IO_SIZE), 3069 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version, 3070 5), 3071 DEFINE_PROP_END_OF_LIST(), 3072 }; 3073 3074 static void scsi_cd_class_initfn(ObjectClass *klass, void *data) 3075 { 3076 DeviceClass *dc = DEVICE_CLASS(klass); 3077 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); 3078 3079 sc->realize = scsi_cd_realize; 3080 sc->alloc_req = scsi_new_request; 3081 sc->unit_attention_reported = scsi_disk_unit_attention_reported; 3082 dc->desc = "virtual SCSI CD-ROM"; 3083 device_class_set_props(dc, scsi_cd_properties); 3084 dc->vmsd = &vmstate_scsi_disk_state; 3085 } 3086 3087 static const TypeInfo scsi_cd_info = { 3088 .name = "scsi-cd", 3089 .parent = TYPE_SCSI_DISK_BASE, 3090 .class_init = scsi_cd_class_initfn, 3091 }; 3092 3093 #ifdef __linux__ 3094 static Property scsi_block_properties[] = { 3095 DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf), 3096 DEFINE_PROP_DRIVE("drive", SCSIDiskState, qdev.conf.blk), 3097 DEFINE_PROP_BOOL("share-rw", SCSIDiskState, qdev.conf.share_rw, false), 3098 DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0), 3099 DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size, 3100 DEFAULT_MAX_UNMAP_SIZE), 3101 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size, 3102 DEFAULT_MAX_IO_SIZE), 3103 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version, 3104 -1), 3105 DEFINE_PROP_END_OF_LIST(), 3106 }; 3107 3108 static void scsi_block_class_initfn(ObjectClass *klass, void *data) 3109 { 3110 DeviceClass *dc = DEVICE_CLASS(klass); 3111 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); 3112 SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass); 3113 3114 sc->realize = scsi_block_realize; 3115 sc->alloc_req = scsi_block_new_request; 3116 sc->parse_cdb = scsi_block_parse_cdb; 3117 sdc->dma_readv = scsi_block_dma_readv; 3118 sdc->dma_writev = scsi_block_dma_writev; 3119 sdc->update_sense = scsi_block_update_sense; 3120 sdc->need_fua_emulation = scsi_block_no_fua; 3121 dc->desc = "SCSI block device passthrough"; 3122 device_class_set_props(dc, scsi_block_properties); 3123 dc->vmsd = &vmstate_scsi_disk_state; 3124 } 3125 3126 static const TypeInfo scsi_block_info = { 3127 .name = "scsi-block", 3128 .parent = TYPE_SCSI_DISK_BASE, 3129 .class_init = scsi_block_class_initfn, 3130 }; 3131 #endif 3132 3133 static Property scsi_disk_properties[] = { 3134 DEFINE_SCSI_DISK_PROPERTIES(), 3135 DEFINE_PROP_BIT("removable", SCSIDiskState, features, 3136 SCSI_DISK_F_REMOVABLE, false), 3137 DEFINE_PROP_BIT("dpofua", SCSIDiskState, features, 3138 SCSI_DISK_F_DPOFUA, false), 3139 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0), 3140 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0), 3141 DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0), 3142 DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size, 3143 DEFAULT_MAX_UNMAP_SIZE), 3144 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size, 3145 DEFAULT_MAX_IO_SIZE), 3146 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version, 3147 5), 3148 DEFINE_PROP_END_OF_LIST(), 3149 }; 3150 3151 static void scsi_disk_class_initfn(ObjectClass *klass, void *data) 3152 { 3153 DeviceClass *dc = DEVICE_CLASS(klass); 3154 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); 3155 3156 sc->realize = scsi_disk_realize; 3157 sc->alloc_req = 
scsi_new_request; 3158 sc->unit_attention_reported = scsi_disk_unit_attention_reported; 3159 dc->fw_name = "disk"; 3160 dc->desc = "virtual SCSI disk or CD-ROM (legacy)"; 3161 dc->reset = scsi_disk_reset; 3162 device_class_set_props(dc, scsi_disk_properties); 3163 dc->vmsd = &vmstate_scsi_disk_state; 3164 } 3165 3166 static const TypeInfo scsi_disk_info = { 3167 .name = "scsi-disk", 3168 .parent = TYPE_SCSI_DISK_BASE, 3169 .class_init = scsi_disk_class_initfn, 3170 }; 3171 3172 static void scsi_disk_register_types(void) 3173 { 3174 type_register_static(&scsi_disk_base_info); 3175 type_register_static(&scsi_hd_info); 3176 type_register_static(&scsi_cd_info); 3177 #ifdef __linux__ 3178 type_register_static(&scsi_block_info); 3179 #endif 3180 type_register_static(&scsi_disk_info); 3181 } 3182 3183 type_init(scsi_disk_register_types) 3184