/*
 * SCSI Device emulation
 *
 * Copyright (c) 2006 CodeSourcery.
 * Based on code by Fabrice Bellard
 *
 * Written by Paul Brook
 * Modifications:
 *  2009-Dec-12 Artyom Tarasenko : implemented standard inquiry for the case
 *                                 when the allocation length of CDB is smaller
 *                                 than 36.
 *  2009-Oct-13 Artyom Tarasenko : implemented the block descriptor in the
 *                                 MODE SENSE response.
 *
 * This code is licensed under the LGPL.
 *
 * Note that this file only handles the SCSI architecture model and device
 * commands.  Emulation of interface/link layer protocols is handled by
 * the host adapter emulator.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "hw/scsi/scsi.h"
#include "migration/qemu-file-types.h"
#include "migration/vmstate.h"
#include "hw/scsi/emulation.h"
#include "scsi/constants.h"
#include "sysemu/block-backend.h"
#include "sysemu/blockdev.h"
#include "hw/block/block.h"
#include "hw/qdev-properties.h"
#include "hw/qdev-properties-system.h"
#include "sysemu/dma.h"
#include "sysemu/sysemu.h"
#include "qemu/cutils.h"
#include "trace.h"
#include "qom/object.h"

#ifdef __linux
#include <scsi/sg.h>
#endif

#define SCSI_WRITE_SAME_MAX         (512 * KiB)
#define SCSI_DMA_BUF_SIZE           (128 * KiB)
#define SCSI_MAX_INQUIRY_LEN        256
#define SCSI_MAX_MODE_LEN           256

#define DEFAULT_DISCARD_GRANULARITY (4 * KiB)
#define DEFAULT_MAX_UNMAP_SIZE      (1 * GiB)
#define DEFAULT_MAX_IO_SIZE         INT_MAX     /* 2 GB - 1 block */

#define TYPE_SCSI_DISK_BASE         "scsi-disk-base"

OBJECT_DECLARE_TYPE(SCSIDiskState, SCSIDiskClass, SCSI_DISK_BASE)

struct SCSIDiskClass {
    SCSIDeviceClass parent_class;
    DMAIOFunc       *dma_readv;
    DMAIOFunc       *dma_writev;
    bool            (*need_fua_emulation)(SCSICommand *cmd);
    void            (*update_sense)(SCSIRequest *r);
};

typedef struct SCSIDiskReq {
    SCSIRequest req;
    /* Both sector and sector_count are in terms of BDRV_SECTOR_SIZE bytes. */
    uint64_t sector;
    uint32_t sector_count;
    uint32_t buflen;
    bool started;
    bool need_fua_emulation;
    struct iovec iov;
    QEMUIOVector qiov;
    BlockAcctCookie acct;
    unsigned char *status;
} SCSIDiskReq;

#define SCSI_DISK_F_REMOVABLE             0
#define SCSI_DISK_F_DPOFUA                1
#define SCSI_DISK_F_NO_REMOVABLE_DEVOPS   2

struct SCSIDiskState {
    SCSIDevice qdev;
    uint32_t features;
    bool media_changed;
    bool media_event;
    bool eject_request;
    uint16_t port_index;
    uint64_t max_unmap_size;
    uint64_t max_io_size;
    QEMUBH *bh;
    char *version;
    char *serial;
    char *vendor;
    char *product;
    char *device_id;
    bool tray_open;
    bool tray_locked;
    /*
     * 0x0000        - rotation rate not reported
     * 0x0001        - non-rotating medium (SSD)
     * 0x0002-0x0400 - reserved
     * 0x0401-0xfffe - rotations per minute
     * 0xffff        - reserved
     */
    uint16_t rotation_rate;
};

static bool scsi_handle_rw_error(SCSIDiskReq *r, int error, bool acct_failed);

static void scsi_free_request(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    qemu_vfree(r->iov.iov_base);
}

/* Helper function for command completion with sense.
*/ 124 static void scsi_check_condition(SCSIDiskReq *r, SCSISense sense) 125 { 126 trace_scsi_disk_check_condition(r->req.tag, sense.key, sense.asc, 127 sense.ascq); 128 scsi_req_build_sense(&r->req, sense); 129 scsi_req_complete(&r->req, CHECK_CONDITION); 130 } 131 132 static void scsi_init_iovec(SCSIDiskReq *r, size_t size) 133 { 134 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 135 136 if (!r->iov.iov_base) { 137 r->buflen = size; 138 r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen); 139 } 140 r->iov.iov_len = MIN(r->sector_count * BDRV_SECTOR_SIZE, r->buflen); 141 qemu_iovec_init_external(&r->qiov, &r->iov, 1); 142 } 143 144 static void scsi_disk_save_request(QEMUFile *f, SCSIRequest *req) 145 { 146 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 147 148 qemu_put_be64s(f, &r->sector); 149 qemu_put_be32s(f, &r->sector_count); 150 qemu_put_be32s(f, &r->buflen); 151 if (r->buflen) { 152 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { 153 qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len); 154 } else if (!req->retry) { 155 uint32_t len = r->iov.iov_len; 156 qemu_put_be32s(f, &len); 157 qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len); 158 } 159 } 160 } 161 162 static void scsi_disk_load_request(QEMUFile *f, SCSIRequest *req) 163 { 164 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 165 166 qemu_get_be64s(f, &r->sector); 167 qemu_get_be32s(f, &r->sector_count); 168 qemu_get_be32s(f, &r->buflen); 169 if (r->buflen) { 170 scsi_init_iovec(r, r->buflen); 171 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { 172 qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len); 173 } else if (!r->req.retry) { 174 uint32_t len; 175 qemu_get_be32s(f, &len); 176 r->iov.iov_len = len; 177 assert(r->iov.iov_len <= r->buflen); 178 qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len); 179 } 180 } 181 182 qemu_iovec_init_external(&r->qiov, &r->iov, 1); 183 } 184 185 static bool scsi_disk_req_check_error(SCSIDiskReq *r, int ret, bool acct_failed) 186 { 187 if (r->req.io_canceled) { 188 scsi_req_cancel_complete(&r->req); 189 return true; 190 } 191 192 if (ret < 0 || (r->status && *r->status)) { 193 return scsi_handle_rw_error(r, -ret, acct_failed); 194 } 195 196 return false; 197 } 198 199 static void scsi_aio_complete(void *opaque, int ret) 200 { 201 SCSIDiskReq *r = (SCSIDiskReq *)opaque; 202 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 203 204 assert(r->req.aiocb != NULL); 205 r->req.aiocb = NULL; 206 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); 207 if (scsi_disk_req_check_error(r, ret, true)) { 208 goto done; 209 } 210 211 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); 212 scsi_req_complete(&r->req, GOOD); 213 214 done: 215 aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); 216 scsi_req_unref(&r->req); 217 } 218 219 static bool scsi_is_cmd_fua(SCSICommand *cmd) 220 { 221 switch (cmd->buf[0]) { 222 case READ_10: 223 case READ_12: 224 case READ_16: 225 case WRITE_10: 226 case WRITE_12: 227 case WRITE_16: 228 return (cmd->buf[1] & 8) != 0; 229 230 case VERIFY_10: 231 case VERIFY_12: 232 case VERIFY_16: 233 case WRITE_VERIFY_10: 234 case WRITE_VERIFY_12: 235 case WRITE_VERIFY_16: 236 return true; 237 238 case READ_6: 239 case WRITE_6: 240 default: 241 return false; 242 } 243 } 244 245 static void scsi_write_do_fua(SCSIDiskReq *r) 246 { 247 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 248 249 assert(r->req.aiocb == NULL); 250 assert(!r->req.io_canceled); 251 252 if (r->need_fua_emulation) { 253 
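        /*
         * Writes with the FUA bit set are emulated with an explicit flush:
         * the data has already gone through the normal write path, and
         * scsi_aio_complete() finishes the request once blk_aio_flush()
         * signals completion.
         */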
block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0, 254 BLOCK_ACCT_FLUSH); 255 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r); 256 return; 257 } 258 259 scsi_req_complete(&r->req, GOOD); 260 scsi_req_unref(&r->req); 261 } 262 263 static void scsi_dma_complete_noio(SCSIDiskReq *r, int ret) 264 { 265 assert(r->req.aiocb == NULL); 266 if (scsi_disk_req_check_error(r, ret, false)) { 267 goto done; 268 } 269 270 r->sector += r->sector_count; 271 r->sector_count = 0; 272 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { 273 scsi_write_do_fua(r); 274 return; 275 } else { 276 scsi_req_complete(&r->req, GOOD); 277 } 278 279 done: 280 scsi_req_unref(&r->req); 281 } 282 283 static void scsi_dma_complete(void *opaque, int ret) 284 { 285 SCSIDiskReq *r = (SCSIDiskReq *)opaque; 286 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 287 288 assert(r->req.aiocb != NULL); 289 r->req.aiocb = NULL; 290 291 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); 292 if (ret < 0) { 293 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct); 294 } else { 295 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); 296 } 297 scsi_dma_complete_noio(r, ret); 298 aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); 299 } 300 301 static void scsi_read_complete_noio(SCSIDiskReq *r, int ret) 302 { 303 uint32_t n; 304 305 assert(r->req.aiocb == NULL); 306 if (scsi_disk_req_check_error(r, ret, false)) { 307 goto done; 308 } 309 310 n = r->qiov.size / BDRV_SECTOR_SIZE; 311 r->sector += n; 312 r->sector_count -= n; 313 scsi_req_data(&r->req, r->qiov.size); 314 315 done: 316 scsi_req_unref(&r->req); 317 } 318 319 static void scsi_read_complete(void *opaque, int ret) 320 { 321 SCSIDiskReq *r = (SCSIDiskReq *)opaque; 322 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 323 324 assert(r->req.aiocb != NULL); 325 r->req.aiocb = NULL; 326 327 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); 328 if (ret < 0) { 329 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct); 330 } else { 331 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); 332 trace_scsi_disk_read_complete(r->req.tag, r->qiov.size); 333 } 334 scsi_read_complete_noio(r, ret); 335 aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); 336 } 337 338 /* Actually issue a read to the block device. */ 339 static void scsi_do_read(SCSIDiskReq *r, int ret) 340 { 341 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 342 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s)); 343 344 assert (r->req.aiocb == NULL); 345 if (scsi_disk_req_check_error(r, ret, false)) { 346 goto done; 347 } 348 349 /* The request is used as the AIO opaque value, so add a ref. 
*/ 350 scsi_req_ref(&r->req); 351 352 if (r->req.sg) { 353 dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_READ); 354 r->req.resid -= r->req.sg->size; 355 r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk), 356 r->req.sg, r->sector << BDRV_SECTOR_BITS, 357 BDRV_SECTOR_SIZE, 358 sdc->dma_readv, r, scsi_dma_complete, r, 359 DMA_DIRECTION_FROM_DEVICE); 360 } else { 361 scsi_init_iovec(r, SCSI_DMA_BUF_SIZE); 362 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 363 r->qiov.size, BLOCK_ACCT_READ); 364 r->req.aiocb = sdc->dma_readv(r->sector << BDRV_SECTOR_BITS, &r->qiov, 365 scsi_read_complete, r, r); 366 } 367 368 done: 369 scsi_req_unref(&r->req); 370 } 371 372 static void scsi_do_read_cb(void *opaque, int ret) 373 { 374 SCSIDiskReq *r = (SCSIDiskReq *)opaque; 375 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 376 377 assert (r->req.aiocb != NULL); 378 r->req.aiocb = NULL; 379 380 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); 381 if (ret < 0) { 382 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct); 383 } else { 384 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); 385 } 386 scsi_do_read(opaque, ret); 387 aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); 388 } 389 390 /* Read more data from scsi device into buffer. */ 391 static void scsi_read_data(SCSIRequest *req) 392 { 393 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 394 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 395 bool first; 396 397 trace_scsi_disk_read_data_count(r->sector_count); 398 if (r->sector_count == 0) { 399 /* This also clears the sense buffer for REQUEST SENSE. */ 400 scsi_req_complete(&r->req, GOOD); 401 return; 402 } 403 404 /* No data transfer may already be in progress */ 405 assert(r->req.aiocb == NULL); 406 407 /* The request is used as the AIO opaque value, so add a ref. */ 408 scsi_req_ref(&r->req); 409 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { 410 trace_scsi_disk_read_data_invalid(); 411 scsi_read_complete_noio(r, -EINVAL); 412 return; 413 } 414 415 if (!blk_is_available(req->dev->conf.blk)) { 416 scsi_read_complete_noio(r, -ENOMEDIUM); 417 return; 418 } 419 420 first = !r->started; 421 r->started = true; 422 if (first && r->need_fua_emulation) { 423 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0, 424 BLOCK_ACCT_FLUSH); 425 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_do_read_cb, r); 426 } else { 427 scsi_do_read(r, 0); 428 } 429 } 430 431 /* 432 * scsi_handle_rw_error has two return values. False means that the error 433 * must be ignored, true means that the error has been processed and the 434 * caller should not do anything else for this request. Note that 435 * scsi_handle_rw_error always manages its reference counts, independent 436 * of the return value. 
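 *
 * For example, with werror=stop/rerror=stop the request is queued for
 * retry with scsi_req_retry() and the VM is paused, while the "report"
 * policy turns the error into a CHECK CONDITION (or other status) for
 * the guest.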
437 */ 438 static bool scsi_handle_rw_error(SCSIDiskReq *r, int error, bool acct_failed) 439 { 440 bool is_read = (r->req.cmd.mode == SCSI_XFER_FROM_DEV); 441 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 442 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s)); 443 BlockErrorAction action = blk_get_error_action(s->qdev.conf.blk, 444 is_read, error); 445 446 if (action == BLOCK_ERROR_ACTION_REPORT) { 447 if (acct_failed) { 448 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct); 449 } 450 switch (error) { 451 case 0: 452 /* A passthrough command has run and has produced sense data; check 453 * whether the error has to be handled by the guest or should rather 454 * pause the host. 455 */ 456 assert(r->status && *r->status); 457 if (scsi_sense_buf_is_guest_recoverable(r->req.sense, sizeof(r->req.sense))) { 458 /* These errors are handled by guest. */ 459 sdc->update_sense(&r->req); 460 scsi_req_complete(&r->req, *r->status); 461 return true; 462 } 463 error = scsi_sense_buf_to_errno(r->req.sense, sizeof(r->req.sense)); 464 break; 465 #ifdef CONFIG_LINUX 466 /* These errno mapping are specific to Linux. For more information: 467 * - scsi_decide_disposition in drivers/scsi/scsi_error.c 468 * - scsi_result_to_blk_status in drivers/scsi/scsi_lib.c 469 * - blk_errors[] in block/blk-core.c 470 */ 471 case EBADE: 472 /* DID_NEXUS_FAILURE -> BLK_STS_NEXUS. */ 473 scsi_req_complete(&r->req, RESERVATION_CONFLICT); 474 break; 475 case ENODATA: 476 /* DID_MEDIUM_ERROR -> BLK_STS_MEDIUM. */ 477 scsi_check_condition(r, SENSE_CODE(READ_ERROR)); 478 break; 479 case EREMOTEIO: 480 /* DID_TARGET_FAILURE -> BLK_STS_TARGET. */ 481 scsi_req_complete(&r->req, HARDWARE_ERROR); 482 break; 483 #endif 484 case ENOMEDIUM: 485 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM)); 486 break; 487 case ENOMEM: 488 scsi_check_condition(r, SENSE_CODE(TARGET_FAILURE)); 489 break; 490 case EINVAL: 491 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 492 break; 493 case ENOSPC: 494 scsi_check_condition(r, SENSE_CODE(SPACE_ALLOC_FAILED)); 495 break; 496 default: 497 scsi_check_condition(r, SENSE_CODE(IO_ERROR)); 498 break; 499 } 500 } 501 502 blk_error_action(s->qdev.conf.blk, action, is_read, error); 503 if (action == BLOCK_ERROR_ACTION_IGNORE) { 504 scsi_req_complete(&r->req, 0); 505 return true; 506 } 507 508 if (action == BLOCK_ERROR_ACTION_STOP) { 509 scsi_req_retry(&r->req); 510 } 511 return true; 512 } 513 514 static void scsi_write_complete_noio(SCSIDiskReq *r, int ret) 515 { 516 uint32_t n; 517 518 assert (r->req.aiocb == NULL); 519 if (scsi_disk_req_check_error(r, ret, false)) { 520 goto done; 521 } 522 523 n = r->qiov.size / BDRV_SECTOR_SIZE; 524 r->sector += n; 525 r->sector_count -= n; 526 if (r->sector_count == 0) { 527 scsi_write_do_fua(r); 528 return; 529 } else { 530 scsi_init_iovec(r, SCSI_DMA_BUF_SIZE); 531 trace_scsi_disk_write_complete_noio(r->req.tag, r->qiov.size); 532 scsi_req_data(&r->req, r->qiov.size); 533 } 534 535 done: 536 scsi_req_unref(&r->req); 537 } 538 539 static void scsi_write_complete(void * opaque, int ret) 540 { 541 SCSIDiskReq *r = (SCSIDiskReq *)opaque; 542 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 543 544 assert (r->req.aiocb != NULL); 545 r->req.aiocb = NULL; 546 547 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); 548 if (ret < 0) { 549 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct); 550 } else { 551 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); 552 } 553 scsi_write_complete_noio(r, ret); 
554 aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); 555 } 556 557 static void scsi_write_data(SCSIRequest *req) 558 { 559 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 560 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 561 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s)); 562 563 /* No data transfer may already be in progress */ 564 assert(r->req.aiocb == NULL); 565 566 /* The request is used as the AIO opaque value, so add a ref. */ 567 scsi_req_ref(&r->req); 568 if (r->req.cmd.mode != SCSI_XFER_TO_DEV) { 569 trace_scsi_disk_write_data_invalid(); 570 scsi_write_complete_noio(r, -EINVAL); 571 return; 572 } 573 574 if (!r->req.sg && !r->qiov.size) { 575 /* Called for the first time. Ask the driver to send us more data. */ 576 r->started = true; 577 scsi_write_complete_noio(r, 0); 578 return; 579 } 580 if (!blk_is_available(req->dev->conf.blk)) { 581 scsi_write_complete_noio(r, -ENOMEDIUM); 582 return; 583 } 584 585 if (r->req.cmd.buf[0] == VERIFY_10 || r->req.cmd.buf[0] == VERIFY_12 || 586 r->req.cmd.buf[0] == VERIFY_16) { 587 if (r->req.sg) { 588 scsi_dma_complete_noio(r, 0); 589 } else { 590 scsi_write_complete_noio(r, 0); 591 } 592 return; 593 } 594 595 if (r->req.sg) { 596 dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_WRITE); 597 r->req.resid -= r->req.sg->size; 598 r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk), 599 r->req.sg, r->sector << BDRV_SECTOR_BITS, 600 BDRV_SECTOR_SIZE, 601 sdc->dma_writev, r, scsi_dma_complete, r, 602 DMA_DIRECTION_TO_DEVICE); 603 } else { 604 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 605 r->qiov.size, BLOCK_ACCT_WRITE); 606 r->req.aiocb = sdc->dma_writev(r->sector << BDRV_SECTOR_BITS, &r->qiov, 607 scsi_write_complete, r, r); 608 } 609 } 610 611 /* Return a pointer to the data buffer. */ 612 static uint8_t *scsi_get_buf(SCSIRequest *req) 613 { 614 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 615 616 return (uint8_t *)r->iov.iov_base; 617 } 618 619 static int scsi_disk_emulate_vpd_page(SCSIRequest *req, uint8_t *outbuf) 620 { 621 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 622 uint8_t page_code = req->cmd.buf[2]; 623 int start, buflen = 0; 624 625 outbuf[buflen++] = s->qdev.type & 0x1f; 626 outbuf[buflen++] = page_code; 627 outbuf[buflen++] = 0x00; 628 outbuf[buflen++] = 0x00; 629 start = buflen; 630 631 switch (page_code) { 632 case 0x00: /* Supported page codes, mandatory */ 633 { 634 trace_scsi_disk_emulate_vpd_page_00(req->cmd.xfer); 635 outbuf[buflen++] = 0x00; /* list of supported pages (this page) */ 636 if (s->serial) { 637 outbuf[buflen++] = 0x80; /* unit serial number */ 638 } 639 outbuf[buflen++] = 0x83; /* device identification */ 640 if (s->qdev.type == TYPE_DISK) { 641 outbuf[buflen++] = 0xb0; /* block limits */ 642 outbuf[buflen++] = 0xb1; /* block device characteristics */ 643 outbuf[buflen++] = 0xb2; /* thin provisioning */ 644 } 645 break; 646 } 647 case 0x80: /* Device serial number, optional */ 648 { 649 int l; 650 651 if (!s->serial) { 652 trace_scsi_disk_emulate_vpd_page_80_not_supported(); 653 return -1; 654 } 655 656 l = strlen(s->serial); 657 if (l > 36) { 658 l = 36; 659 } 660 661 trace_scsi_disk_emulate_vpd_page_80(req->cmd.xfer); 662 memcpy(outbuf + buflen, s->serial, l); 663 buflen += l; 664 break; 665 } 666 667 case 0x83: /* Device identification page, mandatory */ 668 { 669 int id_len = s->device_id ? 
MIN(strlen(s->device_id), 255 - 8) : 0; 670 671 trace_scsi_disk_emulate_vpd_page_83(req->cmd.xfer); 672 673 if (id_len) { 674 outbuf[buflen++] = 0x2; /* ASCII */ 675 outbuf[buflen++] = 0; /* not officially assigned */ 676 outbuf[buflen++] = 0; /* reserved */ 677 outbuf[buflen++] = id_len; /* length of data following */ 678 memcpy(outbuf + buflen, s->device_id, id_len); 679 buflen += id_len; 680 } 681 682 if (s->qdev.wwn) { 683 outbuf[buflen++] = 0x1; /* Binary */ 684 outbuf[buflen++] = 0x3; /* NAA */ 685 outbuf[buflen++] = 0; /* reserved */ 686 outbuf[buflen++] = 8; 687 stq_be_p(&outbuf[buflen], s->qdev.wwn); 688 buflen += 8; 689 } 690 691 if (s->qdev.port_wwn) { 692 outbuf[buflen++] = 0x61; /* SAS / Binary */ 693 outbuf[buflen++] = 0x93; /* PIV / Target port / NAA */ 694 outbuf[buflen++] = 0; /* reserved */ 695 outbuf[buflen++] = 8; 696 stq_be_p(&outbuf[buflen], s->qdev.port_wwn); 697 buflen += 8; 698 } 699 700 if (s->port_index) { 701 outbuf[buflen++] = 0x61; /* SAS / Binary */ 702 703 /* PIV/Target port/relative target port */ 704 outbuf[buflen++] = 0x94; 705 706 outbuf[buflen++] = 0; /* reserved */ 707 outbuf[buflen++] = 4; 708 stw_be_p(&outbuf[buflen + 2], s->port_index); 709 buflen += 4; 710 } 711 break; 712 } 713 case 0xb0: /* block limits */ 714 { 715 SCSIBlockLimits bl = {}; 716 717 if (s->qdev.type == TYPE_ROM) { 718 trace_scsi_disk_emulate_vpd_page_b0_not_supported(); 719 return -1; 720 } 721 bl.wsnz = 1; 722 bl.unmap_sectors = 723 s->qdev.conf.discard_granularity / s->qdev.blocksize; 724 bl.min_io_size = 725 s->qdev.conf.min_io_size / s->qdev.blocksize; 726 bl.opt_io_size = 727 s->qdev.conf.opt_io_size / s->qdev.blocksize; 728 bl.max_unmap_sectors = 729 s->max_unmap_size / s->qdev.blocksize; 730 bl.max_io_sectors = 731 s->max_io_size / s->qdev.blocksize; 732 /* 255 descriptors fit in 4 KiB with an 8-byte header */ 733 bl.max_unmap_descr = 255; 734 735 if (s->qdev.type == TYPE_DISK) { 736 int max_transfer_blk = blk_get_max_transfer(s->qdev.conf.blk); 737 int max_io_sectors_blk = 738 max_transfer_blk / s->qdev.blocksize; 739 740 bl.max_io_sectors = 741 MIN_NON_ZERO(max_io_sectors_blk, bl.max_io_sectors); 742 } 743 buflen += scsi_emulate_block_limits(outbuf + buflen, &bl); 744 break; 745 } 746 case 0xb1: /* block device characteristics */ 747 { 748 buflen = 0x40; 749 outbuf[4] = (s->rotation_rate >> 8) & 0xff; 750 outbuf[5] = s->rotation_rate & 0xff; 751 outbuf[6] = 0; /* PRODUCT TYPE */ 752 outbuf[7] = 0; /* WABEREQ | WACEREQ | NOMINAL FORM FACTOR */ 753 outbuf[8] = 0; /* VBULS */ 754 break; 755 } 756 case 0xb2: /* thin provisioning */ 757 { 758 buflen = 8; 759 outbuf[4] = 0; 760 outbuf[5] = 0xe0; /* unmap & write_same 10/16 all supported */ 761 outbuf[6] = s->qdev.conf.discard_granularity ? 
2 : 1; 762 outbuf[7] = 0; 763 break; 764 } 765 default: 766 return -1; 767 } 768 /* done with EVPD */ 769 assert(buflen - start <= 255); 770 outbuf[start - 1] = buflen - start; 771 return buflen; 772 } 773 774 static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf) 775 { 776 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 777 int buflen = 0; 778 779 if (req->cmd.buf[1] & 0x1) { 780 /* Vital product data */ 781 return scsi_disk_emulate_vpd_page(req, outbuf); 782 } 783 784 /* Standard INQUIRY data */ 785 if (req->cmd.buf[2] != 0) { 786 return -1; 787 } 788 789 /* PAGE CODE == 0 */ 790 buflen = req->cmd.xfer; 791 if (buflen > SCSI_MAX_INQUIRY_LEN) { 792 buflen = SCSI_MAX_INQUIRY_LEN; 793 } 794 795 outbuf[0] = s->qdev.type & 0x1f; 796 outbuf[1] = (s->features & (1 << SCSI_DISK_F_REMOVABLE)) ? 0x80 : 0; 797 798 strpadcpy((char *) &outbuf[16], 16, s->product, ' '); 799 strpadcpy((char *) &outbuf[8], 8, s->vendor, ' '); 800 801 memset(&outbuf[32], 0, 4); 802 memcpy(&outbuf[32], s->version, MIN(4, strlen(s->version))); 803 /* 804 * We claim conformance to SPC-3, which is required for guests 805 * to ask for modern features like READ CAPACITY(16) or the 806 * block characteristics VPD page by default. Not all of SPC-3 807 * is actually implemented, but we're good enough. 808 */ 809 outbuf[2] = s->qdev.default_scsi_version; 810 outbuf[3] = 2 | 0x10; /* Format 2, HiSup */ 811 812 if (buflen > 36) { 813 outbuf[4] = buflen - 5; /* Additional Length = (Len - 1) - 4 */ 814 } else { 815 /* If the allocation length of CDB is too small, 816 the additional length is not adjusted */ 817 outbuf[4] = 36 - 5; 818 } 819 820 /* Sync data transfer and TCQ. */ 821 outbuf[7] = 0x10 | (req->bus->info->tcq ? 0x02 : 0); 822 return buflen; 823 } 824 825 static inline bool media_is_dvd(SCSIDiskState *s) 826 { 827 uint64_t nb_sectors; 828 if (s->qdev.type != TYPE_ROM) { 829 return false; 830 } 831 if (!blk_is_available(s->qdev.conf.blk)) { 832 return false; 833 } 834 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 835 return nb_sectors > CD_MAX_SECTORS; 836 } 837 838 static inline bool media_is_cd(SCSIDiskState *s) 839 { 840 uint64_t nb_sectors; 841 if (s->qdev.type != TYPE_ROM) { 842 return false; 843 } 844 if (!blk_is_available(s->qdev.conf.blk)) { 845 return false; 846 } 847 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 848 return nb_sectors <= CD_MAX_SECTORS; 849 } 850 851 static int scsi_read_disc_information(SCSIDiskState *s, SCSIDiskReq *r, 852 uint8_t *outbuf) 853 { 854 uint8_t type = r->req.cmd.buf[1] & 7; 855 856 if (s->qdev.type != TYPE_ROM) { 857 return -1; 858 } 859 860 /* Types 1/2 are only defined for Blu-Ray. 
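       so they are rejected here; this emulated drive only returns the
       standard (type 0) disc information block.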
*/ 861 if (type != 0) { 862 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 863 return -1; 864 } 865 866 memset(outbuf, 0, 34); 867 outbuf[1] = 32; 868 outbuf[2] = 0xe; /* last session complete, disc finalized */ 869 outbuf[3] = 1; /* first track on disc */ 870 outbuf[4] = 1; /* # of sessions */ 871 outbuf[5] = 1; /* first track of last session */ 872 outbuf[6] = 1; /* last track of last session */ 873 outbuf[7] = 0x20; /* unrestricted use */ 874 outbuf[8] = 0x00; /* CD-ROM or DVD-ROM */ 875 /* 9-10-11: most significant byte corresponding bytes 4-5-6 */ 876 /* 12-23: not meaningful for CD-ROM or DVD-ROM */ 877 /* 24-31: disc bar code */ 878 /* 32: disc application code */ 879 /* 33: number of OPC tables */ 880 881 return 34; 882 } 883 884 static int scsi_read_dvd_structure(SCSIDiskState *s, SCSIDiskReq *r, 885 uint8_t *outbuf) 886 { 887 static const int rds_caps_size[5] = { 888 [0] = 2048 + 4, 889 [1] = 4 + 4, 890 [3] = 188 + 4, 891 [4] = 2048 + 4, 892 }; 893 894 uint8_t media = r->req.cmd.buf[1]; 895 uint8_t layer = r->req.cmd.buf[6]; 896 uint8_t format = r->req.cmd.buf[7]; 897 int size = -1; 898 899 if (s->qdev.type != TYPE_ROM) { 900 return -1; 901 } 902 if (media != 0) { 903 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 904 return -1; 905 } 906 907 if (format != 0xff) { 908 if (!blk_is_available(s->qdev.conf.blk)) { 909 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM)); 910 return -1; 911 } 912 if (media_is_cd(s)) { 913 scsi_check_condition(r, SENSE_CODE(INCOMPATIBLE_FORMAT)); 914 return -1; 915 } 916 if (format >= ARRAY_SIZE(rds_caps_size)) { 917 return -1; 918 } 919 size = rds_caps_size[format]; 920 memset(outbuf, 0, size); 921 } 922 923 switch (format) { 924 case 0x00: { 925 /* Physical format information */ 926 uint64_t nb_sectors; 927 if (layer != 0) { 928 goto fail; 929 } 930 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 931 932 outbuf[4] = 1; /* DVD-ROM, part version 1 */ 933 outbuf[5] = 0xf; /* 120mm disc, minimum rate unspecified */ 934 outbuf[6] = 1; /* one layer, read-only (per MMC-2 spec) */ 935 outbuf[7] = 0; /* default densities */ 936 937 stl_be_p(&outbuf[12], (nb_sectors >> 2) - 1); /* end sector */ 938 stl_be_p(&outbuf[16], (nb_sectors >> 2) - 1); /* l0 end sector */ 939 break; 940 } 941 942 case 0x01: /* DVD copyright information, all zeros */ 943 break; 944 945 case 0x03: /* BCA information - invalid field for no BCA info */ 946 return -1; 947 948 case 0x04: /* DVD disc manufacturing information, all zeros */ 949 break; 950 951 case 0xff: { /* List capabilities */ 952 int i; 953 size = 4; 954 for (i = 0; i < ARRAY_SIZE(rds_caps_size); i++) { 955 if (!rds_caps_size[i]) { 956 continue; 957 } 958 outbuf[size] = i; 959 outbuf[size + 1] = 0x40; /* Not writable, readable */ 960 stw_be_p(&outbuf[size + 2], rds_caps_size[i]); 961 size += 4; 962 } 963 break; 964 } 965 966 default: 967 return -1; 968 } 969 970 /* Size of buffer, not including 2 byte size field */ 971 stw_be_p(outbuf, size - 2); 972 return size; 973 974 fail: 975 return -1; 976 } 977 978 static int scsi_event_status_media(SCSIDiskState *s, uint8_t *outbuf) 979 { 980 uint8_t event_code, media_status; 981 982 media_status = 0; 983 if (s->tray_open) { 984 media_status = MS_TRAY_OPEN; 985 } else if (blk_is_inserted(s->qdev.conf.blk)) { 986 media_status = MS_MEDIA_PRESENT; 987 } 988 989 /* Event notification descriptor */ 990 event_code = MEC_NO_CHANGE; 991 if (media_status != MS_TRAY_OPEN) { 992 if (s->media_event) { 993 event_code = MEC_NEW_MEDIA; 994 s->media_event = false; 995 } else if 
(s->eject_request) { 996 event_code = MEC_EJECT_REQUESTED; 997 s->eject_request = false; 998 } 999 } 1000 1001 outbuf[0] = event_code; 1002 outbuf[1] = media_status; 1003 1004 /* These fields are reserved, just clear them. */ 1005 outbuf[2] = 0; 1006 outbuf[3] = 0; 1007 return 4; 1008 } 1009 1010 static int scsi_get_event_status_notification(SCSIDiskState *s, SCSIDiskReq *r, 1011 uint8_t *outbuf) 1012 { 1013 int size; 1014 uint8_t *buf = r->req.cmd.buf; 1015 uint8_t notification_class_request = buf[4]; 1016 if (s->qdev.type != TYPE_ROM) { 1017 return -1; 1018 } 1019 if ((buf[1] & 1) == 0) { 1020 /* asynchronous */ 1021 return -1; 1022 } 1023 1024 size = 4; 1025 outbuf[0] = outbuf[1] = 0; 1026 outbuf[3] = 1 << GESN_MEDIA; /* supported events */ 1027 if (notification_class_request & (1 << GESN_MEDIA)) { 1028 outbuf[2] = GESN_MEDIA; 1029 size += scsi_event_status_media(s, &outbuf[size]); 1030 } else { 1031 outbuf[2] = 0x80; 1032 } 1033 stw_be_p(outbuf, size - 4); 1034 return size; 1035 } 1036 1037 static int scsi_get_configuration(SCSIDiskState *s, uint8_t *outbuf) 1038 { 1039 int current; 1040 1041 if (s->qdev.type != TYPE_ROM) { 1042 return -1; 1043 } 1044 1045 if (media_is_dvd(s)) { 1046 current = MMC_PROFILE_DVD_ROM; 1047 } else if (media_is_cd(s)) { 1048 current = MMC_PROFILE_CD_ROM; 1049 } else { 1050 current = MMC_PROFILE_NONE; 1051 } 1052 1053 memset(outbuf, 0, 40); 1054 stl_be_p(&outbuf[0], 36); /* Bytes after the data length field */ 1055 stw_be_p(&outbuf[6], current); 1056 /* outbuf[8] - outbuf[19]: Feature 0 - Profile list */ 1057 outbuf[10] = 0x03; /* persistent, current */ 1058 outbuf[11] = 8; /* two profiles */ 1059 stw_be_p(&outbuf[12], MMC_PROFILE_DVD_ROM); 1060 outbuf[14] = (current == MMC_PROFILE_DVD_ROM); 1061 stw_be_p(&outbuf[16], MMC_PROFILE_CD_ROM); 1062 outbuf[18] = (current == MMC_PROFILE_CD_ROM); 1063 /* outbuf[20] - outbuf[31]: Feature 1 - Core feature */ 1064 stw_be_p(&outbuf[20], 1); 1065 outbuf[22] = 0x08 | 0x03; /* version 2, persistent, current */ 1066 outbuf[23] = 8; 1067 stl_be_p(&outbuf[24], 1); /* SCSI */ 1068 outbuf[28] = 1; /* DBE = 1, mandatory */ 1069 /* outbuf[32] - outbuf[39]: Feature 3 - Removable media feature */ 1070 stw_be_p(&outbuf[32], 3); 1071 outbuf[34] = 0x08 | 0x03; /* version 2, persistent, current */ 1072 outbuf[35] = 4; 1073 outbuf[36] = 0x39; /* tray, load=1, eject=1, unlocked at powerup, lock=1 */ 1074 /* TODO: Random readable, CD read, DVD read, drive serial number, 1075 power management */ 1076 return 40; 1077 } 1078 1079 static int scsi_emulate_mechanism_status(SCSIDiskState *s, uint8_t *outbuf) 1080 { 1081 if (s->qdev.type != TYPE_ROM) { 1082 return -1; 1083 } 1084 memset(outbuf, 0, 8); 1085 outbuf[5] = 1; /* CD-ROM */ 1086 return 8; 1087 } 1088 1089 static int mode_sense_page(SCSIDiskState *s, int page, uint8_t **p_outbuf, 1090 int page_control) 1091 { 1092 static const int mode_sense_valid[0x3f] = { 1093 [MODE_PAGE_HD_GEOMETRY] = (1 << TYPE_DISK), 1094 [MODE_PAGE_FLEXIBLE_DISK_GEOMETRY] = (1 << TYPE_DISK), 1095 [MODE_PAGE_CACHING] = (1 << TYPE_DISK) | (1 << TYPE_ROM), 1096 [MODE_PAGE_R_W_ERROR] = (1 << TYPE_DISK) | (1 << TYPE_ROM), 1097 [MODE_PAGE_AUDIO_CTL] = (1 << TYPE_ROM), 1098 [MODE_PAGE_CAPABILITIES] = (1 << TYPE_ROM), 1099 }; 1100 1101 uint8_t *p = *p_outbuf + 2; 1102 int length; 1103 1104 if ((mode_sense_valid[page] & (1 << s->qdev.type)) == 0) { 1105 return -1; 1106 } 1107 1108 /* 1109 * If Changeable Values are requested, a mask denoting those mode parameters 1110 * that are changeable shall be returned. 
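     * (For example, a device that allowed WCE to be toggled through MODE
     * SELECT would report that bit as set in the changeable mask of the
     * CACHING page.)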
     * As we currently don't support parameter changes via MODE_SELECT,
     * all bits are returned set to zero. The buffer was already memset
     * to zero by the caller of this function.
     *
     * The offsets here are off by two compared to the descriptions in the
     * SCSI specs, because those include a 2-byte header. This is unfortunate,
     * but it is done so that offsets are consistent within our implementation
     * of MODE SENSE and MODE SELECT. MODE SELECT has to deal with both
     * 2-byte and 4-byte headers.
     */
    switch (page) {
    case MODE_PAGE_HD_GEOMETRY:
        length = 0x16;
        if (page_control == 1) { /* Changeable Values */
            break;
        }
        /* if a geometry hint is available, use it */
        p[0] = (s->qdev.conf.cyls >> 16) & 0xff;
        p[1] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[2] = s->qdev.conf.cyls & 0xff;
        p[3] = s->qdev.conf.heads & 0xff;
        /* Write precomp start cylinder, disabled */
        p[4] = (s->qdev.conf.cyls >> 16) & 0xff;
        p[5] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[6] = s->qdev.conf.cyls & 0xff;
        /* Reduced current start cylinder, disabled */
        p[7] = (s->qdev.conf.cyls >> 16) & 0xff;
        p[8] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[9] = s->qdev.conf.cyls & 0xff;
        /* Device step rate [ns], 200ns */
        p[10] = 0;
        p[11] = 200;
        /* Landing zone cylinder */
        p[12] = 0xff;
        p[13] = 0xff;
        p[14] = 0xff;
        /* Medium rotation rate [rpm], 5400 rpm */
        p[18] = (5400 >> 8) & 0xff;
        p[19] = 5400 & 0xff;
        break;

    case MODE_PAGE_FLEXIBLE_DISK_GEOMETRY:
        length = 0x1e;
        if (page_control == 1) { /* Changeable Values */
            break;
        }
        /* Transfer rate [kbit/s], 5Mbit/s */
        p[0] = 5000 >> 8;
        p[1] = 5000 & 0xff;
        /* if a geometry hint is available, use it */
        p[2] = s->qdev.conf.heads & 0xff;
        p[3] = s->qdev.conf.secs & 0xff;
        p[4] = s->qdev.blocksize >> 8;
        p[6] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[7] = s->qdev.conf.cyls & 0xff;
        /* Write precomp start cylinder, disabled */
        p[8] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[9] = s->qdev.conf.cyls & 0xff;
        /* Reduced current start cylinder, disabled */
        p[10] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[11] = s->qdev.conf.cyls & 0xff;
        /* Device step rate [100us], 100us */
        p[12] = 0;
        p[13] = 1;
        /* Device step pulse width [us], 1us */
        p[14] = 1;
        /* Device head settle delay [100us], 100us */
        p[15] = 0;
        p[16] = 1;
        /* Motor on delay [0.1s], 0.1s */
        p[17] = 1;
        /* Motor off delay [0.1s], 0.1s */
        p[18] = 1;
        /* Medium rotation rate [rpm], 5400 rpm */
        p[26] = (5400 >> 8) & 0xff;
        p[27] = 5400 & 0xff;
        break;

    case MODE_PAGE_CACHING:
        length = 0x12;
        if (page_control == 1 || /* Changeable Values */
            blk_enable_write_cache(s->qdev.conf.blk)) {
            p[0] = 4; /* WCE */
        }
        break;

    case MODE_PAGE_R_W_ERROR:
        length = 10;
        if (page_control == 1) { /* Changeable Values */
            break;
        }
        p[0] = 0x80; /* Automatic Write Reallocation Enabled */
        if (s->qdev.type == TYPE_ROM) {
            p[1] = 0x20; /* Read Retry Count */
        }
        break;

    case MODE_PAGE_AUDIO_CTL:
        length = 14;
        break;

    case MODE_PAGE_CAPABILITIES:
        length = 0x14;
        if (page_control == 1) { /* Changeable Values */
            break;
        }

        p[0] = 0x3b; /* CD-R & CD-RW read */
        p[1] = 0; /* Writing not supported */
        p[2] = 0x7f; /* Audio,
                        composite, digital out,
                        mode 2 form 1&2, multi session */
        p[3] = 0xff; /* CD DA, DA accurate, RW supported,
                        RW corrected, C2 errors, ISRC,
                        UPC, Bar code */
        p[4] = 0x2d | (s->tray_locked ? 2 : 0);
        /* Locking supported, jumper present, eject, tray */
        p[5] = 0; /* no volume & mute control, no
                     changer */
        p[6] = (50 * 176) >> 8; /* 50x read speed */
        p[7] = (50 * 176) & 0xff;
        p[8] = 2 >> 8; /* Two volume levels */
        p[9] = 2 & 0xff;
        p[10] = 2048 >> 8; /* 2M buffer */
        p[11] = 2048 & 0xff;
        p[12] = (16 * 176) >> 8; /* 16x read speed current */
        p[13] = (16 * 176) & 0xff;
        p[16] = (16 * 176) >> 8; /* 16x write speed */
        p[17] = (16 * 176) & 0xff;
        p[18] = (16 * 176) >> 8; /* 16x write speed current */
        p[19] = (16 * 176) & 0xff;
        break;

    default:
        return -1;
    }

    assert(length < 256);
    (*p_outbuf)[0] = page;
    (*p_outbuf)[1] = length;
    *p_outbuf += length + 2;
    return length + 2;
}

static int scsi_disk_emulate_mode_sense(SCSIDiskReq *r, uint8_t *outbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    uint64_t nb_sectors;
    bool dbd;
    int page, buflen, ret, page_control;
    uint8_t *p;
    uint8_t dev_specific_param;

    dbd = (r->req.cmd.buf[1] & 0x8) != 0;
    page = r->req.cmd.buf[2] & 0x3f;
    page_control = (r->req.cmd.buf[2] & 0xc0) >> 6;

    trace_scsi_disk_emulate_mode_sense((r->req.cmd.buf[0] == MODE_SENSE) ? 6 :
                                       10, page, r->req.cmd.xfer,
                                       page_control);
    memset(outbuf, 0, r->req.cmd.xfer);
    p = outbuf;

    if (s->qdev.type == TYPE_DISK) {
        dev_specific_param = s->features & (1 << SCSI_DISK_F_DPOFUA) ? 0x10 : 0;
        if (blk_is_read_only(s->qdev.conf.blk)) {
            dev_specific_param |= 0x80; /* Readonly. */
        }
    } else {
        /* MMC prescribes that CD/DVD drives have no block descriptors,
         * and defines no device-specific parameter. */
        dev_specific_param = 0x00;
        dbd = true;
    }

    if (r->req.cmd.buf[0] == MODE_SENSE) {
        p[1] = 0; /* Default media type. */
        p[2] = dev_specific_param;
        p[3] = 0; /* Block descriptor length. */
        p += 4;
    } else { /* MODE_SENSE_10 */
        p[2] = 0; /* Default media type. */
        p[3] = dev_specific_param;
        p[6] = p[7] = 0; /* Block descriptor length.
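                             (set to 8 below when a block descriptor is
                             appended)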
*/ 1292 p += 8; 1293 } 1294 1295 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 1296 if (!dbd && nb_sectors) { 1297 if (r->req.cmd.buf[0] == MODE_SENSE) { 1298 outbuf[3] = 8; /* Block descriptor length */ 1299 } else { /* MODE_SENSE_10 */ 1300 outbuf[7] = 8; /* Block descriptor length */ 1301 } 1302 nb_sectors /= (s->qdev.blocksize / BDRV_SECTOR_SIZE); 1303 if (nb_sectors > 0xffffff) { 1304 nb_sectors = 0; 1305 } 1306 p[0] = 0; /* media density code */ 1307 p[1] = (nb_sectors >> 16) & 0xff; 1308 p[2] = (nb_sectors >> 8) & 0xff; 1309 p[3] = nb_sectors & 0xff; 1310 p[4] = 0; /* reserved */ 1311 p[5] = 0; /* bytes 5-7 are the sector size in bytes */ 1312 p[6] = s->qdev.blocksize >> 8; 1313 p[7] = 0; 1314 p += 8; 1315 } 1316 1317 if (page_control == 3) { 1318 /* Saved Values */ 1319 scsi_check_condition(r, SENSE_CODE(SAVING_PARAMS_NOT_SUPPORTED)); 1320 return -1; 1321 } 1322 1323 if (page == 0x3f) { 1324 for (page = 0; page <= 0x3e; page++) { 1325 mode_sense_page(s, page, &p, page_control); 1326 } 1327 } else { 1328 ret = mode_sense_page(s, page, &p, page_control); 1329 if (ret == -1) { 1330 return -1; 1331 } 1332 } 1333 1334 buflen = p - outbuf; 1335 /* 1336 * The mode data length field specifies the length in bytes of the 1337 * following data that is available to be transferred. The mode data 1338 * length does not include itself. 1339 */ 1340 if (r->req.cmd.buf[0] == MODE_SENSE) { 1341 outbuf[0] = buflen - 1; 1342 } else { /* MODE_SENSE_10 */ 1343 outbuf[0] = ((buflen - 2) >> 8) & 0xff; 1344 outbuf[1] = (buflen - 2) & 0xff; 1345 } 1346 return buflen; 1347 } 1348 1349 static int scsi_disk_emulate_read_toc(SCSIRequest *req, uint8_t *outbuf) 1350 { 1351 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 1352 int start_track, format, msf, toclen; 1353 uint64_t nb_sectors; 1354 1355 msf = req->cmd.buf[1] & 2; 1356 format = req->cmd.buf[2] & 0xf; 1357 start_track = req->cmd.buf[6]; 1358 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 1359 trace_scsi_disk_emulate_read_toc(start_track, format, msf >> 1); 1360 nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE; 1361 switch (format) { 1362 case 0: 1363 toclen = cdrom_read_toc(nb_sectors, outbuf, msf, start_track); 1364 break; 1365 case 1: 1366 /* multi session : only a single session defined */ 1367 toclen = 12; 1368 memset(outbuf, 0, 12); 1369 outbuf[1] = 0x0a; 1370 outbuf[2] = 0x01; 1371 outbuf[3] = 0x01; 1372 break; 1373 case 2: 1374 toclen = cdrom_read_toc_raw(nb_sectors, outbuf, msf, start_track); 1375 break; 1376 default: 1377 return -1; 1378 } 1379 return toclen; 1380 } 1381 1382 static int scsi_disk_emulate_start_stop(SCSIDiskReq *r) 1383 { 1384 SCSIRequest *req = &r->req; 1385 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 1386 bool start = req->cmd.buf[4] & 1; 1387 bool loej = req->cmd.buf[4] & 2; /* load on start, eject on !start */ 1388 int pwrcnd = req->cmd.buf[4] & 0xf0; 1389 1390 if (pwrcnd) { 1391 /* eject/load only happens for power condition == 0 */ 1392 return 0; 1393 } 1394 1395 if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) && loej) { 1396 if (!start && !s->tray_open && s->tray_locked) { 1397 scsi_check_condition(r, 1398 blk_is_inserted(s->qdev.conf.blk) 1399 ? 
SENSE_CODE(ILLEGAL_REQ_REMOVAL_PREVENTED) 1400 : SENSE_CODE(NOT_READY_REMOVAL_PREVENTED)); 1401 return -1; 1402 } 1403 1404 if (s->tray_open != !start) { 1405 blk_eject(s->qdev.conf.blk, !start); 1406 s->tray_open = !start; 1407 } 1408 } 1409 return 0; 1410 } 1411 1412 static void scsi_disk_emulate_read_data(SCSIRequest *req) 1413 { 1414 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 1415 int buflen = r->iov.iov_len; 1416 1417 if (buflen) { 1418 trace_scsi_disk_emulate_read_data(buflen); 1419 r->iov.iov_len = 0; 1420 r->started = true; 1421 scsi_req_data(&r->req, buflen); 1422 return; 1423 } 1424 1425 /* This also clears the sense buffer for REQUEST SENSE. */ 1426 scsi_req_complete(&r->req, GOOD); 1427 } 1428 1429 static int scsi_disk_check_mode_select(SCSIDiskState *s, int page, 1430 uint8_t *inbuf, int inlen) 1431 { 1432 uint8_t mode_current[SCSI_MAX_MODE_LEN]; 1433 uint8_t mode_changeable[SCSI_MAX_MODE_LEN]; 1434 uint8_t *p; 1435 int len, expected_len, changeable_len, i; 1436 1437 /* The input buffer does not include the page header, so it is 1438 * off by 2 bytes. 1439 */ 1440 expected_len = inlen + 2; 1441 if (expected_len > SCSI_MAX_MODE_LEN) { 1442 return -1; 1443 } 1444 1445 p = mode_current; 1446 memset(mode_current, 0, inlen + 2); 1447 len = mode_sense_page(s, page, &p, 0); 1448 if (len < 0 || len != expected_len) { 1449 return -1; 1450 } 1451 1452 p = mode_changeable; 1453 memset(mode_changeable, 0, inlen + 2); 1454 changeable_len = mode_sense_page(s, page, &p, 1); 1455 assert(changeable_len == len); 1456 1457 /* Check that unchangeable bits are the same as what MODE SENSE 1458 * would return. 1459 */ 1460 for (i = 2; i < len; i++) { 1461 if (((mode_current[i] ^ inbuf[i - 2]) & ~mode_changeable[i]) != 0) { 1462 return -1; 1463 } 1464 } 1465 return 0; 1466 } 1467 1468 static void scsi_disk_apply_mode_select(SCSIDiskState *s, int page, uint8_t *p) 1469 { 1470 switch (page) { 1471 case MODE_PAGE_CACHING: 1472 blk_set_enable_write_cache(s->qdev.conf.blk, (p[0] & 4) != 0); 1473 break; 1474 1475 default: 1476 break; 1477 } 1478 } 1479 1480 static int mode_select_pages(SCSIDiskReq *r, uint8_t *p, int len, bool change) 1481 { 1482 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1483 1484 while (len > 0) { 1485 int page, subpage, page_len; 1486 1487 /* Parse both possible formats for the mode page headers. 
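         * A header with SPF=0 is 2 bytes long (page code and page length);
         * with SPF=1 (bit 6 of the first byte set) it is 4 bytes long and
         * carries a subpage code and a 16-bit big-endian page length.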
*/ 1488 page = p[0] & 0x3f; 1489 if (p[0] & 0x40) { 1490 if (len < 4) { 1491 goto invalid_param_len; 1492 } 1493 subpage = p[1]; 1494 page_len = lduw_be_p(&p[2]); 1495 p += 4; 1496 len -= 4; 1497 } else { 1498 if (len < 2) { 1499 goto invalid_param_len; 1500 } 1501 subpage = 0; 1502 page_len = p[1]; 1503 p += 2; 1504 len -= 2; 1505 } 1506 1507 if (subpage) { 1508 goto invalid_param; 1509 } 1510 if (page_len > len) { 1511 goto invalid_param_len; 1512 } 1513 1514 if (!change) { 1515 if (scsi_disk_check_mode_select(s, page, p, page_len) < 0) { 1516 goto invalid_param; 1517 } 1518 } else { 1519 scsi_disk_apply_mode_select(s, page, p); 1520 } 1521 1522 p += page_len; 1523 len -= page_len; 1524 } 1525 return 0; 1526 1527 invalid_param: 1528 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM)); 1529 return -1; 1530 1531 invalid_param_len: 1532 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN)); 1533 return -1; 1534 } 1535 1536 static void scsi_disk_emulate_mode_select(SCSIDiskReq *r, uint8_t *inbuf) 1537 { 1538 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1539 uint8_t *p = inbuf; 1540 int cmd = r->req.cmd.buf[0]; 1541 int len = r->req.cmd.xfer; 1542 int hdr_len = (cmd == MODE_SELECT ? 4 : 8); 1543 int bd_len; 1544 int pass; 1545 1546 /* We only support PF=1, SP=0. */ 1547 if ((r->req.cmd.buf[1] & 0x11) != 0x10) { 1548 goto invalid_field; 1549 } 1550 1551 if (len < hdr_len) { 1552 goto invalid_param_len; 1553 } 1554 1555 bd_len = (cmd == MODE_SELECT ? p[3] : lduw_be_p(&p[6])); 1556 len -= hdr_len; 1557 p += hdr_len; 1558 if (len < bd_len) { 1559 goto invalid_param_len; 1560 } 1561 if (bd_len != 0 && bd_len != 8) { 1562 goto invalid_param; 1563 } 1564 1565 len -= bd_len; 1566 p += bd_len; 1567 1568 /* Ensure no change is made if there is an error! */ 1569 for (pass = 0; pass < 2; pass++) { 1570 if (mode_select_pages(r, p, len, pass == 1) < 0) { 1571 assert(pass == 0); 1572 return; 1573 } 1574 } 1575 if (!blk_enable_write_cache(s->qdev.conf.blk)) { 1576 /* The request is used as the AIO opaque value, so add a ref. */ 1577 scsi_req_ref(&r->req); 1578 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0, 1579 BLOCK_ACCT_FLUSH); 1580 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r); 1581 return; 1582 } 1583 1584 scsi_req_complete(&r->req, GOOD); 1585 return; 1586 1587 invalid_param: 1588 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM)); 1589 return; 1590 1591 invalid_param_len: 1592 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN)); 1593 return; 1594 1595 invalid_field: 1596 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 1597 } 1598 1599 static inline bool check_lba_range(SCSIDiskState *s, 1600 uint64_t sector_num, uint32_t nb_sectors) 1601 { 1602 /* 1603 * The first line tests that no overflow happens when computing the last 1604 * sector. The second line tests that the last accessed sector is in 1605 * range. 1606 * 1607 * Careful, the computations should not underflow for nb_sectors == 0, 1608 * and a 0-block read to the first LBA beyond the end of device is 1609 * valid. 
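     *
     * For example, sector_num = UINT64_MAX with nb_sectors = 1 wraps the
     * sum around to 0, so the first comparison fails and the request is
     * rejected rather than treated as in range.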
1610 */ 1611 return (sector_num <= sector_num + nb_sectors && 1612 sector_num + nb_sectors <= s->qdev.max_lba + 1); 1613 } 1614 1615 typedef struct UnmapCBData { 1616 SCSIDiskReq *r; 1617 uint8_t *inbuf; 1618 int count; 1619 } UnmapCBData; 1620 1621 static void scsi_unmap_complete(void *opaque, int ret); 1622 1623 static void scsi_unmap_complete_noio(UnmapCBData *data, int ret) 1624 { 1625 SCSIDiskReq *r = data->r; 1626 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1627 1628 assert(r->req.aiocb == NULL); 1629 1630 if (data->count > 0) { 1631 r->sector = ldq_be_p(&data->inbuf[0]) 1632 * (s->qdev.blocksize / BDRV_SECTOR_SIZE); 1633 r->sector_count = (ldl_be_p(&data->inbuf[8]) & 0xffffffffULL) 1634 * (s->qdev.blocksize / BDRV_SECTOR_SIZE); 1635 if (!check_lba_range(s, r->sector, r->sector_count)) { 1636 block_acct_invalid(blk_get_stats(s->qdev.conf.blk), 1637 BLOCK_ACCT_UNMAP); 1638 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE)); 1639 goto done; 1640 } 1641 1642 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 1643 r->sector_count * BDRV_SECTOR_SIZE, 1644 BLOCK_ACCT_UNMAP); 1645 1646 r->req.aiocb = blk_aio_pdiscard(s->qdev.conf.blk, 1647 r->sector * BDRV_SECTOR_SIZE, 1648 r->sector_count * BDRV_SECTOR_SIZE, 1649 scsi_unmap_complete, data); 1650 data->count--; 1651 data->inbuf += 16; 1652 return; 1653 } 1654 1655 scsi_req_complete(&r->req, GOOD); 1656 1657 done: 1658 scsi_req_unref(&r->req); 1659 g_free(data); 1660 } 1661 1662 static void scsi_unmap_complete(void *opaque, int ret) 1663 { 1664 UnmapCBData *data = opaque; 1665 SCSIDiskReq *r = data->r; 1666 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1667 1668 assert(r->req.aiocb != NULL); 1669 r->req.aiocb = NULL; 1670 1671 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); 1672 if (scsi_disk_req_check_error(r, ret, true)) { 1673 scsi_req_unref(&r->req); 1674 g_free(data); 1675 } else { 1676 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); 1677 scsi_unmap_complete_noio(data, ret); 1678 } 1679 aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); 1680 } 1681 1682 static void scsi_disk_emulate_unmap(SCSIDiskReq *r, uint8_t *inbuf) 1683 { 1684 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1685 uint8_t *p = inbuf; 1686 int len = r->req.cmd.xfer; 1687 UnmapCBData *data; 1688 1689 /* Reject ANCHOR=1. */ 1690 if (r->req.cmd.buf[1] & 0x1) { 1691 goto invalid_field; 1692 } 1693 1694 if (len < 8) { 1695 goto invalid_param_len; 1696 } 1697 if (len < lduw_be_p(&p[0]) + 2) { 1698 goto invalid_param_len; 1699 } 1700 if (len < lduw_be_p(&p[2]) + 8) { 1701 goto invalid_param_len; 1702 } 1703 if (lduw_be_p(&p[2]) & 15) { 1704 goto invalid_param_len; 1705 } 1706 1707 if (blk_is_read_only(s->qdev.conf.blk)) { 1708 block_acct_invalid(blk_get_stats(s->qdev.conf.blk), BLOCK_ACCT_UNMAP); 1709 scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED)); 1710 return; 1711 } 1712 1713 data = g_new0(UnmapCBData, 1); 1714 data->r = r; 1715 data->inbuf = &p[8]; 1716 data->count = lduw_be_p(&p[2]) >> 4; 1717 1718 /* The matching unref is in scsi_unmap_complete, before data is freed. 
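     * Descriptors are processed one at a time: each blk_aio_pdiscard()
     * completion re-enters scsi_unmap_complete_noio() for the next
     * descriptor until data->count reaches zero.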
*/ 1719 scsi_req_ref(&r->req); 1720 scsi_unmap_complete_noio(data, 0); 1721 return; 1722 1723 invalid_param_len: 1724 block_acct_invalid(blk_get_stats(s->qdev.conf.blk), BLOCK_ACCT_UNMAP); 1725 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN)); 1726 return; 1727 1728 invalid_field: 1729 block_acct_invalid(blk_get_stats(s->qdev.conf.blk), BLOCK_ACCT_UNMAP); 1730 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 1731 } 1732 1733 typedef struct WriteSameCBData { 1734 SCSIDiskReq *r; 1735 int64_t sector; 1736 int nb_sectors; 1737 QEMUIOVector qiov; 1738 struct iovec iov; 1739 } WriteSameCBData; 1740 1741 static void scsi_write_same_complete(void *opaque, int ret) 1742 { 1743 WriteSameCBData *data = opaque; 1744 SCSIDiskReq *r = data->r; 1745 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1746 1747 assert(r->req.aiocb != NULL); 1748 r->req.aiocb = NULL; 1749 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); 1750 if (scsi_disk_req_check_error(r, ret, true)) { 1751 goto done; 1752 } 1753 1754 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); 1755 1756 data->nb_sectors -= data->iov.iov_len / BDRV_SECTOR_SIZE; 1757 data->sector += data->iov.iov_len / BDRV_SECTOR_SIZE; 1758 data->iov.iov_len = MIN(data->nb_sectors * BDRV_SECTOR_SIZE, 1759 data->iov.iov_len); 1760 if (data->iov.iov_len) { 1761 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 1762 data->iov.iov_len, BLOCK_ACCT_WRITE); 1763 /* Reinitialize qiov, to handle unaligned WRITE SAME request 1764 * where final qiov may need smaller size */ 1765 qemu_iovec_init_external(&data->qiov, &data->iov, 1); 1766 r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk, 1767 data->sector << BDRV_SECTOR_BITS, 1768 &data->qiov, 0, 1769 scsi_write_same_complete, data); 1770 aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); 1771 return; 1772 } 1773 1774 scsi_req_complete(&r->req, GOOD); 1775 1776 done: 1777 scsi_req_unref(&r->req); 1778 qemu_vfree(data->iov.iov_base); 1779 g_free(data); 1780 aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); 1781 } 1782 1783 static void scsi_disk_emulate_write_same(SCSIDiskReq *r, uint8_t *inbuf) 1784 { 1785 SCSIRequest *req = &r->req; 1786 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 1787 uint32_t nb_sectors = scsi_data_cdb_xfer(r->req.cmd.buf); 1788 WriteSameCBData *data; 1789 uint8_t *buf; 1790 int i; 1791 1792 /* Fail if PBDATA=1 or LBDATA=1 or ANCHOR=1. */ 1793 if (nb_sectors == 0 || (req->cmd.buf[1] & 0x16)) { 1794 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 1795 return; 1796 } 1797 1798 if (blk_is_read_only(s->qdev.conf.blk)) { 1799 scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED)); 1800 return; 1801 } 1802 if (!check_lba_range(s, r->req.cmd.lba, nb_sectors)) { 1803 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE)); 1804 return; 1805 } 1806 1807 if ((req->cmd.buf[1] & 0x1) || buffer_is_zero(inbuf, s->qdev.blocksize)) { 1808 int flags = (req->cmd.buf[1] & 0x8) ? BDRV_REQ_MAY_UNMAP : 0; 1809 1810 /* The request is used as the AIO opaque value, so add a ref. 
*/ 1811 scsi_req_ref(&r->req); 1812 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 1813 nb_sectors * s->qdev.blocksize, 1814 BLOCK_ACCT_WRITE); 1815 r->req.aiocb = blk_aio_pwrite_zeroes(s->qdev.conf.blk, 1816 r->req.cmd.lba * s->qdev.blocksize, 1817 nb_sectors * s->qdev.blocksize, 1818 flags, scsi_aio_complete, r); 1819 return; 1820 } 1821 1822 data = g_new0(WriteSameCBData, 1); 1823 data->r = r; 1824 data->sector = r->req.cmd.lba * (s->qdev.blocksize / BDRV_SECTOR_SIZE); 1825 data->nb_sectors = nb_sectors * (s->qdev.blocksize / BDRV_SECTOR_SIZE); 1826 data->iov.iov_len = MIN(data->nb_sectors * BDRV_SECTOR_SIZE, 1827 SCSI_WRITE_SAME_MAX); 1828 data->iov.iov_base = buf = blk_blockalign(s->qdev.conf.blk, 1829 data->iov.iov_len); 1830 qemu_iovec_init_external(&data->qiov, &data->iov, 1); 1831 1832 for (i = 0; i < data->iov.iov_len; i += s->qdev.blocksize) { 1833 memcpy(&buf[i], inbuf, s->qdev.blocksize); 1834 } 1835 1836 scsi_req_ref(&r->req); 1837 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 1838 data->iov.iov_len, BLOCK_ACCT_WRITE); 1839 r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk, 1840 data->sector << BDRV_SECTOR_BITS, 1841 &data->qiov, 0, 1842 scsi_write_same_complete, data); 1843 } 1844 1845 static void scsi_disk_emulate_write_data(SCSIRequest *req) 1846 { 1847 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 1848 1849 if (r->iov.iov_len) { 1850 int buflen = r->iov.iov_len; 1851 trace_scsi_disk_emulate_write_data(buflen); 1852 r->iov.iov_len = 0; 1853 scsi_req_data(&r->req, buflen); 1854 return; 1855 } 1856 1857 switch (req->cmd.buf[0]) { 1858 case MODE_SELECT: 1859 case MODE_SELECT_10: 1860 /* This also clears the sense buffer for REQUEST SENSE. */ 1861 scsi_disk_emulate_mode_select(r, r->iov.iov_base); 1862 break; 1863 1864 case UNMAP: 1865 scsi_disk_emulate_unmap(r, r->iov.iov_base); 1866 break; 1867 1868 case VERIFY_10: 1869 case VERIFY_12: 1870 case VERIFY_16: 1871 if (r->req.status == -1) { 1872 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 1873 } 1874 break; 1875 1876 case WRITE_SAME_10: 1877 case WRITE_SAME_16: 1878 scsi_disk_emulate_write_same(r, r->iov.iov_base); 1879 break; 1880 1881 default: 1882 abort(); 1883 } 1884 } 1885 1886 static int32_t scsi_disk_emulate_command(SCSIRequest *req, uint8_t *buf) 1887 { 1888 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 1889 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 1890 uint64_t nb_sectors; 1891 uint8_t *outbuf; 1892 int buflen; 1893 1894 switch (req->cmd.buf[0]) { 1895 case INQUIRY: 1896 case MODE_SENSE: 1897 case MODE_SENSE_10: 1898 case RESERVE: 1899 case RESERVE_10: 1900 case RELEASE: 1901 case RELEASE_10: 1902 case START_STOP: 1903 case ALLOW_MEDIUM_REMOVAL: 1904 case GET_CONFIGURATION: 1905 case GET_EVENT_STATUS_NOTIFICATION: 1906 case MECHANISM_STATUS: 1907 case REQUEST_SENSE: 1908 break; 1909 1910 default: 1911 if (!blk_is_available(s->qdev.conf.blk)) { 1912 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM)); 1913 return 0; 1914 } 1915 break; 1916 } 1917 1918 /* 1919 * FIXME: we shouldn't return anything bigger than 4k, but the code 1920 * requires the buffer to be as big as req->cmd.xfer in several 1921 * places. So, do not allow CDBs with a very large ALLOCATION 1922 * LENGTH. The real fix would be to modify scsi_read_data and 1923 * dma_buf_read, so that they return data beyond the buflen 1924 * as all zeros. 
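     * (The 65536-byte cap applied below keeps the bounce buffer allocation
     * bounded until that is done.)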
1925 */ 1926 if (req->cmd.xfer > 65536) { 1927 goto illegal_request; 1928 } 1929 r->buflen = MAX(4096, req->cmd.xfer); 1930 1931 if (!r->iov.iov_base) { 1932 r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen); 1933 } 1934 1935 outbuf = r->iov.iov_base; 1936 memset(outbuf, 0, r->buflen); 1937 switch (req->cmd.buf[0]) { 1938 case TEST_UNIT_READY: 1939 assert(blk_is_available(s->qdev.conf.blk)); 1940 break; 1941 case INQUIRY: 1942 buflen = scsi_disk_emulate_inquiry(req, outbuf); 1943 if (buflen < 0) { 1944 goto illegal_request; 1945 } 1946 break; 1947 case MODE_SENSE: 1948 case MODE_SENSE_10: 1949 buflen = scsi_disk_emulate_mode_sense(r, outbuf); 1950 if (buflen < 0) { 1951 goto illegal_request; 1952 } 1953 break; 1954 case READ_TOC: 1955 buflen = scsi_disk_emulate_read_toc(req, outbuf); 1956 if (buflen < 0) { 1957 goto illegal_request; 1958 } 1959 break; 1960 case RESERVE: 1961 if (req->cmd.buf[1] & 1) { 1962 goto illegal_request; 1963 } 1964 break; 1965 case RESERVE_10: 1966 if (req->cmd.buf[1] & 3) { 1967 goto illegal_request; 1968 } 1969 break; 1970 case RELEASE: 1971 if (req->cmd.buf[1] & 1) { 1972 goto illegal_request; 1973 } 1974 break; 1975 case RELEASE_10: 1976 if (req->cmd.buf[1] & 3) { 1977 goto illegal_request; 1978 } 1979 break; 1980 case START_STOP: 1981 if (scsi_disk_emulate_start_stop(r) < 0) { 1982 return 0; 1983 } 1984 break; 1985 case ALLOW_MEDIUM_REMOVAL: 1986 s->tray_locked = req->cmd.buf[4] & 1; 1987 blk_lock_medium(s->qdev.conf.blk, req->cmd.buf[4] & 1); 1988 break; 1989 case READ_CAPACITY_10: 1990 /* The normal LEN field for this command is zero. */ 1991 memset(outbuf, 0, 8); 1992 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 1993 if (!nb_sectors) { 1994 scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY)); 1995 return 0; 1996 } 1997 if ((req->cmd.buf[8] & 1) == 0 && req->cmd.lba) { 1998 goto illegal_request; 1999 } 2000 nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE; 2001 /* Returned value is the address of the last sector. */ 2002 nb_sectors--; 2003 /* Remember the new size for read/write sanity checking. */ 2004 s->qdev.max_lba = nb_sectors; 2005 /* Clip to 2TB, instead of returning capacity modulo 2TB. */ 2006 if (nb_sectors > UINT32_MAX) { 2007 nb_sectors = UINT32_MAX; 2008 } 2009 outbuf[0] = (nb_sectors >> 24) & 0xff; 2010 outbuf[1] = (nb_sectors >> 16) & 0xff; 2011 outbuf[2] = (nb_sectors >> 8) & 0xff; 2012 outbuf[3] = nb_sectors & 0xff; 2013 outbuf[4] = 0; 2014 outbuf[5] = 0; 2015 outbuf[6] = s->qdev.blocksize >> 8; 2016 outbuf[7] = 0; 2017 break; 2018 case REQUEST_SENSE: 2019 /* Just return "NO SENSE". 
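           scsi_convert_sense() is called with a NULL input buffer, which
           produces an empty sense buffer in the fixed or descriptor format
           selected by the DESC bit.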
*/ 2020 buflen = scsi_convert_sense(NULL, 0, outbuf, r->buflen, 2021 (req->cmd.buf[1] & 1) == 0); 2022 if (buflen < 0) { 2023 goto illegal_request; 2024 } 2025 break; 2026 case MECHANISM_STATUS: 2027 buflen = scsi_emulate_mechanism_status(s, outbuf); 2028 if (buflen < 0) { 2029 goto illegal_request; 2030 } 2031 break; 2032 case GET_CONFIGURATION: 2033 buflen = scsi_get_configuration(s, outbuf); 2034 if (buflen < 0) { 2035 goto illegal_request; 2036 } 2037 break; 2038 case GET_EVENT_STATUS_NOTIFICATION: 2039 buflen = scsi_get_event_status_notification(s, r, outbuf); 2040 if (buflen < 0) { 2041 goto illegal_request; 2042 } 2043 break; 2044 case READ_DISC_INFORMATION: 2045 buflen = scsi_read_disc_information(s, r, outbuf); 2046 if (buflen < 0) { 2047 goto illegal_request; 2048 } 2049 break; 2050 case READ_DVD_STRUCTURE: 2051 buflen = scsi_read_dvd_structure(s, r, outbuf); 2052 if (buflen < 0) { 2053 goto illegal_request; 2054 } 2055 break; 2056 case SERVICE_ACTION_IN_16: 2057 /* Service Action In subcommands. */ 2058 if ((req->cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) { 2059 trace_scsi_disk_emulate_command_SAI_16(); 2060 memset(outbuf, 0, req->cmd.xfer); 2061 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 2062 if (!nb_sectors) { 2063 scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY)); 2064 return 0; 2065 } 2066 if ((req->cmd.buf[14] & 1) == 0 && req->cmd.lba) { 2067 goto illegal_request; 2068 } 2069 nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE; 2070 /* Returned value is the address of the last sector. */ 2071 nb_sectors--; 2072 /* Remember the new size for read/write sanity checking. */ 2073 s->qdev.max_lba = nb_sectors; 2074 outbuf[0] = (nb_sectors >> 56) & 0xff; 2075 outbuf[1] = (nb_sectors >> 48) & 0xff; 2076 outbuf[2] = (nb_sectors >> 40) & 0xff; 2077 outbuf[3] = (nb_sectors >> 32) & 0xff; 2078 outbuf[4] = (nb_sectors >> 24) & 0xff; 2079 outbuf[5] = (nb_sectors >> 16) & 0xff; 2080 outbuf[6] = (nb_sectors >> 8) & 0xff; 2081 outbuf[7] = nb_sectors & 0xff; 2082 outbuf[8] = 0; 2083 outbuf[9] = 0; 2084 outbuf[10] = s->qdev.blocksize >> 8; 2085 outbuf[11] = 0; 2086 outbuf[12] = 0; 2087 outbuf[13] = get_physical_block_exp(&s->qdev.conf); 2088 2089 /* set TPE bit if the format supports discard */ 2090 if (s->qdev.conf.discard_granularity) { 2091 outbuf[14] = 0x80; 2092 } 2093 2094 /* Protection, exponent and lowest lba field left blank. */ 2095 break; 2096 } 2097 trace_scsi_disk_emulate_command_SAI_unsupported(); 2098 goto illegal_request; 2099 case SYNCHRONIZE_CACHE: 2100 /* The request is used as the AIO opaque value, so add a ref. 
*/ 2101 scsi_req_ref(&r->req); 2102 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0, 2103 BLOCK_ACCT_FLUSH); 2104 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r); 2105 return 0; 2106 case SEEK_10: 2107 trace_scsi_disk_emulate_command_SEEK_10(r->req.cmd.lba); 2108 if (r->req.cmd.lba > s->qdev.max_lba) { 2109 goto illegal_lba; 2110 } 2111 break; 2112 case MODE_SELECT: 2113 trace_scsi_disk_emulate_command_MODE_SELECT(r->req.cmd.xfer); 2114 break; 2115 case MODE_SELECT_10: 2116 trace_scsi_disk_emulate_command_MODE_SELECT_10(r->req.cmd.xfer); 2117 break; 2118 case UNMAP: 2119 trace_scsi_disk_emulate_command_UNMAP(r->req.cmd.xfer); 2120 break; 2121 case VERIFY_10: 2122 case VERIFY_12: 2123 case VERIFY_16: 2124 trace_scsi_disk_emulate_command_VERIFY((req->cmd.buf[1] >> 1) & 3); 2125 if (req->cmd.buf[1] & 6) { 2126 goto illegal_request; 2127 } 2128 break; 2129 case WRITE_SAME_10: 2130 case WRITE_SAME_16: 2131 trace_scsi_disk_emulate_command_WRITE_SAME( 2132 req->cmd.buf[0] == WRITE_SAME_10 ? 10 : 16, r->req.cmd.xfer); 2133 break; 2134 default: 2135 trace_scsi_disk_emulate_command_UNKNOWN(buf[0], 2136 scsi_command_name(buf[0])); 2137 scsi_check_condition(r, SENSE_CODE(INVALID_OPCODE)); 2138 return 0; 2139 } 2140 assert(!r->req.aiocb); 2141 r->iov.iov_len = MIN(r->buflen, req->cmd.xfer); 2142 if (r->iov.iov_len == 0) { 2143 scsi_req_complete(&r->req, GOOD); 2144 } 2145 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { 2146 assert(r->iov.iov_len == req->cmd.xfer); 2147 return -r->iov.iov_len; 2148 } else { 2149 return r->iov.iov_len; 2150 } 2151 2152 illegal_request: 2153 if (r->req.status == -1) { 2154 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 2155 } 2156 return 0; 2157 2158 illegal_lba: 2159 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE)); 2160 return 0; 2161 } 2162 2163 /* Execute a SCSI command. Returns the length of the data expected by the 2164 command. This will be positive for data transfers from the device 2165 (e.g. disk reads), negative for transfers to the device (e.g. disk writes), 2166 and zero if the command does not transfer any data. */ 2167 2168 static int32_t scsi_disk_dma_command(SCSIRequest *req, uint8_t *buf) 2169 { 2170 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 2171 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 2172 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s)); 2173 uint32_t len; 2174 uint8_t command; 2175 2176 command = buf[0]; 2177 2178 if (!blk_is_available(s->qdev.conf.blk)) { 2179 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM)); 2180 return 0; 2181 } 2182 2183 len = scsi_data_cdb_xfer(r->req.cmd.buf); 2184 switch (command) { 2185 case READ_6: 2186 case READ_10: 2187 case READ_12: 2188 case READ_16: 2189 trace_scsi_disk_dma_command_READ(r->req.cmd.lba, len); 2190 /* Protection information is not supported. For SCSI versions 2 and 2191 * older (as determined by snooping the guest's INQUIRY commands), 2192 * there is no RD/WR/VRPROTECT, so skip this check in these versions.
2193 */ 2194 if (s->qdev.scsi_version > 2 && (r->req.cmd.buf[1] & 0xe0)) { 2195 goto illegal_request; 2196 } 2197 if (!check_lba_range(s, r->req.cmd.lba, len)) { 2198 goto illegal_lba; 2199 } 2200 r->sector = r->req.cmd.lba * (s->qdev.blocksize / BDRV_SECTOR_SIZE); 2201 r->sector_count = len * (s->qdev.blocksize / BDRV_SECTOR_SIZE); 2202 break; 2203 case WRITE_6: 2204 case WRITE_10: 2205 case WRITE_12: 2206 case WRITE_16: 2207 case WRITE_VERIFY_10: 2208 case WRITE_VERIFY_12: 2209 case WRITE_VERIFY_16: 2210 if (blk_is_read_only(s->qdev.conf.blk)) { 2211 scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED)); 2212 return 0; 2213 } 2214 trace_scsi_disk_dma_command_WRITE( 2215 (command & 0xe) == 0xe ? "And Verify " : "", 2216 r->req.cmd.lba, len); 2217 /* fall through */ 2218 case VERIFY_10: 2219 case VERIFY_12: 2220 case VERIFY_16: 2221 /* We get here only for BYTCHK == 0x01 and only for scsi-block. 2222 * As far as DMA is concerned, we can treat it the same as a write; 2223 * scsi_block_do_sgio will send VERIFY commands. 2224 */ 2225 if (s->qdev.scsi_version > 2 && (r->req.cmd.buf[1] & 0xe0)) { 2226 goto illegal_request; 2227 } 2228 if (!check_lba_range(s, r->req.cmd.lba, len)) { 2229 goto illegal_lba; 2230 } 2231 r->sector = r->req.cmd.lba * (s->qdev.blocksize / BDRV_SECTOR_SIZE); 2232 r->sector_count = len * (s->qdev.blocksize / BDRV_SECTOR_SIZE); 2233 break; 2234 default: 2235 abort(); 2236 illegal_request: 2237 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 2238 return 0; 2239 illegal_lba: 2240 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE)); 2241 return 0; 2242 } 2243 r->need_fua_emulation = sdc->need_fua_emulation(&r->req.cmd); 2244 if (r->sector_count == 0) { 2245 scsi_req_complete(&r->req, GOOD); 2246 } 2247 assert(r->iov.iov_len == 0); 2248 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { 2249 return -r->sector_count * BDRV_SECTOR_SIZE; 2250 } else { 2251 return r->sector_count * BDRV_SECTOR_SIZE; 2252 } 2253 } 2254 2255 static void scsi_disk_reset(DeviceState *dev) 2256 { 2257 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev.qdev, dev); 2258 uint64_t nb_sectors; 2259 2260 scsi_device_purge_requests(&s->qdev, SENSE_CODE(RESET)); 2261 2262 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 2263 nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE; 2264 if (nb_sectors) { 2265 nb_sectors--; 2266 } 2267 s->qdev.max_lba = nb_sectors; 2268 /* reset tray statuses */ 2269 s->tray_locked = 0; 2270 s->tray_open = 0; 2271 2272 s->qdev.scsi_version = s->qdev.default_scsi_version; 2273 } 2274 2275 static void scsi_disk_resize_cb(void *opaque) 2276 { 2277 SCSIDiskState *s = opaque; 2278 2279 /* SPC lists this sense code as available only for 2280 * direct-access devices. 2281 */ 2282 if (s->qdev.type == TYPE_DISK) { 2283 scsi_device_report_change(&s->qdev, SENSE_CODE(CAPACITY_CHANGED)); 2284 } 2285 } 2286 2287 static void scsi_cd_change_media_cb(void *opaque, bool load, Error **errp) 2288 { 2289 SCSIDiskState *s = opaque; 2290 2291 /* 2292 * When a CD gets changed, we have to report an ejected state and 2293 * then a loaded state to guests so that they detect tray 2294 * open/close and media change events. Guests that do not use 2295 * GET_EVENT_STATUS_NOTIFICATION to detect such tray open/close 2296 * states rely on this behavior. 2297 * 2298 * media_changed governs the state machine used for unit attention 2299 * report. media_event is used by GET EVENT STATUS NOTIFICATION. 
2300 */ 2301 s->media_changed = load; 2302 s->tray_open = !load; 2303 scsi_device_set_ua(&s->qdev, SENSE_CODE(UNIT_ATTENTION_NO_MEDIUM)); 2304 s->media_event = true; 2305 s->eject_request = false; 2306 } 2307 2308 static void scsi_cd_eject_request_cb(void *opaque, bool force) 2309 { 2310 SCSIDiskState *s = opaque; 2311 2312 s->eject_request = true; 2313 if (force) { 2314 s->tray_locked = false; 2315 } 2316 } 2317 2318 static bool scsi_cd_is_tray_open(void *opaque) 2319 { 2320 return ((SCSIDiskState *)opaque)->tray_open; 2321 } 2322 2323 static bool scsi_cd_is_medium_locked(void *opaque) 2324 { 2325 return ((SCSIDiskState *)opaque)->tray_locked; 2326 } 2327 2328 static const BlockDevOps scsi_disk_removable_block_ops = { 2329 .change_media_cb = scsi_cd_change_media_cb, 2330 .eject_request_cb = scsi_cd_eject_request_cb, 2331 .is_tray_open = scsi_cd_is_tray_open, 2332 .is_medium_locked = scsi_cd_is_medium_locked, 2333 2334 .resize_cb = scsi_disk_resize_cb, 2335 }; 2336 2337 static const BlockDevOps scsi_disk_block_ops = { 2338 .resize_cb = scsi_disk_resize_cb, 2339 }; 2340 2341 static void scsi_disk_unit_attention_reported(SCSIDevice *dev) 2342 { 2343 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2344 if (s->media_changed) { 2345 s->media_changed = false; 2346 scsi_device_set_ua(&s->qdev, SENSE_CODE(MEDIUM_CHANGED)); 2347 } 2348 } 2349 2350 static void scsi_realize(SCSIDevice *dev, Error **errp) 2351 { 2352 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2353 bool read_only; 2354 2355 if (!s->qdev.conf.blk) { 2356 error_setg(errp, "drive property not set"); 2357 return; 2358 } 2359 2360 if (!(s->features & (1 << SCSI_DISK_F_REMOVABLE)) && 2361 !blk_is_inserted(s->qdev.conf.blk)) { 2362 error_setg(errp, "Device needs media, but drive is empty"); 2363 return; 2364 } 2365 2366 if (!blkconf_blocksizes(&s->qdev.conf, errp)) { 2367 return; 2368 } 2369 2370 if (blk_get_aio_context(s->qdev.conf.blk) != qemu_get_aio_context() && 2371 !s->qdev.hba_supports_iothread) 2372 { 2373 error_setg(errp, "HBA does not support iothreads"); 2374 return; 2375 } 2376 2377 if (dev->type == TYPE_DISK) { 2378 if (!blkconf_geometry(&dev->conf, NULL, 65535, 255, 255, errp)) { 2379 return; 2380 } 2381 } 2382 2383 read_only = blk_is_read_only(s->qdev.conf.blk); 2384 if (dev->type == TYPE_ROM) { 2385 read_only = true; 2386 } 2387 2388 if (!blkconf_apply_backend_options(&dev->conf, read_only, 2389 dev->type == TYPE_DISK, errp)) { 2390 return; 2391 } 2392 2393 if (s->qdev.conf.discard_granularity == -1) { 2394 s->qdev.conf.discard_granularity = 2395 MAX(s->qdev.conf.logical_block_size, DEFAULT_DISCARD_GRANULARITY); 2396 } 2397 2398 if (!s->version) { 2399 s->version = g_strdup(qemu_hw_version()); 2400 } 2401 if (!s->vendor) { 2402 s->vendor = g_strdup("QEMU"); 2403 } 2404 if (!s->device_id) { 2405 if (s->serial) { 2406 s->device_id = g_strdup_printf("%.20s", s->serial); 2407 } else { 2408 const char *str = blk_name(s->qdev.conf.blk); 2409 if (str && *str) { 2410 s->device_id = g_strdup(str); 2411 } 2412 } 2413 } 2414 2415 if (blk_is_sg(s->qdev.conf.blk)) { 2416 error_setg(errp, "unwanted /dev/sg*"); 2417 return; 2418 } 2419 2420 if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) && 2421 !(s->features & (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS))) { 2422 blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_removable_block_ops, s); 2423 } else { 2424 blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_block_ops, s); 2425 } 2426 blk_set_guest_block_size(s->qdev.conf.blk, s->qdev.blocksize); 2427 2428 
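/* Track the backend's I/O status (ok / failed / nospace) so that a rerror/werror "stop" policy can be reported to management tools via query-block. */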
blk_iostatus_enable(s->qdev.conf.blk); 2429 2430 add_boot_device_lchs(&dev->qdev, NULL, 2431 dev->conf.lcyls, 2432 dev->conf.lheads, 2433 dev->conf.lsecs); 2434 } 2435 2436 static void scsi_unrealize(SCSIDevice *dev) 2437 { 2438 del_boot_device_lchs(&dev->qdev, NULL); 2439 } 2440 2441 static void scsi_hd_realize(SCSIDevice *dev, Error **errp) 2442 { 2443 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2444 AioContext *ctx = NULL; 2445 /* can happen for devices without drive. The error message for missing 2446 * backend will be issued in scsi_realize 2447 */ 2448 if (s->qdev.conf.blk) { 2449 ctx = blk_get_aio_context(s->qdev.conf.blk); 2450 aio_context_acquire(ctx); 2451 if (!blkconf_blocksizes(&s->qdev.conf, errp)) { 2452 goto out; 2453 } 2454 } 2455 s->qdev.blocksize = s->qdev.conf.logical_block_size; 2456 s->qdev.type = TYPE_DISK; 2457 if (!s->product) { 2458 s->product = g_strdup("QEMU HARDDISK"); 2459 } 2460 scsi_realize(&s->qdev, errp); 2461 out: 2462 if (ctx) { 2463 aio_context_release(ctx); 2464 } 2465 } 2466 2467 static void scsi_cd_realize(SCSIDevice *dev, Error **errp) 2468 { 2469 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2470 AioContext *ctx; 2471 int ret; 2472 2473 if (!dev->conf.blk) { 2474 /* Anonymous BlockBackend for an empty drive. As we put it into 2475 * dev->conf, qdev takes care of detaching on unplug. */ 2476 dev->conf.blk = blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL); 2477 ret = blk_attach_dev(dev->conf.blk, &dev->qdev); 2478 assert(ret == 0); 2479 } 2480 2481 ctx = blk_get_aio_context(dev->conf.blk); 2482 aio_context_acquire(ctx); 2483 s->qdev.blocksize = 2048; 2484 s->qdev.type = TYPE_ROM; 2485 s->features |= 1 << SCSI_DISK_F_REMOVABLE; 2486 if (!s->product) { 2487 s->product = g_strdup("QEMU CD-ROM"); 2488 } 2489 scsi_realize(&s->qdev, errp); 2490 aio_context_release(ctx); 2491 } 2492 2493 static void scsi_disk_realize(SCSIDevice *dev, Error **errp) 2494 { 2495 DriveInfo *dinfo; 2496 Error *local_err = NULL; 2497 2498 warn_report("'scsi-disk' is deprecated, " 2499 "please use 'scsi-hd' or 'scsi-cd' instead"); 2500 2501 if (!dev->conf.blk) { 2502 scsi_realize(dev, &local_err); 2503 assert(local_err); 2504 error_propagate(errp, local_err); 2505 return; 2506 } 2507 2508 dinfo = blk_legacy_dinfo(dev->conf.blk); 2509 if (dinfo && dinfo->media_cd) { 2510 scsi_cd_realize(dev, errp); 2511 } else { 2512 scsi_hd_realize(dev, errp); 2513 } 2514 } 2515 2516 static const SCSIReqOps scsi_disk_emulate_reqops = { 2517 .size = sizeof(SCSIDiskReq), 2518 .free_req = scsi_free_request, 2519 .send_command = scsi_disk_emulate_command, 2520 .read_data = scsi_disk_emulate_read_data, 2521 .write_data = scsi_disk_emulate_write_data, 2522 .get_buf = scsi_get_buf, 2523 }; 2524 2525 static const SCSIReqOps scsi_disk_dma_reqops = { 2526 .size = sizeof(SCSIDiskReq), 2527 .free_req = scsi_free_request, 2528 .send_command = scsi_disk_dma_command, 2529 .read_data = scsi_read_data, 2530 .write_data = scsi_write_data, 2531 .get_buf = scsi_get_buf, 2532 .load_request = scsi_disk_load_request, 2533 .save_request = scsi_disk_save_request, 2534 }; 2535 2536 static const SCSIReqOps *const scsi_disk_reqops_dispatch[256] = { 2537 [TEST_UNIT_READY] = &scsi_disk_emulate_reqops, 2538 [INQUIRY] = &scsi_disk_emulate_reqops, 2539 [MODE_SENSE] = &scsi_disk_emulate_reqops, 2540 [MODE_SENSE_10] = &scsi_disk_emulate_reqops, 2541 [START_STOP] = &scsi_disk_emulate_reqops, 2542 [ALLOW_MEDIUM_REMOVAL] = &scsi_disk_emulate_reqops, 2543 [READ_CAPACITY_10] = &scsi_disk_emulate_reqops, 2544 
[READ_TOC] = &scsi_disk_emulate_reqops, 2545 [READ_DVD_STRUCTURE] = &scsi_disk_emulate_reqops, 2546 [READ_DISC_INFORMATION] = &scsi_disk_emulate_reqops, 2547 [GET_CONFIGURATION] = &scsi_disk_emulate_reqops, 2548 [GET_EVENT_STATUS_NOTIFICATION] = &scsi_disk_emulate_reqops, 2549 [MECHANISM_STATUS] = &scsi_disk_emulate_reqops, 2550 [SERVICE_ACTION_IN_16] = &scsi_disk_emulate_reqops, 2551 [REQUEST_SENSE] = &scsi_disk_emulate_reqops, 2552 [SYNCHRONIZE_CACHE] = &scsi_disk_emulate_reqops, 2553 [SEEK_10] = &scsi_disk_emulate_reqops, 2554 [MODE_SELECT] = &scsi_disk_emulate_reqops, 2555 [MODE_SELECT_10] = &scsi_disk_emulate_reqops, 2556 [UNMAP] = &scsi_disk_emulate_reqops, 2557 [WRITE_SAME_10] = &scsi_disk_emulate_reqops, 2558 [WRITE_SAME_16] = &scsi_disk_emulate_reqops, 2559 [VERIFY_10] = &scsi_disk_emulate_reqops, 2560 [VERIFY_12] = &scsi_disk_emulate_reqops, 2561 [VERIFY_16] = &scsi_disk_emulate_reqops, 2562 2563 [READ_6] = &scsi_disk_dma_reqops, 2564 [READ_10] = &scsi_disk_dma_reqops, 2565 [READ_12] = &scsi_disk_dma_reqops, 2566 [READ_16] = &scsi_disk_dma_reqops, 2567 [WRITE_6] = &scsi_disk_dma_reqops, 2568 [WRITE_10] = &scsi_disk_dma_reqops, 2569 [WRITE_12] = &scsi_disk_dma_reqops, 2570 [WRITE_16] = &scsi_disk_dma_reqops, 2571 [WRITE_VERIFY_10] = &scsi_disk_dma_reqops, 2572 [WRITE_VERIFY_12] = &scsi_disk_dma_reqops, 2573 [WRITE_VERIFY_16] = &scsi_disk_dma_reqops, 2574 }; 2575 2576 static void scsi_disk_new_request_dump(uint32_t lun, uint32_t tag, uint8_t *buf) 2577 { 2578 int i; 2579 int len = scsi_cdb_length(buf); 2580 char *line_buffer, *p; 2581 2582 line_buffer = g_malloc(len * 5 + 1); 2583 2584 for (i = 0, p = line_buffer; i < len; i++) { 2585 p += sprintf(p, " 0x%02x", buf[i]); 2586 } 2587 trace_scsi_disk_new_request(lun, tag, line_buffer); 2588 2589 g_free(line_buffer); 2590 } 2591 2592 static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun, 2593 uint8_t *buf, void *hba_private) 2594 { 2595 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d); 2596 SCSIRequest *req; 2597 const SCSIReqOps *ops; 2598 uint8_t command; 2599 2600 command = buf[0]; 2601 ops = scsi_disk_reqops_dispatch[command]; 2602 if (!ops) { 2603 ops = &scsi_disk_emulate_reqops; 2604 } 2605 req = scsi_req_alloc(ops, &s->qdev, tag, lun, hba_private); 2606 2607 if (trace_event_get_state_backends(TRACE_SCSI_DISK_NEW_REQUEST)) { 2608 scsi_disk_new_request_dump(lun, tag, buf); 2609 } 2610 2611 return req; 2612 } 2613 2614 #ifdef __linux__ 2615 static int get_device_type(SCSIDiskState *s) 2616 { 2617 uint8_t cmd[16]; 2618 uint8_t buf[36]; 2619 int ret; 2620 2621 memset(cmd, 0, sizeof(cmd)); 2622 memset(buf, 0, sizeof(buf)); 2623 cmd[0] = INQUIRY; 2624 cmd[4] = sizeof(buf); 2625 2626 ret = scsi_SG_IO_FROM_DEV(s->qdev.conf.blk, cmd, sizeof(cmd), 2627 buf, sizeof(buf)); 2628 if (ret < 0) { 2629 return -1; 2630 } 2631 s->qdev.type = buf[0]; 2632 if (buf[1] & 0x80) { 2633 s->features |= 1 << SCSI_DISK_F_REMOVABLE; 2634 } 2635 return 0; 2636 } 2637 2638 static void scsi_block_realize(SCSIDevice *dev, Error **errp) 2639 { 2640 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2641 AioContext *ctx; 2642 int sg_version; 2643 int rc; 2644 2645 if (!s->qdev.conf.blk) { 2646 error_setg(errp, "drive property not set"); 2647 return; 2648 } 2649 2650 if (s->rotation_rate) { 2651 error_report_once("rotation_rate is specified for scsi-block but is " 2652 "not implemented. 
This option is deprecated and will " 2653 "be removed in a future version"); 2654 } 2655 2656 ctx = blk_get_aio_context(s->qdev.conf.blk); 2657 aio_context_acquire(ctx); 2658 2659 /* check we are using a driver managing SG_IO (version 3 and later) */ 2660 rc = blk_ioctl(s->qdev.conf.blk, SG_GET_VERSION_NUM, &sg_version); 2661 if (rc < 0) { 2662 error_setg_errno(errp, -rc, "cannot get SG_IO version number"); 2663 if (rc != -EPERM) { 2664 error_append_hint(errp, "Is this a SCSI device?\n"); 2665 } 2666 goto out; 2667 } 2668 if (sg_version < 30000) { 2669 error_setg(errp, "scsi generic interface too old"); 2670 goto out; 2671 } 2672 2673 /* get device type from INQUIRY data */ 2674 rc = get_device_type(s); 2675 if (rc < 0) { 2676 error_setg(errp, "INQUIRY failed"); 2677 goto out; 2678 } 2679 2680 /* Make a guess for the block size; we'll fix it when the guest sends 2681 * READ CAPACITY. If it doesn't, it would likely assume these sizes 2682 * anyway. (TODO: check in /sys). 2683 */ 2684 if (s->qdev.type == TYPE_ROM || s->qdev.type == TYPE_WORM) { 2685 s->qdev.blocksize = 2048; 2686 } else { 2687 s->qdev.blocksize = 512; 2688 } 2689 2690 /* Makes the scsi-block device not removable via the HMP and QMP eject 2691 * commands. 2692 */ 2693 s->features |= (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS); 2694 2695 scsi_realize(&s->qdev, errp); 2696 scsi_generic_read_device_inquiry(&s->qdev); 2697 2698 out: 2699 aio_context_release(ctx); 2700 } 2701 2702 typedef struct SCSIBlockReq { 2703 SCSIDiskReq req; 2704 sg_io_hdr_t io_header; 2705 2706 /* Selected bytes of the original CDB, copied into our own CDB. */ 2707 uint8_t cmd, cdb1, group_number; 2708 2709 /* CDB passed to SG_IO. */ 2710 uint8_t cdb[16]; 2711 } SCSIBlockReq; 2712 2713 static BlockAIOCB *scsi_block_do_sgio(SCSIBlockReq *req, 2714 int64_t offset, QEMUIOVector *iov, 2715 int direction, 2716 BlockCompletionFunc *cb, void *opaque) 2717 { 2718 sg_io_hdr_t *io_header = &req->io_header; 2719 SCSIDiskReq *r = &req->req; 2720 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 2721 int nb_logical_blocks; 2722 uint64_t lba; 2723 BlockAIOCB *aiocb; 2724 2725 /* This is not supported yet. It can only happen if the guest does 2726 * reads and writes that are not aligned to the logical block size 2727 * _and_ cover multiple MemoryRegions. 2728 */ 2729 assert(offset % s->qdev.blocksize == 0); 2730 assert(iov->size % s->qdev.blocksize == 0); 2731 2732 io_header->interface_id = 'S'; 2733 2734 /* The data transfer comes from the QEMUIOVector. */ 2735 io_header->dxfer_direction = direction; 2736 io_header->dxfer_len = iov->size; 2737 io_header->dxferp = (void *)iov->iov; 2738 io_header->iovec_count = iov->niov; 2739 assert(io_header->iovec_count == iov->niov); /* no overflow! */ 2740 2741 /* Build a new CDB with the LBA and length patched in, in case 2742 * the DMA helpers split the transfer into multiple segments. Do not 2743 * build a CDB smaller than what the guest wanted, and only build 2744 * a larger one if strictly necessary.
2745 */ 2746 io_header->cmdp = req->cdb; 2747 lba = offset / s->qdev.blocksize; 2748 nb_logical_blocks = io_header->dxfer_len / s->qdev.blocksize; 2749 2750 if ((req->cmd >> 5) == 0 && lba <= 0x1ffff) { 2751 /* 6-byte CDB */ 2752 stl_be_p(&req->cdb[0], lba | (req->cmd << 24)); 2753 req->cdb[4] = nb_logical_blocks; 2754 req->cdb[5] = 0; 2755 io_header->cmd_len = 6; 2756 } else if ((req->cmd >> 5) <= 1 && lba <= 0xffffffffULL) { 2757 /* 10-byte CDB */ 2758 req->cdb[0] = (req->cmd & 0x1f) | 0x20; 2759 req->cdb[1] = req->cdb1; 2760 stl_be_p(&req->cdb[2], lba); 2761 req->cdb[6] = req->group_number; 2762 stw_be_p(&req->cdb[7], nb_logical_blocks); 2763 req->cdb[9] = 0; 2764 io_header->cmd_len = 10; 2765 } else if ((req->cmd >> 5) != 4 && lba <= 0xffffffffULL) { 2766 /* 12-byte CDB */ 2767 req->cdb[0] = (req->cmd & 0x1f) | 0xA0; 2768 req->cdb[1] = req->cdb1; 2769 stl_be_p(&req->cdb[2], lba); 2770 stl_be_p(&req->cdb[6], nb_logical_blocks); 2771 req->cdb[10] = req->group_number; 2772 req->cdb[11] = 0; 2773 io_header->cmd_len = 12; 2774 } else { 2775 /* 16-byte CDB */ 2776 req->cdb[0] = (req->cmd & 0x1f) | 0x80; 2777 req->cdb[1] = req->cdb1; 2778 stq_be_p(&req->cdb[2], lba); 2779 stl_be_p(&req->cdb[10], nb_logical_blocks); 2780 req->cdb[14] = req->group_number; 2781 req->cdb[15] = 0; 2782 io_header->cmd_len = 16; 2783 } 2784 2785 /* The rest is as in scsi-generic.c. */ 2786 io_header->mx_sb_len = sizeof(r->req.sense); 2787 io_header->sbp = r->req.sense; 2788 io_header->timeout = UINT_MAX; 2789 io_header->usr_ptr = r; 2790 io_header->flags |= SG_FLAG_DIRECT_IO; 2791 2792 aiocb = blk_aio_ioctl(s->qdev.conf.blk, SG_IO, io_header, cb, opaque); 2793 assert(aiocb != NULL); 2794 return aiocb; 2795 } 2796 2797 static bool scsi_block_no_fua(SCSICommand *cmd) 2798 { 2799 return false; 2800 } 2801 2802 static BlockAIOCB *scsi_block_dma_readv(int64_t offset, 2803 QEMUIOVector *iov, 2804 BlockCompletionFunc *cb, void *cb_opaque, 2805 void *opaque) 2806 { 2807 SCSIBlockReq *r = opaque; 2808 return scsi_block_do_sgio(r, offset, iov, 2809 SG_DXFER_FROM_DEV, cb, cb_opaque); 2810 } 2811 2812 static BlockAIOCB *scsi_block_dma_writev(int64_t offset, 2813 QEMUIOVector *iov, 2814 BlockCompletionFunc *cb, void *cb_opaque, 2815 void *opaque) 2816 { 2817 SCSIBlockReq *r = opaque; 2818 return scsi_block_do_sgio(r, offset, iov, 2819 SG_DXFER_TO_DEV, cb, cb_opaque); 2820 } 2821 2822 static bool scsi_block_is_passthrough(SCSIDiskState *s, uint8_t *buf) 2823 { 2824 switch (buf[0]) { 2825 case VERIFY_10: 2826 case VERIFY_12: 2827 case VERIFY_16: 2828 /* Check if BYTCHK == 0x01 (data-out buffer contains data 2829 * for the number of logical blocks specified in the length 2830 * field). For other modes, do not use scatter/gather operation. 2831 */ 2832 if ((buf[1] & 6) == 2) { 2833 return false; 2834 } 2835 break; 2836 2837 case READ_6: 2838 case READ_10: 2839 case READ_12: 2840 case READ_16: 2841 case WRITE_6: 2842 case WRITE_10: 2843 case WRITE_12: 2844 case WRITE_16: 2845 case WRITE_VERIFY_10: 2846 case WRITE_VERIFY_12: 2847 case WRITE_VERIFY_16: 2848 /* MMC writing cannot be done via DMA helpers, because it sometimes 2849 * involves writing beyond the maximum LBA or to negative LBA (lead-in). 2850 * We might use scsi_block_dma_reqops as long as no writing commands are 2851 * seen, but performance usually isn't paramount on optical media. So, 2852 * just make scsi-block operate the same as scsi-generic for them. 
2853 */ 2854 if (s->qdev.type != TYPE_ROM) { 2855 return false; 2856 } 2857 break; 2858 2859 default: 2860 break; 2861 } 2862 2863 return true; 2864 } 2865 2866 2867 static int32_t scsi_block_dma_command(SCSIRequest *req, uint8_t *buf) 2868 { 2869 SCSIBlockReq *r = (SCSIBlockReq *)req; 2870 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 2871 2872 r->cmd = req->cmd.buf[0]; 2873 switch (r->cmd >> 5) { 2874 case 0: 2875 /* 6-byte CDB. */ 2876 r->cdb1 = r->group_number = 0; 2877 break; 2878 case 1: 2879 /* 10-byte CDB. */ 2880 r->cdb1 = req->cmd.buf[1]; 2881 r->group_number = req->cmd.buf[6]; 2882 break; 2883 case 4: 2884 /* 12-byte CDB. */ 2885 r->cdb1 = req->cmd.buf[1]; 2886 r->group_number = req->cmd.buf[10]; 2887 break; 2888 case 5: 2889 /* 16-byte CDB. */ 2890 r->cdb1 = req->cmd.buf[1]; 2891 r->group_number = req->cmd.buf[14]; 2892 break; 2893 default: 2894 abort(); 2895 } 2896 2897 /* Protection information is not supported. For SCSI versions 2 and 2898 * older (as determined by snooping the guest's INQUIRY commands), 2899 * there is no RD/WR/VRPROTECT, so skip this check in these versions. 2900 */ 2901 if (s->qdev.scsi_version > 2 && (req->cmd.buf[1] & 0xe0)) { 2902 scsi_check_condition(&r->req, SENSE_CODE(INVALID_FIELD)); 2903 return 0; 2904 } 2905 2906 r->req.status = &r->io_header.status; 2907 return scsi_disk_dma_command(req, buf); 2908 } 2909 2910 static const SCSIReqOps scsi_block_dma_reqops = { 2911 .size = sizeof(SCSIBlockReq), 2912 .free_req = scsi_free_request, 2913 .send_command = scsi_block_dma_command, 2914 .read_data = scsi_read_data, 2915 .write_data = scsi_write_data, 2916 .get_buf = scsi_get_buf, 2917 .load_request = scsi_disk_load_request, 2918 .save_request = scsi_disk_save_request, 2919 }; 2920 2921 static SCSIRequest *scsi_block_new_request(SCSIDevice *d, uint32_t tag, 2922 uint32_t lun, uint8_t *buf, 2923 void *hba_private) 2924 { 2925 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d); 2926 2927 if (scsi_block_is_passthrough(s, buf)) { 2928 return scsi_req_alloc(&scsi_generic_req_ops, &s->qdev, tag, lun, 2929 hba_private); 2930 } else { 2931 return scsi_req_alloc(&scsi_block_dma_reqops, &s->qdev, tag, lun, 2932 hba_private); 2933 } 2934 } 2935 2936 static int scsi_block_parse_cdb(SCSIDevice *d, SCSICommand *cmd, 2937 uint8_t *buf, void *hba_private) 2938 { 2939 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d); 2940 2941 if (scsi_block_is_passthrough(s, buf)) { 2942 return scsi_bus_parse_cdb(&s->qdev, cmd, buf, hba_private); 2943 } else { 2944 return scsi_req_parse_cdb(&s->qdev, cmd, buf); 2945 } 2946 } 2947 2948 static void scsi_block_update_sense(SCSIRequest *req) 2949 { 2950 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 2951 SCSIBlockReq *br = DO_UPCAST(SCSIBlockReq, req, r); 2952 r->req.sense_len = MIN(br->io_header.sb_len_wr, sizeof(r->req.sense)); 2953 } 2954 #endif 2955 2956 static 2957 BlockAIOCB *scsi_dma_readv(int64_t offset, QEMUIOVector *iov, 2958 BlockCompletionFunc *cb, void *cb_opaque, 2959 void *opaque) 2960 { 2961 SCSIDiskReq *r = opaque; 2962 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 2963 return blk_aio_preadv(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque); 2964 } 2965 2966 static 2967 BlockAIOCB *scsi_dma_writev(int64_t offset, QEMUIOVector *iov, 2968 BlockCompletionFunc *cb, void *cb_opaque, 2969 void *opaque) 2970 { 2971 SCSIDiskReq *r = opaque; 2972 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 2973 return blk_aio_pwritev(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque); 2974 } 
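/*
 * Illustrative example for the CDB rebuilding in scsi_block_do_sgio() above
 * (not executed; the values are hypothetical): if the guest issued READ(10)
 * (opcode 0x28) and the DMA helpers hand us a segment of 8 logical blocks
 * starting at LBA 0x12345, the 6-byte branch is skipped because the opcode
 * group (0x28 >> 5) is not 0, and the 10-byte branch is taken because the
 * group is <= 1 and the LBA fits in 32 bits.  The rebuilt CDB is
 *
 *   28 <cdb1> 00 01 23 45 <group_number> 00 08 00
 *
 * i.e. the opcode stays READ(10), the saved flag and group bytes are
 * reinserted, and the LBA and transfer length describe only this segment.
 */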
2975 2976 static void scsi_disk_base_class_initfn(ObjectClass *klass, void *data) 2977 { 2978 DeviceClass *dc = DEVICE_CLASS(klass); 2979 SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass); 2980 2981 dc->fw_name = "disk"; 2982 dc->reset = scsi_disk_reset; 2983 sdc->dma_readv = scsi_dma_readv; 2984 sdc->dma_writev = scsi_dma_writev; 2985 sdc->need_fua_emulation = scsi_is_cmd_fua; 2986 } 2987 2988 static const TypeInfo scsi_disk_base_info = { 2989 .name = TYPE_SCSI_DISK_BASE, 2990 .parent = TYPE_SCSI_DEVICE, 2991 .class_init = scsi_disk_base_class_initfn, 2992 .instance_size = sizeof(SCSIDiskState), 2993 .class_size = sizeof(SCSIDiskClass), 2994 .abstract = true, 2995 }; 2996 2997 #define DEFINE_SCSI_DISK_PROPERTIES() \ 2998 DEFINE_PROP_DRIVE_IOTHREAD("drive", SCSIDiskState, qdev.conf.blk), \ 2999 DEFINE_BLOCK_PROPERTIES_BASE(SCSIDiskState, qdev.conf), \ 3000 DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf), \ 3001 DEFINE_PROP_STRING("ver", SCSIDiskState, version), \ 3002 DEFINE_PROP_STRING("serial", SCSIDiskState, serial), \ 3003 DEFINE_PROP_STRING("vendor", SCSIDiskState, vendor), \ 3004 DEFINE_PROP_STRING("product", SCSIDiskState, product), \ 3005 DEFINE_PROP_STRING("device_id", SCSIDiskState, device_id) 3006 3007 3008 static Property scsi_hd_properties[] = { 3009 DEFINE_SCSI_DISK_PROPERTIES(), 3010 DEFINE_PROP_BIT("removable", SCSIDiskState, features, 3011 SCSI_DISK_F_REMOVABLE, false), 3012 DEFINE_PROP_BIT("dpofua", SCSIDiskState, features, 3013 SCSI_DISK_F_DPOFUA, false), 3014 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0), 3015 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0), 3016 DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0), 3017 DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size, 3018 DEFAULT_MAX_UNMAP_SIZE), 3019 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size, 3020 DEFAULT_MAX_IO_SIZE), 3021 DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0), 3022 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version, 3023 5), 3024 DEFINE_BLOCK_CHS_PROPERTIES(SCSIDiskState, qdev.conf), 3025 DEFINE_PROP_END_OF_LIST(), 3026 }; 3027 3028 static const VMStateDescription vmstate_scsi_disk_state = { 3029 .name = "scsi-disk", 3030 .version_id = 1, 3031 .minimum_version_id = 1, 3032 .fields = (VMStateField[]) { 3033 VMSTATE_SCSI_DEVICE(qdev, SCSIDiskState), 3034 VMSTATE_BOOL(media_changed, SCSIDiskState), 3035 VMSTATE_BOOL(media_event, SCSIDiskState), 3036 VMSTATE_BOOL(eject_request, SCSIDiskState), 3037 VMSTATE_BOOL(tray_open, SCSIDiskState), 3038 VMSTATE_BOOL(tray_locked, SCSIDiskState), 3039 VMSTATE_END_OF_LIST() 3040 } 3041 }; 3042 3043 static void scsi_hd_class_initfn(ObjectClass *klass, void *data) 3044 { 3045 DeviceClass *dc = DEVICE_CLASS(klass); 3046 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); 3047 3048 sc->realize = scsi_hd_realize; 3049 sc->unrealize = scsi_unrealize; 3050 sc->alloc_req = scsi_new_request; 3051 sc->unit_attention_reported = scsi_disk_unit_attention_reported; 3052 dc->desc = "virtual SCSI disk"; 3053 device_class_set_props(dc, scsi_hd_properties); 3054 dc->vmsd = &vmstate_scsi_disk_state; 3055 } 3056 3057 static const TypeInfo scsi_hd_info = { 3058 .name = "scsi-hd", 3059 .parent = TYPE_SCSI_DISK_BASE, 3060 .class_init = scsi_hd_class_initfn, 3061 }; 3062 3063 static Property scsi_cd_properties[] = { 3064 DEFINE_SCSI_DISK_PROPERTIES(), 3065 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0), 3066 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0), 
3067 DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0), 3068 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size, 3069 DEFAULT_MAX_IO_SIZE), 3070 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version, 3071 5), 3072 DEFINE_PROP_END_OF_LIST(), 3073 }; 3074 3075 static void scsi_cd_class_initfn(ObjectClass *klass, void *data) 3076 { 3077 DeviceClass *dc = DEVICE_CLASS(klass); 3078 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); 3079 3080 sc->realize = scsi_cd_realize; 3081 sc->alloc_req = scsi_new_request; 3082 sc->unit_attention_reported = scsi_disk_unit_attention_reported; 3083 dc->desc = "virtual SCSI CD-ROM"; 3084 device_class_set_props(dc, scsi_cd_properties); 3085 dc->vmsd = &vmstate_scsi_disk_state; 3086 } 3087 3088 static const TypeInfo scsi_cd_info = { 3089 .name = "scsi-cd", 3090 .parent = TYPE_SCSI_DISK_BASE, 3091 .class_init = scsi_cd_class_initfn, 3092 }; 3093 3094 #ifdef __linux__ 3095 static Property scsi_block_properties[] = { 3096 DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf), 3097 DEFINE_PROP_DRIVE("drive", SCSIDiskState, qdev.conf.blk), 3098 DEFINE_PROP_BOOL("share-rw", SCSIDiskState, qdev.conf.share_rw, false), 3099 DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0), 3100 DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size, 3101 DEFAULT_MAX_UNMAP_SIZE), 3102 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size, 3103 DEFAULT_MAX_IO_SIZE), 3104 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version, 3105 -1), 3106 DEFINE_PROP_END_OF_LIST(), 3107 }; 3108 3109 static void scsi_block_class_initfn(ObjectClass *klass, void *data) 3110 { 3111 DeviceClass *dc = DEVICE_CLASS(klass); 3112 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); 3113 SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass); 3114 3115 sc->realize = scsi_block_realize; 3116 sc->alloc_req = scsi_block_new_request; 3117 sc->parse_cdb = scsi_block_parse_cdb; 3118 sdc->dma_readv = scsi_block_dma_readv; 3119 sdc->dma_writev = scsi_block_dma_writev; 3120 sdc->update_sense = scsi_block_update_sense; 3121 sdc->need_fua_emulation = scsi_block_no_fua; 3122 dc->desc = "SCSI block device passthrough"; 3123 device_class_set_props(dc, scsi_block_properties); 3124 dc->vmsd = &vmstate_scsi_disk_state; 3125 } 3126 3127 static const TypeInfo scsi_block_info = { 3128 .name = "scsi-block", 3129 .parent = TYPE_SCSI_DISK_BASE, 3130 .class_init = scsi_block_class_initfn, 3131 }; 3132 #endif 3133 3134 static Property scsi_disk_properties[] = { 3135 DEFINE_SCSI_DISK_PROPERTIES(), 3136 DEFINE_PROP_BIT("removable", SCSIDiskState, features, 3137 SCSI_DISK_F_REMOVABLE, false), 3138 DEFINE_PROP_BIT("dpofua", SCSIDiskState, features, 3139 SCSI_DISK_F_DPOFUA, false), 3140 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0), 3141 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0), 3142 DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0), 3143 DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size, 3144 DEFAULT_MAX_UNMAP_SIZE), 3145 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size, 3146 DEFAULT_MAX_IO_SIZE), 3147 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version, 3148 5), 3149 DEFINE_PROP_END_OF_LIST(), 3150 }; 3151 3152 static void scsi_disk_class_initfn(ObjectClass *klass, void *data) 3153 { 3154 DeviceClass *dc = DEVICE_CLASS(klass); 3155 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); 3156 3157 sc->realize = scsi_disk_realize; 3158 sc->alloc_req = 
scsi_new_request; 3159 sc->unit_attention_reported = scsi_disk_unit_attention_reported; 3160 dc->fw_name = "disk"; 3161 dc->desc = "virtual SCSI disk or CD-ROM (legacy)"; 3162 dc->reset = scsi_disk_reset; 3163 device_class_set_props(dc, scsi_disk_properties); 3164 dc->vmsd = &vmstate_scsi_disk_state; 3165 } 3166 3167 static const TypeInfo scsi_disk_info = { 3168 .name = "scsi-disk", 3169 .parent = TYPE_SCSI_DISK_BASE, 3170 .class_init = scsi_disk_class_initfn, 3171 }; 3172 3173 static void scsi_disk_register_types(void) 3174 { 3175 type_register_static(&scsi_disk_base_info); 3176 type_register_static(&scsi_hd_info); 3177 type_register_static(&scsi_cd_info); 3178 #ifdef __linux__ 3179 type_register_static(&scsi_block_info); 3180 #endif 3181 type_register_static(&scsi_disk_info); 3182 } 3183 3184 type_init(scsi_disk_register_types) 3185
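
/*
 * Usage sketch (illustrative only): the device types registered above are
 * normally instantiated from the command line behind a SCSI HBA, e.g. a
 * virtio-scsi controller:
 *
 *   -device virtio-scsi-pci,id=scsi0
 *   -blockdev driver=qcow2,node-name=hd0,file.driver=file,file.filename=disk.qcow2
 *   -device scsi-hd,bus=scsi0.0,drive=hd0,serial=HD001,rotation_rate=1
 *
 * "serial" and "rotation_rate" map to the properties defined above; the
 * scsi0/hd0 identifiers and the file name are arbitrary examples.
 */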