/*
 * SCSI Device emulation
 *
 * Copyright (c) 2006 CodeSourcery.
 * Based on code by Fabrice Bellard
 *
 * Written by Paul Brook
 * Modifications:
 *  2009-Dec-12 Artyom Tarasenko : implemented standard inquiry for the case
 *                                 when the allocation length of CDB is smaller
 *                                 than 36.
 *  2009-Oct-13 Artyom Tarasenko : implemented the block descriptor in the
 *                                 MODE SENSE response.
 *
 * This code is licensed under the LGPL.
 *
 * Note that this file only handles the SCSI architecture model and device
 * commands.  Emulation of interface/link layer protocols is handled by
 * the host adapter emulator.
 */

//#define DEBUG_SCSI

#ifdef DEBUG_SCSI
#define DPRINTF(fmt, ...) \
do { printf("scsi-disk: " fmt , ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) do {} while(0)
#endif

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "hw/scsi/scsi.h"
#include "scsi/constants.h"
#include "sysemu/sysemu.h"
#include "sysemu/block-backend.h"
#include "sysemu/blockdev.h"
#include "hw/block/block.h"
#include "sysemu/dma.h"
#include "qemu/cutils.h"

#ifdef __linux
#include <scsi/sg.h>
#endif

#define SCSI_WRITE_SAME_MAX         (512 * KiB)
#define SCSI_DMA_BUF_SIZE           (128 * KiB)
#define SCSI_MAX_INQUIRY_LEN        256
#define SCSI_MAX_MODE_LEN           256

#define DEFAULT_DISCARD_GRANULARITY (4 * KiB)
#define DEFAULT_MAX_UNMAP_SIZE      (1 * GiB)
#define DEFAULT_MAX_IO_SIZE         INT_MAX     /* 2 GB - 1 block */

#define TYPE_SCSI_DISK_BASE         "scsi-disk-base"

#define SCSI_DISK_BASE(obj) \
     OBJECT_CHECK(SCSIDiskState, (obj), TYPE_SCSI_DISK_BASE)
#define SCSI_DISK_BASE_CLASS(klass) \
     OBJECT_CLASS_CHECK(SCSIDiskClass, (klass), TYPE_SCSI_DISK_BASE)
#define SCSI_DISK_BASE_GET_CLASS(obj) \
     OBJECT_GET_CLASS(SCSIDiskClass, (obj), TYPE_SCSI_DISK_BASE)

/*
 * Class of all scsi-disk-like devices.  Subclasses hook the raw read/write
 * vector I/O functions and the decision whether FUA must be emulated by an
 * explicit flush.
 */
typedef struct SCSIDiskClass {
    SCSIDeviceClass parent_class;
    DMAIOFunc       *dma_readv;                          /* vectored read into guest memory */
    DMAIOFunc       *dma_writev;                         /* vectored write from guest memory */
    bool            (*need_fua_emulation)(SCSICommand *cmd);
} SCSIDiskClass;

/* Per-request state layered on top of the generic SCSIRequest. */
typedef struct SCSIDiskReq {
    SCSIRequest req;
    /* Both sector and sector_count are in terms of qemu 512 byte blocks. */
    uint64_t sector;
    uint32_t sector_count;
    uint32_t buflen;             /* size of the bounce buffer in iov, 0 if none */
    bool started;
    bool need_fua_emulation;
    struct iovec iov;            /* single-element bounce buffer */
    QEMUIOVector qiov;           /* wraps iov for the block layer */
    BlockAcctCookie acct;        /* block-layer accounting cookie for this op */
    unsigned char *status;       /* optional pass-through status byte (e.g. SG_IO) */
} SCSIDiskReq;

/* Bits for SCSIDiskState::features */
#define SCSI_DISK_F_REMOVABLE             0
#define SCSI_DISK_F_DPOFUA                1
#define SCSI_DISK_F_NO_REMOVABLE_DEVOPS   2

typedef struct SCSIDiskState
{
    SCSIDevice qdev;
    uint32_t features;
    bool media_changed;
    bool media_event;
    bool eject_request;
    uint16_t port_index;
    uint64_t max_unmap_size;
    uint64_t max_io_size;
    QEMUBH *bh;
    char *version;
    char *serial;
    char *vendor;
    char *product;
    bool tray_open;
    bool tray_locked;
    /*
     * 0x0000        - rotation rate not reported
     * 0x0001        - non-rotating medium (SSD)
     * 0x0002-0x0400 - reserved
     * 0x0401-0xffe  - rotations per minute
     * 0xffff        - reserved
     */
    uint16_t rotation_rate;
} SCSIDiskState;

static bool scsi_handle_rw_error(SCSIDiskReq *r, int error, bool acct_failed);

/* Release the bounce buffer when the request is freed. */
static void scsi_free_request(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    qemu_vfree(r->iov.iov_base);
}

/* Helper function for command completion with sense.  */
static void scsi_check_condition(SCSIDiskReq *r, SCSISense sense)
{
    DPRINTF("Command complete tag=0x%x sense=%d/%d/%d\n",
            r->req.tag, sense.key, sense.asc, sense.ascq);
    scsi_req_build_sense(&r->req, sense);
    scsi_req_complete(&r->req, CHECK_CONDITION);
}

/*
 * (Re)initialize the request's single-element I/O vector.  The bounce buffer
 * is allocated lazily on first use and then reused; iov_len is clamped to the
 * smaller of the remaining transfer and the buffer size.
 */
static void scsi_init_iovec(SCSIDiskReq *r, size_t size)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    if (!r->iov.iov_base) {
        r->buflen = size;
        r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen);
    }
    r->iov.iov_len = MIN(r->sector_count * 512, r->buflen);
    qemu_iovec_init_external(&r->qiov, &r->iov, 1);
}

/* Migration: serialize the in-flight request state (and data if needed). */
static void scsi_disk_save_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    qemu_put_be64s(f, &r->sector);
    qemu_put_be32s(f, &r->sector_count);
    qemu_put_be32s(f, &r->buflen);
    if (r->buflen) {
        if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
            qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len);
        } else if (!req->retry) {
            uint32_t len = r->iov.iov_len;
            qemu_put_be32s(f, &len);
            qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len);
        }
    }
}

/* Migration: counterpart of scsi_disk_save_request. */
static void scsi_disk_load_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    qemu_get_be64s(f, &r->sector);
    qemu_get_be32s(f, &r->sector_count);
    qemu_get_be32s(f, &r->buflen);
    if (r->buflen) {
        scsi_init_iovec(r, r->buflen);
        if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
            qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len);
        } else if (!r->req.retry) {
            uint32_t len;
            qemu_get_be32s(f, &len);
            r->iov.iov_len = len;
            assert(r->iov.iov_len <= r->buflen);
            qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len);
        }
    }

    qemu_iovec_init_external(&r->qiov, &r->iov, 1);
}

/*
 * Common error check run from I/O completion paths.  Returns true if the
 * request has been finished (cancelled, or error fully handled) and the
 * caller must not touch it further, false to continue normal completion.
 */
static bool scsi_disk_req_check_error(SCSIDiskReq *r, int ret, bool acct_failed)
{
    if (r->req.io_canceled) {
        scsi_req_cancel_complete(&r->req);
        return true;
    }

    /* ret carries a negative errno; *r->status is a pass-through status byte */
    if (ret < 0 || (r->status && *r->status)) {
        return scsi_handle_rw_error(r, -ret, acct_failed);
    }

    return false;
}

/* Generic AIO completion: account, then finish the request with GOOD. */
static void scsi_aio_complete(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;
    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (scsi_disk_req_check_error(r, ret, true)) {
        goto done;
    }

    block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    scsi_req_complete(&r->req, GOOD);

done:
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
    scsi_req_unref(&r->req);
}

/*
 * Does this CDB request Force Unit Access semantics?  READ/WRITE 10/12/16
 * carry an FUA bit; VERIFY and WRITE AND VERIFY always imply it; 6-byte
 * commands have no FUA bit.
 */
static bool scsi_is_cmd_fua(SCSICommand *cmd)
{
    switch (cmd->buf[0]) {
    case READ_10:
    case READ_12:
    case READ_16:
    case WRITE_10:
    case WRITE_12:
    case WRITE_16:
        return (cmd->buf[1] & 8) != 0;

    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
    case WRITE_VERIFY_10:
    case WRITE_VERIFY_12:
    case WRITE_VERIFY_16:
        return true;

    case READ_6:
    case WRITE_6:
    default:
        return false;
    }
}

/*
 * Finish a write.  If FUA must be emulated, issue a flush (completion then
 * goes through scsi_aio_complete); otherwise complete with GOOD now.
 */
static void scsi_write_do_fua(SCSIDiskReq *r)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb == NULL);
    assert(!r->req.io_canceled);

    if (r->need_fua_emulation) {
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
        return;
    }

    scsi_req_complete(&r->req, GOOD);
    scsi_req_unref(&r->req);
}

/* DMA (scatter/gather) completion body; runs with no AIOCB outstanding. */
static void scsi_dma_complete_noio(SCSIDiskReq *r, int ret)
{
    assert(r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    /* The whole transfer completed in one go. */
    r->sector += r->sector_count;
    r->sector_count = 0;
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        scsi_write_do_fua(r);
        return;
    } else {
        scsi_req_complete(&r->req, GOOD);
    }

done:
    scsi_req_unref(&r->req);
}

/* AIO callback for dma_blk_io: do accounting, then the common completion. */
static void scsi_dma_complete(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    }
    scsi_dma_complete_noio(r, ret);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}

/* AIO callback for a bounce-buffered read chunk; pushes data to the HBA. */
static void scsi_read_complete(void * opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    int n;

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;
    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (scsi_disk_req_check_error(r, ret, true)) {
        goto done;
    }

    block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    DPRINTF("Data ready tag=0x%x len=%zd\n", r->req.tag, r->qiov.size);

    /* Advance by the number of 512-byte blocks actually read. */
    n = r->qiov.size / 512;
    r->sector += n;
    r->sector_count -= n;
    scsi_req_data(&r->req, r->qiov.size);

done:
    scsi_req_unref(&r->req);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}

/* Actually issue a read to the block device.  */
static void scsi_do_read(SCSIDiskReq *r, int ret)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));

    assert (r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    /* The request is used as the AIO opaque value, so add a ref.  */
    scsi_req_ref(&r->req);

    if (r->req.sg) {
        /* Direct scatter/gather path into guest memory. */
        dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_READ);
        r->req.resid -= r->req.sg->size;
        r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
                                  r->req.sg, r->sector << BDRV_SECTOR_BITS,
                                  BDRV_SECTOR_SIZE,
                                  sdc->dma_readv, r, scsi_dma_complete, r,
                                  DMA_DIRECTION_FROM_DEVICE);
    } else {
        /* Bounce-buffered path, one SCSI_DMA_BUF_SIZE chunk at a time. */
        scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         r->qiov.size, BLOCK_ACCT_READ);
        r->req.aiocb = sdc->dma_readv(r->sector << BDRV_SECTOR_BITS, &r->qiov,
                                      scsi_read_complete, r, r);
    }

done:
    scsi_req_unref(&r->req);
}

/* Flush-before-read completion (FUA emulation); falls through to the read. */
static void scsi_do_read_cb(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert (r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    }
    scsi_do_read(opaque, ret);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}

/* Read more data from scsi device into buffer.  */
static void scsi_read_data(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    bool first;

    DPRINTF("Read sector_count=%d\n", r->sector_count);
    if (r->sector_count == 0) {
        /* This also clears the sense buffer for REQUEST SENSE.  */
        scsi_req_complete(&r->req, GOOD);
        return;
    }

    /* No data transfer may already be in progress */
    assert(r->req.aiocb == NULL);

    /* The request is used as the AIO opaque value, so add a ref.  */
    scsi_req_ref(&r->req);
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        DPRINTF("Data transfer direction invalid\n");
        scsi_read_complete(r, -EINVAL);
        return;
    }

    if (!blk_is_available(req->dev->conf.blk)) {
        scsi_read_complete(r, -ENOMEDIUM);
        return;
    }

    first = !r->started;
    r->started = true;
    if (first && r->need_fua_emulation) {
        /* FUA read: flush the write cache before reading the first chunk. */
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_do_read_cb, r);
    } else {
        scsi_do_read(r, 0);
    }
}

/*
 * scsi_handle_rw_error has two return values.  False means that the error
 * must be ignored, true means that the error has been processed and the
 * caller should not do anything else for this request.  Note that
 * scsi_handle_rw_error always manages its reference counts, independent
 * of the return value.
 */
static bool scsi_handle_rw_error(SCSIDiskReq *r, int error, bool acct_failed)
{
    bool is_read = (r->req.cmd.mode == SCSI_XFER_FROM_DEV);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    BlockErrorAction action = blk_get_error_action(s->qdev.conf.blk,
                                                   is_read, error);

    if (action == BLOCK_ERROR_ACTION_REPORT) {
        if (acct_failed) {
            block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
        }
        /* Map the errno to an appropriate SCSI sense code. */
        switch (error) {
        case 0:
            /* The command has run, no need to fake sense.  */
            assert(r->status && *r->status);
            scsi_req_complete(&r->req, *r->status);
            break;
        case ENOMEDIUM:
            scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
            break;
        case ENOMEM:
            scsi_check_condition(r, SENSE_CODE(TARGET_FAILURE));
            break;
        case EINVAL:
            scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
            break;
        case ENOSPC:
            scsi_check_condition(r, SENSE_CODE(SPACE_ALLOC_FAILED));
            break;
        default:
            scsi_check_condition(r, SENSE_CODE(IO_ERROR));
            break;
        }
    }
    if (!error) {
        /* Pass-through completion: derive an errno from the sense buffer. */
        assert(r->status && *r->status);
        error = scsi_sense_buf_to_errno(r->req.sense, sizeof(r->req.sense));

        if (error == ECANCELED || error == EAGAIN || error == ENOTCONN ||
            error == 0) {
            /* These errors are handled by guest.  */
            scsi_req_complete(&r->req, *r->status);
            return true;
        }
    }

    blk_error_action(s->qdev.conf.blk, action, is_read, error);
    if (action == BLOCK_ERROR_ACTION_STOP) {
        scsi_req_retry(&r->req);
    }
    return action != BLOCK_ERROR_ACTION_IGNORE;
}

/* Write-chunk completion body; requests the next chunk or finishes. */
static void scsi_write_complete_noio(SCSIDiskReq *r, int ret)
{
    uint32_t n;

    assert (r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    /* Advance by the number of 512-byte blocks just written. */
    n = r->qiov.size / 512;
    r->sector += n;
    r->sector_count -= n;
    if (r->sector_count == 0) {
        scsi_write_do_fua(r);
        return;
    } else {
        scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
        DPRINTF("Write complete tag=0x%x more=%zd\n", r->req.tag, r->qiov.size);
        scsi_req_data(&r->req, r->qiov.size);
    }

done:
    scsi_req_unref(&r->req);
}

/* AIO callback for a bounce-buffered write chunk. */
static void scsi_write_complete(void * opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert (r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    }
    scsi_write_complete_noio(r, ret);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}

/* Called by the HBA when (more) write data is available. */
static void scsi_write_data(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));

    /* No data transfer may already be in progress */
    assert(r->req.aiocb == NULL);

    /* The request is used as the AIO opaque value, so add a ref.  */
    scsi_req_ref(&r->req);
    if (r->req.cmd.mode != SCSI_XFER_TO_DEV) {
        DPRINTF("Data transfer direction invalid\n");
        scsi_write_complete_noio(r, -EINVAL);
        return;
    }

    if (!r->req.sg && !r->qiov.size) {
        /* Called for the first time.  Ask the driver to send us more data.  */
        r->started = true;
        scsi_write_complete_noio(r, 0);
        return;
    }
    if (!blk_is_available(req->dev->conf.blk)) {
        scsi_write_complete_noio(r, -ENOMEDIUM);
        return;
    }

    /* VERIFY: consume the data without writing it to the medium. */
    if (r->req.cmd.buf[0] == VERIFY_10 || r->req.cmd.buf[0] == VERIFY_12 ||
        r->req.cmd.buf[0] == VERIFY_16) {
        if (r->req.sg) {
            scsi_dma_complete_noio(r, 0);
        } else {
            scsi_write_complete_noio(r, 0);
        }
        return;
    }

    if (r->req.sg) {
        /* Direct scatter/gather path from guest memory. */
        dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_WRITE);
        r->req.resid -= r->req.sg->size;
        r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
                                  r->req.sg, r->sector << BDRV_SECTOR_BITS,
                                  BDRV_SECTOR_SIZE,
                                  sdc->dma_writev, r, scsi_dma_complete, r,
                                  DMA_DIRECTION_TO_DEVICE);
    } else {
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         r->qiov.size, BLOCK_ACCT_WRITE);
        r->req.aiocb = sdc->dma_writev(r->sector << BDRV_SECTOR_BITS, &r->qiov,
                                       scsi_write_complete, r, r);
    }
}

/* Return a pointer to the data buffer.
 */
static uint8_t *scsi_get_buf(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    return (uint8_t *)r->iov.iov_base;
}

/*
 * Build an INQUIRY Vital Product Data page into outbuf.
 * Returns the total page length in bytes, or -1 for an unsupported page
 * (the caller turns that into INVALID FIELD).
 */
int scsi_disk_emulate_vpd_page(SCSIRequest *req, uint8_t *outbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    uint8_t page_code = req->cmd.buf[2];
    int start, buflen = 0;

    /* Common 4-byte VPD header; byte 3 (page length) is patched at the end. */
    outbuf[buflen++] = s->qdev.type & 0x1f;
    outbuf[buflen++] = page_code;
    outbuf[buflen++] = 0x00;
    outbuf[buflen++] = 0x00;
    start = buflen;

    switch (page_code) {
    case 0x00: /* Supported page codes, mandatory */
    {
        DPRINTF("Inquiry EVPD[Supported pages] "
                "buffer size %zd\n", req->cmd.xfer);
        outbuf[buflen++] = 0x00; /* list of supported pages (this page) */
        if (s->serial) {
            outbuf[buflen++] = 0x80; /* unit serial number */
        }
        outbuf[buflen++] = 0x83; /* device identification */
        if (s->qdev.type == TYPE_DISK) {
            outbuf[buflen++] = 0xb0; /* block limits */
            outbuf[buflen++] = 0xb1; /* block device characteristics */
            outbuf[buflen++] = 0xb2; /* thin provisioning */
        }
        break;
    }
    case 0x80: /* Device serial number, optional */
    {
        int l;

        if (!s->serial) {
            DPRINTF("Inquiry (EVPD[Serial number] not supported\n");
            return -1;
        }

        l = strlen(s->serial);
        if (l > 36) {
            l = 36;
        }

        DPRINTF("Inquiry EVPD[Serial number] "
                "buffer size %zd\n", req->cmd.xfer);
        memcpy(outbuf + buflen, s->serial, l);
        buflen += l;
        break;
    }

    case 0x83: /* Device identification page, mandatory */
    {
        const char *str = s->serial ?: blk_name(s->qdev.conf.blk);
        int max_len = s->serial ? 20 : 255 - 8;
        int id_len = strlen(str);

        if (id_len > max_len) {
            id_len = max_len;
        }
        DPRINTF("Inquiry EVPD[Device identification] "
                "buffer size %zd\n", req->cmd.xfer);

        /* T10 vendor-specific ASCII designator. */
        outbuf[buflen++] = 0x2; /* ASCII */
        outbuf[buflen++] = 0;   /* not officially assigned */
        outbuf[buflen++] = 0;   /* reserved */
        outbuf[buflen++] = id_len; /* length of data following */
        memcpy(outbuf + buflen, str, id_len);
        buflen += id_len;

        if (s->qdev.wwn) {
            outbuf[buflen++] = 0x1; /* Binary */
            outbuf[buflen++] = 0x3; /* NAA */
            outbuf[buflen++] = 0;   /* reserved */
            outbuf[buflen++] = 8;
            stq_be_p(&outbuf[buflen], s->qdev.wwn);
            buflen += 8;
        }

        if (s->qdev.port_wwn) {
            outbuf[buflen++] = 0x61; /* SAS / Binary */
            outbuf[buflen++] = 0x93; /* PIV / Target port / NAA */
            outbuf[buflen++] = 0;    /* reserved */
            outbuf[buflen++] = 8;
            stq_be_p(&outbuf[buflen], s->qdev.port_wwn);
            buflen += 8;
        }

        if (s->port_index) {
            outbuf[buflen++] = 0x61; /* SAS / Binary */

            /* PIV/Target port/relative target port */
            outbuf[buflen++] = 0x94;

            outbuf[buflen++] = 0;    /* reserved */
            outbuf[buflen++] = 4;
            stw_be_p(&outbuf[buflen + 2], s->port_index);
            buflen += 4;
        }
        break;
    }
    case 0xb0: /* block limits */
    {
        /* Convert the configured byte limits into logical-block counts. */
        unsigned int unmap_sectors =
            s->qdev.conf.discard_granularity / s->qdev.blocksize;
        unsigned int min_io_size =
            s->qdev.conf.min_io_size / s->qdev.blocksize;
        unsigned int opt_io_size =
            s->qdev.conf.opt_io_size / s->qdev.blocksize;
        unsigned int max_unmap_sectors =
            s->max_unmap_size / s->qdev.blocksize;
        unsigned int max_io_sectors =
            s->max_io_size / s->qdev.blocksize;

        if (s->qdev.type == TYPE_ROM) {
            DPRINTF("Inquiry (EVPD[%02X] not supported for CDROM\n",
                    page_code);
            return -1;
        }
        if (s->qdev.type == TYPE_DISK) {
            int max_transfer_blk = blk_get_max_transfer(s->qdev.conf.blk);
            int max_io_sectors_blk =
                max_transfer_blk / s->qdev.blocksize;

            max_io_sectors =
                MIN_NON_ZERO(max_io_sectors_blk, max_io_sectors);

            /* min_io_size and opt_io_size can't be greater than
             * max_io_sectors */
            if (min_io_size) {
                min_io_size = MIN(min_io_size, max_io_sectors);
            }
            if (opt_io_size) {
                opt_io_size = MIN(opt_io_size, max_io_sectors);
            }
        }
        /* required VPD size with unmap support */
        buflen = 0x40;
        memset(outbuf + 4, 0, buflen - 4);

        outbuf[4] = 0x1; /* wsnz */

        /* optimal transfer length granularity */
        outbuf[6] = (min_io_size >> 8) & 0xff;
        outbuf[7] = min_io_size & 0xff;

        /* maximum transfer length */
        outbuf[8] = (max_io_sectors >> 24) & 0xff;
        outbuf[9] = (max_io_sectors >> 16) & 0xff;
        outbuf[10] = (max_io_sectors >> 8) & 0xff;
        outbuf[11] = max_io_sectors & 0xff;

        /* optimal transfer length */
        outbuf[12] = (opt_io_size >> 24) & 0xff;
        outbuf[13] = (opt_io_size >> 16) & 0xff;
        outbuf[14] = (opt_io_size >> 8) & 0xff;
        outbuf[15] = opt_io_size & 0xff;

        /* max unmap LBA count, default is 1GB */
        outbuf[20] = (max_unmap_sectors >> 24) & 0xff;
        outbuf[21] = (max_unmap_sectors >> 16) & 0xff;
        outbuf[22] = (max_unmap_sectors >> 8) & 0xff;
        outbuf[23] = max_unmap_sectors & 0xff;

        /* max unmap descriptors, 255 fit in 4 kb with an 8-byte header */
        outbuf[24] = 0;
        outbuf[25] = 0;
        outbuf[26] = 0;
        outbuf[27] = 255;

        /* optimal unmap granularity */
        outbuf[28] = (unmap_sectors >> 24) & 0xff;
        outbuf[29] = (unmap_sectors >> 16) & 0xff;
        outbuf[30] = (unmap_sectors >> 8) & 0xff;
        outbuf[31] = unmap_sectors & 0xff;

        /* max write same size */
        outbuf[36] = 0;
        outbuf[37] = 0;
        outbuf[38] = 0;
        outbuf[39] = 0;

        outbuf[40] = (max_io_sectors >> 24) & 0xff;
        outbuf[41] = (max_io_sectors >> 16) & 0xff;
        outbuf[42] = (max_io_sectors >> 8) & 0xff;
        outbuf[43] = max_io_sectors & 0xff;
        break;
    }
    case 0xb1: /* block device characteristics */
    {
        buflen = 8;
        outbuf[4] = (s->rotation_rate >> 8) & 0xff;
        outbuf[5] = s->rotation_rate & 0xff;
        outbuf[6] = 0;
        outbuf[7] = 0;
        break;
    }
    case 0xb2: /* thin provisioning */
    {
        buflen = 8;
        outbuf[4] = 0;
        outbuf[5] = 0xe0; /* unmap & write_same 10/16 all supported */
        outbuf[6] = s->qdev.conf.discard_granularity ? 2 : 1;
        outbuf[7] = 0;
        break;
    }
    default:
        return -1;
    }
    /* done with EVPD */
    assert(buflen - start <= 255);
    outbuf[start - 1] = buflen - start;  /* patch the page-length byte */
    return buflen;
}

/*
 * Build the response for an INQUIRY command (standard or VPD).
 * Returns the response length, or -1 for an unsupported request.
 */
static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    int buflen = 0;

    if (req->cmd.buf[1] & 0x1) {
        /* Vital product data */
        return scsi_disk_emulate_vpd_page(req, outbuf);
    }

    /* Standard INQUIRY data */
    if (req->cmd.buf[2] != 0) {
        return -1;
    }

    /* PAGE CODE == 0 */
    buflen = req->cmd.xfer;
    if (buflen > SCSI_MAX_INQUIRY_LEN) {
        buflen = SCSI_MAX_INQUIRY_LEN;
    }

    outbuf[0] = s->qdev.type & 0x1f;
    outbuf[1] = (s->features & (1 << SCSI_DISK_F_REMOVABLE)) ? 0x80 : 0;

    strpadcpy((char *) &outbuf[16], 16, s->product, ' ');
    strpadcpy((char *) &outbuf[8], 8, s->vendor, ' ');

    memset(&outbuf[32], 0, 4);
    memcpy(&outbuf[32], s->version, MIN(4, strlen(s->version)));
    /*
     * We claim conformance to SPC-3, which is required for guests
     * to ask for modern features like READ CAPACITY(16) or the
     * block characteristics VPD page by default.  Not all of SPC-3
     * is actually implemented, but we're good enough.
     */
    outbuf[2] = s->qdev.default_scsi_version;
    outbuf[3] = 2 | 0x10; /* Format 2, HiSup */

    if (buflen > 36) {
        outbuf[4] = buflen - 5; /* Additional Length = (Len - 1) - 4 */
    } else {
        /* If the allocation length of CDB is too small,
               the additional length is not adjusted */
        outbuf[4] = 36 - 5;
    }

    /* Sync data transfer and TCQ.  */
    outbuf[7] = 0x10 | (req->bus->info->tcq ? 0x02 : 0);
    return buflen;
}

/* True if the inserted medium is large enough to be a DVD (MMC profile). */
static inline bool media_is_dvd(SCSIDiskState *s)
{
    uint64_t nb_sectors;
    if (s->qdev.type != TYPE_ROM) {
        return false;
    }
    if (!blk_is_available(s->qdev.conf.blk)) {
        return false;
    }
    blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
    return nb_sectors > CD_MAX_SECTORS;
}

/* True if the inserted medium fits within CD capacity. */
static inline bool media_is_cd(SCSIDiskState *s)
{
    uint64_t nb_sectors;
    if (s->qdev.type != TYPE_ROM) {
        return false;
    }
    if (!blk_is_available(s->qdev.conf.blk)) {
        return false;
    }
    blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
    return nb_sectors <= CD_MAX_SECTORS;
}

/* READ DISC INFORMATION (MMC): fixed reply describing a finalized disc. */
static int scsi_read_disc_information(SCSIDiskState *s, SCSIDiskReq *r,
                                      uint8_t *outbuf)
{
    uint8_t type = r->req.cmd.buf[1] & 7;

    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }

    /* Types 1/2 are only defined for Blu-Ray.  */
    if (type != 0) {
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        return -1;
    }

    memset(outbuf, 0, 34);
    outbuf[1] = 32; /* data length (remaining bytes) */
    outbuf[2] = 0xe; /* last session complete, disc finalized */
    outbuf[3] = 1;   /* first track on disc */
    outbuf[4] = 1;   /* # of sessions */
    outbuf[5] = 1;   /* first track of last session */
    outbuf[6] = 1;   /* last track of last session */
    outbuf[7] = 0x20; /* unrestricted use */
    outbuf[8] = 0x00; /* CD-ROM or DVD-ROM */
    /* 9-10-11: most significant byte corresponding bytes 4-5-6 */
    /* 12-23: not meaningful for CD-ROM or DVD-ROM */
    /* 24-31: disc bar code */
    /* 32: disc application code */
    /* 33: number of OPC tables */

    return 34;
}

/*
 * READ DVD STRUCTURE (MMC).  Returns the reply length, or -1 after
 * reporting the appropriate sense for invalid/unsupported requests.
 */
static int scsi_read_dvd_structure(SCSIDiskState *s, SCSIDiskReq *r,
                                   uint8_t *outbuf)
{
    /* Reply sizes per supported format code; unset entries are invalid. */
    static const int rds_caps_size[5] = {
        [0] = 2048 + 4,
        [1] = 4 + 4,
        [3] = 188 + 4,
        [4] = 2048 + 4,
    };

    uint8_t media = r->req.cmd.buf[1];
    uint8_t layer = r->req.cmd.buf[6];
    uint8_t format = r->req.cmd.buf[7];
    int size = -1;

    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }
    if (media != 0) {
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        return -1;
    }

    if (format != 0xff) {
        if (!blk_is_available(s->qdev.conf.blk)) {
            scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
            return -1;
        }
        if (media_is_cd(s)) {
            scsi_check_condition(r, SENSE_CODE(INCOMPATIBLE_FORMAT));
            return -1;
        }
        if (format >= ARRAY_SIZE(rds_caps_size)) {
            return -1;
        }
        size = rds_caps_size[format];
        memset(outbuf, 0, size);
    }

    switch (format) {
    case 0x00: {
        /* Physical format information */
        uint64_t nb_sectors;
        if (layer != 0) {
            goto fail;
        }
        blk_get_geometry(s->qdev.conf.blk, &nb_sectors);

        outbuf[4] = 1;   /* DVD-ROM, part version 1 */
        outbuf[5] = 0xf; /* 120mm disc, minimum rate unspecified */
        outbuf[6] = 1;   /* one layer, read-only (per MMC-2 spec) */
        outbuf[7] = 0;   /* default densities */

        stl_be_p(&outbuf[12], (nb_sectors >> 2) - 1); /* end sector */
        stl_be_p(&outbuf[16], (nb_sectors >> 2) - 1); /* l0 end sector */
        break;
    }

    case 0x01: /* DVD copyright information, all zeros */
        break;

    case 0x03: /* BCA information - invalid field for no BCA info */
        return -1;

    case 0x04: /* DVD disc manufacturing information, all zeros */
        break;

    case 0xff: { /* List capabilities */
        int i;
        size = 4;
        for (i = 0; i < ARRAY_SIZE(rds_caps_size); i++) {
            if (!rds_caps_size[i]) {
                continue;
            }
            outbuf[size] = i;
            outbuf[size + 1] = 0x40; /* Not writable, readable */
            stw_be_p(&outbuf[size + 2], rds_caps_size[i]);
            size += 4;
        }
        break;
    }

    default:
        return -1;
    }

    /* Size of buffer, not including 2 byte size field */
    stw_be_p(outbuf, size - 2);
    return size;

fail:
    return -1;
}

/* Fill one media event descriptor for GET EVENT STATUS NOTIFICATION. */
static int scsi_event_status_media(SCSIDiskState *s, uint8_t *outbuf)
{
    uint8_t event_code, media_status;

    media_status = 0;
    if (s->tray_open) {
        media_status = MS_TRAY_OPEN;
    } else if (blk_is_inserted(s->qdev.conf.blk)) {
        media_status = MS_MEDIA_PRESENT;
    }

    /* Event notification descriptor */
    event_code = MEC_NO_CHANGE;
    if (media_status != MS_TRAY_OPEN) {
        /* Report (and consume) at most one pending event per call. */
        if (s->media_event) {
            event_code = MEC_NEW_MEDIA;
            s->media_event = false;
        } else if (s->eject_request) {
            event_code = MEC_EJECT_REQUESTED;
            s->eject_request = false;
        }
    }

    outbuf[0] = event_code;
    outbuf[1] = media_status;

    /* These fields are reserved, just clear them.  */
    outbuf[2] = 0;
    outbuf[3] = 0;
    return 4;
}

/* GET EVENT STATUS NOTIFICATION (MMC); only the polled mode is supported. */
static int scsi_get_event_status_notification(SCSIDiskState *s, SCSIDiskReq *r,
                                              uint8_t *outbuf)
{
    int size;
    uint8_t *buf = r->req.cmd.buf;
    uint8_t notification_class_request = buf[4];
    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }
    if ((buf[1] & 1) == 0) {
        /* asynchronous */
        return -1;
    }

    size = 4;
    outbuf[0] = outbuf[1] = 0;
    outbuf[3] = 1 << GESN_MEDIA; /* supported events */
    if (notification_class_request & (1 << GESN_MEDIA)) {
        outbuf[2] = GESN_MEDIA;
        size += scsi_event_status_media(s, &outbuf[size]);
    } else {
        outbuf[2] = 0x80; /* no event available for the requested classes */
    }
    stw_be_p(outbuf, size - 4);
    return size;
}

/* GET CONFIGURATION (MMC): report the current profile and basic features. */
static int scsi_get_configuration(SCSIDiskState *s, uint8_t *outbuf)
{
    int current;

    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }

    if (media_is_dvd(s)) {
        current = MMC_PROFILE_DVD_ROM;
    } else if (media_is_cd(s)) {
        current = MMC_PROFILE_CD_ROM;
    } else {
        current = MMC_PROFILE_NONE;
    }

    memset(outbuf, 0, 40);
    stl_be_p(&outbuf[0], 36); /* Bytes after the data length field */
    stw_be_p(&outbuf[6], current);
    /* outbuf[8] - outbuf[19]: Feature 0 - Profile list */
    outbuf[10] = 0x03; /* persistent, current */
    outbuf[11] = 8;    /* two profiles */
    stw_be_p(&outbuf[12], MMC_PROFILE_DVD_ROM);
    outbuf[14] = (current == MMC_PROFILE_DVD_ROM);
    stw_be_p(&outbuf[16], MMC_PROFILE_CD_ROM);
    outbuf[18] = (current == MMC_PROFILE_CD_ROM);
    /* outbuf[20] - outbuf[31]: Feature 1 - Core feature */
    stw_be_p(&outbuf[20], 1);
    outbuf[22] = 0x08 | 0x03; /* version 2, persistent, current */
    outbuf[23] = 8;
    stl_be_p(&outbuf[24], 1); /* SCSI */
    outbuf[28] = 1; /* DBE = 1, mandatory */
    /* outbuf[32] - outbuf[39]: Feature 3 - Removable media feature */
    stw_be_p(&outbuf[32], 3);
    outbuf[34] = 0x08 | 0x03; /* version 2, persistent, current */
    outbuf[35] = 4;
    outbuf[36] = 0x39; /* tray, load=1, eject=1, unlocked at powerup, lock=1 */
    /* TODO: Random readable, CD read, DVD read, drive serial number,
       power management */
    return 40;
}

/* MECHANISM STATUS (MMC): minimal fixed reply. */
static int scsi_emulate_mechanism_status(SCSIDiskState *s, uint8_t *outbuf)
{
    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }
    memset(outbuf, 0, 8);
    outbuf[5] = 1; /* CD-ROM */
    return 8;
}

/*
 * Emit one mode page into *p_outbuf for MODE SENSE.
 * Returns -1 if the page is not valid for this device type.
 */
static int mode_sense_page(SCSIDiskState *s, int page, uint8_t **p_outbuf,
                           int page_control)
{
    /* Bitmap of device types (TYPE_DISK/TYPE_ROM) each page applies to. */
    static const int mode_sense_valid[0x3f] = {
        [MODE_PAGE_HD_GEOMETRY]            = (1 << TYPE_DISK),
        [MODE_PAGE_FLEXIBLE_DISK_GEOMETRY] = (1 << TYPE_DISK),
        [MODE_PAGE_CACHING]                = (1 << TYPE_DISK) | (1 << TYPE_ROM),
        [MODE_PAGE_R_W_ERROR]              = (1 << TYPE_DISK) | (1 << TYPE_ROM),
        [MODE_PAGE_AUDIO_CTL]              = (1 << TYPE_ROM),
        [MODE_PAGE_CAPABILITIES]           = (1 << TYPE_ROM),
    };

    uint8_t *p = *p_outbuf + 2;
    int length;

    if ((mode_sense_valid[page] & (1 << s->qdev.type)) == 0) {
        return -1;
    }

    /*
     * If Changeable Values are requested, a mask denoting those mode parameters
     * that are changeable shall be returned. As we currently don't support
     * parameter changes via MODE_SELECT all bits are returned set to zero.
     * The buffer was already memset to zero by the caller of this function.
     *
     * The offsets here are off by two compared to the descriptions in the
     * SCSI specs, because those include a 2-byte header. This is unfortunate,
     * but it is done so that offsets are consistent within our implementation
     * of MODE SENSE and MODE SELECT. MODE SELECT has to deal with both
     * 2-byte and 4-byte headers.
1148 */ 1149 switch (page) { 1150 case MODE_PAGE_HD_GEOMETRY: 1151 length = 0x16; 1152 if (page_control == 1) { /* Changeable Values */ 1153 break; 1154 } 1155 /* if a geometry hint is available, use it */ 1156 p[0] = (s->qdev.conf.cyls >> 16) & 0xff; 1157 p[1] = (s->qdev.conf.cyls >> 8) & 0xff; 1158 p[2] = s->qdev.conf.cyls & 0xff; 1159 p[3] = s->qdev.conf.heads & 0xff; 1160 /* Write precomp start cylinder, disabled */ 1161 p[4] = (s->qdev.conf.cyls >> 16) & 0xff; 1162 p[5] = (s->qdev.conf.cyls >> 8) & 0xff; 1163 p[6] = s->qdev.conf.cyls & 0xff; 1164 /* Reduced current start cylinder, disabled */ 1165 p[7] = (s->qdev.conf.cyls >> 16) & 0xff; 1166 p[8] = (s->qdev.conf.cyls >> 8) & 0xff; 1167 p[9] = s->qdev.conf.cyls & 0xff; 1168 /* Device step rate [ns], 200ns */ 1169 p[10] = 0; 1170 p[11] = 200; 1171 /* Landing zone cylinder */ 1172 p[12] = 0xff; 1173 p[13] = 0xff; 1174 p[14] = 0xff; 1175 /* Medium rotation rate [rpm], 5400 rpm */ 1176 p[18] = (5400 >> 8) & 0xff; 1177 p[19] = 5400 & 0xff; 1178 break; 1179 1180 case MODE_PAGE_FLEXIBLE_DISK_GEOMETRY: 1181 length = 0x1e; 1182 if (page_control == 1) { /* Changeable Values */ 1183 break; 1184 } 1185 /* Transfer rate [kbit/s], 5Mbit/s */ 1186 p[0] = 5000 >> 8; 1187 p[1] = 5000 & 0xff; 1188 /* if a geometry hint is available, use it */ 1189 p[2] = s->qdev.conf.heads & 0xff; 1190 p[3] = s->qdev.conf.secs & 0xff; 1191 p[4] = s->qdev.blocksize >> 8; 1192 p[6] = (s->qdev.conf.cyls >> 8) & 0xff; 1193 p[7] = s->qdev.conf.cyls & 0xff; 1194 /* Write precomp start cylinder, disabled */ 1195 p[8] = (s->qdev.conf.cyls >> 8) & 0xff; 1196 p[9] = s->qdev.conf.cyls & 0xff; 1197 /* Reduced current start cylinder, disabled */ 1198 p[10] = (s->qdev.conf.cyls >> 8) & 0xff; 1199 p[11] = s->qdev.conf.cyls & 0xff; 1200 /* Device step rate [100us], 100us */ 1201 p[12] = 0; 1202 p[13] = 1; 1203 /* Device step pulse width [us], 1us */ 1204 p[14] = 1; 1205 /* Device head settle delay [100us], 100us */ 1206 p[15] = 0; 1207 p[16] = 1; 1208 /* Motor 
on delay [0.1s], 0.1s */ 1209 p[17] = 1; 1210 /* Motor off delay [0.1s], 0.1s */ 1211 p[18] = 1; 1212 /* Medium rotation rate [rpm], 5400 rpm */ 1213 p[26] = (5400 >> 8) & 0xff; 1214 p[27] = 5400 & 0xff; 1215 break; 1216 1217 case MODE_PAGE_CACHING: 1218 length = 0x12; 1219 if (page_control == 1 || /* Changeable Values */ 1220 blk_enable_write_cache(s->qdev.conf.blk)) { 1221 p[0] = 4; /* WCE */ 1222 } 1223 break; 1224 1225 case MODE_PAGE_R_W_ERROR: 1226 length = 10; 1227 if (page_control == 1) { /* Changeable Values */ 1228 break; 1229 } 1230 p[0] = 0x80; /* Automatic Write Reallocation Enabled */ 1231 if (s->qdev.type == TYPE_ROM) { 1232 p[1] = 0x20; /* Read Retry Count */ 1233 } 1234 break; 1235 1236 case MODE_PAGE_AUDIO_CTL: 1237 length = 14; 1238 break; 1239 1240 case MODE_PAGE_CAPABILITIES: 1241 length = 0x14; 1242 if (page_control == 1) { /* Changeable Values */ 1243 break; 1244 } 1245 1246 p[0] = 0x3b; /* CD-R & CD-RW read */ 1247 p[1] = 0; /* Writing not supported */ 1248 p[2] = 0x7f; /* Audio, composite, digital out, 1249 mode 2 form 1&2, multi session */ 1250 p[3] = 0xff; /* CD DA, DA accurate, RW supported, 1251 RW corrected, C2 errors, ISRC, 1252 UPC, Bar code */ 1253 p[4] = 0x2d | (s->tray_locked ? 
2 : 0); 1254 /* Locking supported, jumper present, eject, tray */ 1255 p[5] = 0; /* no volume & mute control, no 1256 changer */ 1257 p[6] = (50 * 176) >> 8; /* 50x read speed */ 1258 p[7] = (50 * 176) & 0xff; 1259 p[8] = 2 >> 8; /* Two volume levels */ 1260 p[9] = 2 & 0xff; 1261 p[10] = 2048 >> 8; /* 2M buffer */ 1262 p[11] = 2048 & 0xff; 1263 p[12] = (16 * 176) >> 8; /* 16x read speed current */ 1264 p[13] = (16 * 176) & 0xff; 1265 p[16] = (16 * 176) >> 8; /* 16x write speed */ 1266 p[17] = (16 * 176) & 0xff; 1267 p[18] = (16 * 176) >> 8; /* 16x write speed current */ 1268 p[19] = (16 * 176) & 0xff; 1269 break; 1270 1271 default: 1272 return -1; 1273 } 1274 1275 assert(length < 256); 1276 (*p_outbuf)[0] = page; 1277 (*p_outbuf)[1] = length; 1278 *p_outbuf += length + 2; 1279 return length + 2; 1280 } 1281 1282 static int scsi_disk_emulate_mode_sense(SCSIDiskReq *r, uint8_t *outbuf) 1283 { 1284 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1285 uint64_t nb_sectors; 1286 bool dbd; 1287 int page, buflen, ret, page_control; 1288 uint8_t *p; 1289 uint8_t dev_specific_param; 1290 1291 dbd = (r->req.cmd.buf[1] & 0x8) != 0; 1292 page = r->req.cmd.buf[2] & 0x3f; 1293 page_control = (r->req.cmd.buf[2] & 0xc0) >> 6; 1294 DPRINTF("Mode Sense(%d) (page %d, xfer %zd, page_control %d)\n", 1295 (r->req.cmd.buf[0] == MODE_SENSE) ? 6 : 10, page, r->req.cmd.xfer, page_control); 1296 memset(outbuf, 0, r->req.cmd.xfer); 1297 p = outbuf; 1298 1299 if (s->qdev.type == TYPE_DISK) { 1300 dev_specific_param = s->features & (1 << SCSI_DISK_F_DPOFUA) ? 0x10 : 0; 1301 if (blk_is_read_only(s->qdev.conf.blk)) { 1302 dev_specific_param |= 0x80; /* Readonly. */ 1303 } 1304 } else { 1305 /* MMC prescribes that CD/DVD drives have no block descriptors, 1306 * and defines no device-specific parameter. */ 1307 dev_specific_param = 0x00; 1308 dbd = true; 1309 } 1310 1311 if (r->req.cmd.buf[0] == MODE_SENSE) { 1312 p[1] = 0; /* Default media type. 
*/ 1313 p[2] = dev_specific_param; 1314 p[3] = 0; /* Block descriptor length. */ 1315 p += 4; 1316 } else { /* MODE_SENSE_10 */ 1317 p[2] = 0; /* Default media type. */ 1318 p[3] = dev_specific_param; 1319 p[6] = p[7] = 0; /* Block descriptor length. */ 1320 p += 8; 1321 } 1322 1323 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 1324 if (!dbd && nb_sectors) { 1325 if (r->req.cmd.buf[0] == MODE_SENSE) { 1326 outbuf[3] = 8; /* Block descriptor length */ 1327 } else { /* MODE_SENSE_10 */ 1328 outbuf[7] = 8; /* Block descriptor length */ 1329 } 1330 nb_sectors /= (s->qdev.blocksize / 512); 1331 if (nb_sectors > 0xffffff) { 1332 nb_sectors = 0; 1333 } 1334 p[0] = 0; /* media density code */ 1335 p[1] = (nb_sectors >> 16) & 0xff; 1336 p[2] = (nb_sectors >> 8) & 0xff; 1337 p[3] = nb_sectors & 0xff; 1338 p[4] = 0; /* reserved */ 1339 p[5] = 0; /* bytes 5-7 are the sector size in bytes */ 1340 p[6] = s->qdev.blocksize >> 8; 1341 p[7] = 0; 1342 p += 8; 1343 } 1344 1345 if (page_control == 3) { 1346 /* Saved Values */ 1347 scsi_check_condition(r, SENSE_CODE(SAVING_PARAMS_NOT_SUPPORTED)); 1348 return -1; 1349 } 1350 1351 if (page == 0x3f) { 1352 for (page = 0; page <= 0x3e; page++) { 1353 mode_sense_page(s, page, &p, page_control); 1354 } 1355 } else { 1356 ret = mode_sense_page(s, page, &p, page_control); 1357 if (ret == -1) { 1358 return -1; 1359 } 1360 } 1361 1362 buflen = p - outbuf; 1363 /* 1364 * The mode data length field specifies the length in bytes of the 1365 * following data that is available to be transferred. The mode data 1366 * length does not include itself. 
1367 */ 1368 if (r->req.cmd.buf[0] == MODE_SENSE) { 1369 outbuf[0] = buflen - 1; 1370 } else { /* MODE_SENSE_10 */ 1371 outbuf[0] = ((buflen - 2) >> 8) & 0xff; 1372 outbuf[1] = (buflen - 2) & 0xff; 1373 } 1374 return buflen; 1375 } 1376 1377 static int scsi_disk_emulate_read_toc(SCSIRequest *req, uint8_t *outbuf) 1378 { 1379 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 1380 int start_track, format, msf, toclen; 1381 uint64_t nb_sectors; 1382 1383 msf = req->cmd.buf[1] & 2; 1384 format = req->cmd.buf[2] & 0xf; 1385 start_track = req->cmd.buf[6]; 1386 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 1387 DPRINTF("Read TOC (track %d format %d msf %d)\n", start_track, format, msf >> 1); 1388 nb_sectors /= s->qdev.blocksize / 512; 1389 switch (format) { 1390 case 0: 1391 toclen = cdrom_read_toc(nb_sectors, outbuf, msf, start_track); 1392 break; 1393 case 1: 1394 /* multi session : only a single session defined */ 1395 toclen = 12; 1396 memset(outbuf, 0, 12); 1397 outbuf[1] = 0x0a; 1398 outbuf[2] = 0x01; 1399 outbuf[3] = 0x01; 1400 break; 1401 case 2: 1402 toclen = cdrom_read_toc_raw(nb_sectors, outbuf, msf, start_track); 1403 break; 1404 default: 1405 return -1; 1406 } 1407 return toclen; 1408 } 1409 1410 static int scsi_disk_emulate_start_stop(SCSIDiskReq *r) 1411 { 1412 SCSIRequest *req = &r->req; 1413 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 1414 bool start = req->cmd.buf[4] & 1; 1415 bool loej = req->cmd.buf[4] & 2; /* load on start, eject on !start */ 1416 int pwrcnd = req->cmd.buf[4] & 0xf0; 1417 1418 if (pwrcnd) { 1419 /* eject/load only happens for power condition == 0 */ 1420 return 0; 1421 } 1422 1423 if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) && loej) { 1424 if (!start && !s->tray_open && s->tray_locked) { 1425 scsi_check_condition(r, 1426 blk_is_inserted(s->qdev.conf.blk) 1427 ? 
                                 SENSE_CODE(ILLEGAL_REQ_REMOVAL_PREVENTED)
                                 : SENSE_CODE(NOT_READY_REMOVAL_PREVENTED));
            return -1;
        }

        if (s->tray_open != !start) {
            blk_eject(s->qdev.conf.blk, !start);
            s->tray_open = !start;
        }
    }
    return 0;
}

/*
 * Transfer phase for emulated commands that return data: hand the
 * buffered response to the HBA on the first call, complete the request
 * on the second.
 */
static void scsi_disk_emulate_read_data(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    int buflen = r->iov.iov_len;

    if (buflen) {
        DPRINTF("Read buf_len=%d\n", buflen);
        /* Zero iov_len so the next invocation completes the request. */
        r->iov.iov_len = 0;
        r->started = true;
        scsi_req_data(&r->req, buflen);
        return;
    }

    /* This also clears the sense buffer for REQUEST SENSE.  */
    scsi_req_complete(&r->req, GOOD);
}

/*
 * Validate one MODE SELECT page against what MODE SENSE reports:
 * any bit the guest tries to modify that is not marked changeable
 * must match the current value.  Returns 0 if acceptable, -1 if not.
 */
static int scsi_disk_check_mode_select(SCSIDiskState *s, int page,
                                       uint8_t *inbuf, int inlen)
{
    uint8_t mode_current[SCSI_MAX_MODE_LEN];
    uint8_t mode_changeable[SCSI_MAX_MODE_LEN];
    uint8_t *p;
    int len, expected_len, changeable_len, i;

    /* The input buffer does not include the page header, so it is
     * off by 2 bytes.
     */
    expected_len = inlen + 2;
    if (expected_len > SCSI_MAX_MODE_LEN) {
        return -1;
    }

    /* Current values of the page... */
    p = mode_current;
    memset(mode_current, 0, inlen + 2);
    len = mode_sense_page(s, page, &p, 0);
    if (len < 0 || len != expected_len) {
        return -1;
    }

    /* ...and the mask of bits the guest is allowed to change. */
    p = mode_changeable;
    memset(mode_changeable, 0, inlen + 2);
    changeable_len = mode_sense_page(s, page, &p, 1);
    assert(changeable_len == len);

    /* Check that unchangeable bits are the same as what MODE SENSE
     * would return.
     */
    for (i = 2; i < len; i++) {
        if (((mode_current[i] ^ inbuf[i - 2]) & ~mode_changeable[i]) != 0) {
            return -1;
        }
    }
    return 0;
}

/*
 * Apply an already-validated MODE SELECT page.  Only the caching page
 * has an effect (toggling the write cache); everything else is a no-op.
 */
static void scsi_disk_apply_mode_select(SCSIDiskState *s, int page, uint8_t *p)
{
    switch (page) {
    case MODE_PAGE_CACHING:
        blk_set_enable_write_cache(s->qdev.conf.blk, (p[0] & 4) != 0);
        break;

    default:
        break;
    }
}

/*
 * Walk the mode pages of a MODE SELECT parameter list.  With
 * change == false only validation is done; with change == true the
 * pages are applied.  Queues a sense condition and returns -1 on
 * malformed input.
 */
static int mode_select_pages(SCSIDiskReq *r, uint8_t *p, int len, bool change)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    while (len > 0) {
        int page, subpage, page_len;

        /* Parse both possible formats for the mode page headers.  */
        page = p[0] & 0x3f;
        if (p[0] & 0x40) {
            /* SPF=1: 4-byte sub-page format. */
            if (len < 4) {
                goto invalid_param_len;
            }
            subpage = p[1];
            page_len = lduw_be_p(&p[2]);
            p += 4;
            len -= 4;
        } else {
            /* SPF=0: 2-byte page format. */
            if (len < 2) {
                goto invalid_param_len;
            }
            subpage = 0;
            page_len = p[1];
            p += 2;
            len -= 2;
        }

        /* Sub-pages are not implemented. */
        if (subpage) {
            goto invalid_param;
        }
        if (page_len > len) {
            goto invalid_param_len;
        }

        if (!change) {
            if (scsi_disk_check_mode_select(s, page, p, page_len) < 0) {
                goto invalid_param;
            }
        } else {
            scsi_disk_apply_mode_select(s, page, p);
        }

        p += page_len;
        len -= page_len;
    }
    return 0;

invalid_param:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM));
    return -1;

invalid_param_len:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
    return -1;
}

/*
 * MODE SELECT(6)/(10).  Validates all pages first, then applies them,
 * so a bad parameter list never makes a partial change.
 */
static void scsi_disk_emulate_mode_select(SCSIDiskReq *r, uint8_t *inbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    uint8_t *p = inbuf;
    int cmd = r->req.cmd.buf[0];
    int len = r->req.cmd.xfer;
    int hdr_len = (cmd == MODE_SELECT ?
4 : 8);
    int bd_len;
    int pass;

    /* We only support PF=1, SP=0.  */
    if ((r->req.cmd.buf[1] & 0x11) != 0x10) {
        goto invalid_field;
    }

    if (len < hdr_len) {
        goto invalid_param_len;
    }

    /* Skip the parameter header and the optional block descriptor. */
    bd_len = (cmd == MODE_SELECT ? p[3] : lduw_be_p(&p[6]));
    len -= hdr_len;
    p += hdr_len;
    if (len < bd_len) {
        goto invalid_param_len;
    }
    if (bd_len != 0 && bd_len != 8) {
        goto invalid_param;
    }

    len -= bd_len;
    p += bd_len;

    /* Ensure no change is made if there is an error!  */
    for (pass = 0; pass < 2; pass++) {
        if (mode_select_pages(r, p, len, pass == 1) < 0) {
            assert(pass == 0);
            return;
        }
    }
    if (!blk_enable_write_cache(s->qdev.conf.blk)) {
        /* The request is used as the AIO opaque value, so add a ref.  */
        scsi_req_ref(&r->req);
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
        return;
    }

    scsi_req_complete(&r->req, GOOD);
    return;

invalid_param:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM));
    return;

invalid_param_len:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
    return;

invalid_field:
    scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
}

static inline bool check_lba_range(SCSIDiskState *s,
                                   uint64_t sector_num, uint32_t nb_sectors)
{
    /*
     * The first line tests that no overflow happens when computing the last
     * sector.  The second line tests that the last accessed sector is in
     * range.
     *
     * Careful, the computations should not underflow for nb_sectors == 0,
     * and a 0-block read to the first LBA beyond the end of device is
     * valid.
     */
    return (sector_num <= sector_num + nb_sectors &&
            sector_num + nb_sectors <= s->qdev.max_lba + 1);
}

/* State carried across the chain of per-descriptor discard operations. */
typedef struct UnmapCBData {
    SCSIDiskReq *r;
    uint8_t *inbuf;   /* next 16-byte UNMAP block descriptor */
    int count;        /* descriptors still to process */
} UnmapCBData;

static void scsi_unmap_complete(void *opaque, int ret);

/*
 * Issue the discard for the next UNMAP descriptor, or complete the
 * request once all descriptors (or an error) have been handled.
 * Consumes the reference taken in scsi_disk_emulate_unmap when done.
 */
static void scsi_unmap_complete_noio(UnmapCBData *data, int ret)
{
    SCSIDiskReq *r = data->r;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    uint64_t sector_num;
    uint32_t nb_sectors;

    assert(r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    if (data->count > 0) {
        sector_num = ldq_be_p(&data->inbuf[0]);
        nb_sectors = ldl_be_p(&data->inbuf[8]) & 0xffffffffULL;
        if (!check_lba_range(s, sector_num, nb_sectors)) {
            scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
            goto done;
        }

        r->req.aiocb = blk_aio_pdiscard(s->qdev.conf.blk,
                                        sector_num * s->qdev.blocksize,
                                        nb_sectors * s->qdev.blocksize,
                                        scsi_unmap_complete, data);
        data->count--;
        data->inbuf += 16;
        return;
    }

    scsi_req_complete(&r->req, GOOD);

done:
    scsi_req_unref(&r->req);
    g_free(data);
}

/* AIO callback: re-enter the descriptor loop under the AioContext lock. */
static void scsi_unmap_complete(void *opaque, int ret)
{
    UnmapCBData *data = opaque;
    SCSIDiskReq *r = data->r;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    scsi_unmap_complete_noio(data, ret);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}

/*
 * UNMAP (SBC).  Validates the parameter list and kicks off the chain
 * of per-descriptor discards.
 */
static void scsi_disk_emulate_unmap(SCSIDiskReq *r, uint8_t *inbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    uint8_t *p = inbuf;
    int len = r->req.cmd.xfer;
    UnmapCBData *data;

    /* Reject ANCHOR=1.
 */
    if (r->req.cmd.buf[1] & 0x1) {
        goto invalid_field;
    }

    /* Sanity-check the UNMAP parameter list lengths. */
    if (len < 8) {
        goto invalid_param_len;
    }
    if (len < lduw_be_p(&p[0]) + 2) {
        goto invalid_param_len;
    }
    if (len < lduw_be_p(&p[2]) + 8) {
        goto invalid_param_len;
    }
    /* The block descriptor data must be a multiple of 16 bytes. */
    if (lduw_be_p(&p[2]) & 15) {
        goto invalid_param_len;
    }

    if (blk_is_read_only(s->qdev.conf.blk)) {
        scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
        return;
    }

    data = g_new0(UnmapCBData, 1);
    data->r = r;
    data->inbuf = &p[8];
    data->count = lduw_be_p(&p[2]) >> 4;   /* 16 bytes per descriptor */

    /* The matching unref is in scsi_unmap_complete, before data is freed.  */
    scsi_req_ref(&r->req);
    scsi_unmap_complete_noio(data, 0);
    return;

invalid_param_len:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
    return;

invalid_field:
    scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
}

/* State for a WRITE SAME that is emulated with chunked buffered writes. */
typedef struct WriteSameCBData {
    SCSIDiskReq *r;
    int64_t sector;        /* next 512-byte sector to write */
    int nb_sectors;        /* 512-byte sectors still to write */
    QEMUIOVector qiov;
    struct iovec iov;
} WriteSameCBData;

/*
 * AIO callback for one WRITE SAME chunk: account it, then either issue
 * the next chunk or complete the request.  Consumes the request
 * reference and frees data on the final pass.
 */
static void scsi_write_same_complete(void *opaque, int ret)
{
    WriteSameCBData *data = opaque;
    SCSIDiskReq *r = data->r;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;
    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (scsi_disk_req_check_error(r, ret, true)) {
        goto done;
    }

    block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);

    data->nb_sectors -= data->iov.iov_len / 512;
    data->sector += data->iov.iov_len / 512;
    data->iov.iov_len = MIN(data->nb_sectors * 512, data->iov.iov_len);
    if (data->iov.iov_len) {
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         data->iov.iov_len, BLOCK_ACCT_WRITE);
        /* Reinitialize qiov, to handle unaligned WRITE SAME request
         * where final qiov may need smaller size */
        qemu_iovec_init_external(&data->qiov, &data->iov, 1);
        r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk,
                                       data->sector << BDRV_SECTOR_BITS,
                                       &data->qiov, 0,
                                       scsi_write_same_complete, data);
        aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
        return;
    }

    scsi_req_complete(&r->req, GOOD);

done:
    scsi_req_unref(&r->req);
    qemu_vfree(data->iov.iov_base);
    g_free(data);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}

/*
 * WRITE SAME(10)/(16).  A zero pattern (or UNMAP=1) is turned into an
 * efficient write-zeroes operation; any other pattern is replicated
 * into a bounce buffer and written out in SCSI_WRITE_SAME_MAX chunks.
 */
static void scsi_disk_emulate_write_same(SCSIDiskReq *r, uint8_t *inbuf)
{
    SCSIRequest *req = &r->req;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    uint32_t nb_sectors = scsi_data_cdb_xfer(r->req.cmd.buf);
    WriteSameCBData *data;
    uint8_t *buf;
    int i;

    /* Fail if PBDATA=1 or LBDATA=1 or ANCHOR=1.  */
    if (nb_sectors == 0 || (req->cmd.buf[1] & 0x16)) {
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        return;
    }

    if (blk_is_read_only(s->qdev.conf.blk)) {
        scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
        return;
    }
    if (!check_lba_range(s, r->req.cmd.lba, nb_sectors)) {
        scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
        return;
    }

    if ((req->cmd.buf[1] & 0x1) || buffer_is_zero(inbuf, s->qdev.blocksize)) {
        /* UNMAP bit set, or the pattern is all zeroes: use write-zeroes. */
        int flags = (req->cmd.buf[1] & 0x8) ? BDRV_REQ_MAY_UNMAP : 0;

        /* The request is used as the AIO opaque value, so add a ref.  */
        scsi_req_ref(&r->req);
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         nb_sectors * s->qdev.blocksize,
                         BLOCK_ACCT_WRITE);
        r->req.aiocb = blk_aio_pwrite_zeroes(s->qdev.conf.blk,
                                             r->req.cmd.lba * s->qdev.blocksize,
                                             nb_sectors * s->qdev.blocksize,
                                             flags, scsi_aio_complete, r);
        return;
    }

    data = g_new0(WriteSameCBData, 1);
    data->r = r;
    data->sector = r->req.cmd.lba * (s->qdev.blocksize / 512);
    data->nb_sectors = nb_sectors * (s->qdev.blocksize / 512);
    data->iov.iov_len = MIN(data->nb_sectors * 512, SCSI_WRITE_SAME_MAX);
    data->iov.iov_base = buf = blk_blockalign(s->qdev.conf.blk,
                                              data->iov.iov_len);
    qemu_iovec_init_external(&data->qiov, &data->iov, 1);

    /* Replicate the pattern block across the bounce buffer. */
    for (i = 0; i < data->iov.iov_len; i += s->qdev.blocksize) {
        memcpy(&buf[i], inbuf, s->qdev.blocksize);
    }

    scsi_req_ref(&r->req);
    block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                     data->iov.iov_len, BLOCK_ACCT_WRITE);
    r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk,
                                   data->sector << BDRV_SECTOR_BITS,
                                   &data->qiov, 0,
                                   scsi_write_same_complete, data);
}

/*
 * Transfer phase for emulated commands that receive data: gather the
 * parameter list on the first call, then dispatch to the per-command
 * handler once the buffer is full.
 */
static void scsi_disk_emulate_write_data(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    if (r->iov.iov_len) {
        int buflen = r->iov.iov_len;
        DPRINTF("Write buf_len=%d\n", buflen);
        r->iov.iov_len = 0;
        scsi_req_data(&r->req, buflen);
        return;
    }

    switch (req->cmd.buf[0]) {
    case MODE_SELECT:
    case MODE_SELECT_10:
        /* This also clears the sense buffer for REQUEST SENSE.
 */
        scsi_disk_emulate_mode_select(r, r->iov.iov_base);
        break;

    case UNMAP:
        scsi_disk_emulate_unmap(r, r->iov.iov_base);
        break;

    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
        /* If no sense was queued during the data-out phase, the BYTCHK
         * mode was one we do not emulate here. */
        if (r->req.status == -1) {
            scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        }
        break;

    case WRITE_SAME_10:
    case WRITE_SAME_16:
        scsi_disk_emulate_write_same(r, r->iov.iov_base);
        break;

    default:
        abort();
    }
}

/*
 * Entry point for all emulated (non-DMA) commands.  Returns the number
 * of bytes the command transfers (positive: to the initiator, negative:
 * from the initiator, 0: none or already completed with sense).
 */
static int32_t scsi_disk_emulate_command(SCSIRequest *req, uint8_t *buf)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    uint64_t nb_sectors;
    uint8_t *outbuf;
    int buflen;

    /* Commands in the first group are legal without a medium; everything
     * else requires the backend to be available. */
    switch (req->cmd.buf[0]) {
    case INQUIRY:
    case MODE_SENSE:
    case MODE_SENSE_10:
    case RESERVE:
    case RESERVE_10:
    case RELEASE:
    case RELEASE_10:
    case START_STOP:
    case ALLOW_MEDIUM_REMOVAL:
    case GET_CONFIGURATION:
    case GET_EVENT_STATUS_NOTIFICATION:
    case MECHANISM_STATUS:
    case REQUEST_SENSE:
        break;

    default:
        if (!blk_is_available(s->qdev.conf.blk)) {
            scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
            return 0;
        }
        break;
    }

    /*
     * FIXME: we shouldn't return anything bigger than 4k, but the code
     * requires the buffer to be as big as req->cmd.xfer in several
     * places.  So, do not allow CDBs with a very large ALLOCATION
     * LENGTH.  The real fix would be to modify scsi_read_data and
     * dma_buf_read, so that they return data beyond the buflen
     * as all zeros.
     */
    if (req->cmd.xfer > 65536) {
        goto illegal_request;
    }
    r->buflen = MAX(4096, req->cmd.xfer);

    if (!r->iov.iov_base) {
        r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen);
    }

    buflen = req->cmd.xfer;
    outbuf = r->iov.iov_base;
    memset(outbuf, 0, r->buflen);
    switch (req->cmd.buf[0]) {
    case TEST_UNIT_READY:
        /* Availability was already established by the first switch. */
        assert(blk_is_available(s->qdev.conf.blk));
        break;
    case INQUIRY:
        buflen = scsi_disk_emulate_inquiry(req, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case MODE_SENSE:
    case MODE_SENSE_10:
        buflen = scsi_disk_emulate_mode_sense(r, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case READ_TOC:
        buflen = scsi_disk_emulate_read_toc(req, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case RESERVE:
        if (req->cmd.buf[1] & 1) {
            goto illegal_request;
        }
        break;
    case RESERVE_10:
        if (req->cmd.buf[1] & 3) {
            goto illegal_request;
        }
        break;
    case RELEASE:
        if (req->cmd.buf[1] & 1) {
            goto illegal_request;
        }
        break;
    case RELEASE_10:
        if (req->cmd.buf[1] & 3) {
            goto illegal_request;
        }
        break;
    case START_STOP:
        if (scsi_disk_emulate_start_stop(r) < 0) {
            return 0;
        }
        break;
    case ALLOW_MEDIUM_REMOVAL:
        s->tray_locked = req->cmd.buf[4] & 1;
        blk_lock_medium(s->qdev.conf.blk, req->cmd.buf[4] & 1);
        break;
    case READ_CAPACITY_10:
        /* The normal LEN field for this command is zero.  */
        memset(outbuf, 0, 8);
        blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
        if (!nb_sectors) {
            scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY));
            return 0;
        }
        /* PMI=0 requires LBA == 0. */
        if ((req->cmd.buf[8] & 1) == 0 && req->cmd.lba) {
            goto illegal_request;
        }
        nb_sectors /= s->qdev.blocksize / 512;
        /* Returned value is the address of the last sector.  */
        nb_sectors--;
        /* Remember the new size for read/write sanity checking.  */
        s->qdev.max_lba = nb_sectors;
        /* Clip to 2TB, instead of returning capacity modulo 2TB.  */
        if (nb_sectors > UINT32_MAX) {
            nb_sectors = UINT32_MAX;
        }
        outbuf[0] = (nb_sectors >> 24) & 0xff;
        outbuf[1] = (nb_sectors >> 16) & 0xff;
        outbuf[2] = (nb_sectors >> 8) & 0xff;
        outbuf[3] = nb_sectors & 0xff;
        outbuf[4] = 0;
        outbuf[5] = 0;
        outbuf[6] = s->qdev.blocksize >> 8;
        outbuf[7] = 0;
        break;
    case REQUEST_SENSE:
        /* Just return "NO SENSE".  */
        buflen = scsi_convert_sense(NULL, 0, outbuf, r->buflen,
                                    (req->cmd.buf[1] & 1) == 0);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case MECHANISM_STATUS:
        buflen = scsi_emulate_mechanism_status(s, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case GET_CONFIGURATION:
        buflen = scsi_get_configuration(s, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case GET_EVENT_STATUS_NOTIFICATION:
        buflen = scsi_get_event_status_notification(s, r, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case READ_DISC_INFORMATION:
        buflen = scsi_read_disc_information(s, r, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case READ_DVD_STRUCTURE:
        buflen = scsi_read_dvd_structure(s, r, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case SERVICE_ACTION_IN_16:
        /* Service Action In subcommands.  */
        if ((req->cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) {
            DPRINTF("SAI READ CAPACITY(16)\n");
            memset(outbuf, 0, req->cmd.xfer);
            blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
            if (!nb_sectors) {
                scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY));
                return 0;
            }
            /* PMI=0 requires LBA == 0. */
            if ((req->cmd.buf[14] & 1) == 0 && req->cmd.lba) {
                goto illegal_request;
            }
            nb_sectors /= s->qdev.blocksize / 512;
            /* Returned value is the address of the last sector.  */
            nb_sectors--;
            /* Remember the new size for read/write sanity checking.  */
            s->qdev.max_lba = nb_sectors;
            outbuf[0] = (nb_sectors >> 56) & 0xff;
            outbuf[1] = (nb_sectors >> 48) & 0xff;
            outbuf[2] = (nb_sectors >> 40) & 0xff;
            outbuf[3] = (nb_sectors >> 32) & 0xff;
            outbuf[4] = (nb_sectors >> 24) & 0xff;
            outbuf[5] = (nb_sectors >> 16) & 0xff;
            outbuf[6] = (nb_sectors >> 8) & 0xff;
            outbuf[7] = nb_sectors & 0xff;
            outbuf[8] = 0;
            outbuf[9] = 0;
            outbuf[10] = s->qdev.blocksize >> 8;
            outbuf[11] = 0;
            outbuf[12] = 0;
            outbuf[13] = get_physical_block_exp(&s->qdev.conf);

            /* set TPE bit if the format supports discard */
            if (s->qdev.conf.discard_granularity) {
                outbuf[14] = 0x80;
            }

            /* Protection, exponent and lowest lba field left blank.  */
            break;
        }
        DPRINTF("Unsupported Service Action In\n");
        goto illegal_request;
    case SYNCHRONIZE_CACHE:
        /* The request is used as the AIO opaque value, so add a ref.  */
        scsi_req_ref(&r->req);
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
        return 0;
    case SEEK_10:
        DPRINTF("Seek(10) (sector %" PRId64 ")\n", r->req.cmd.lba);
        if (r->req.cmd.lba > s->qdev.max_lba) {
            goto illegal_lba;
        }
        break;
    case MODE_SELECT:
        DPRINTF("Mode Select(6) (len %lu)\n", (unsigned long)r->req.cmd.xfer);
        break;
    case MODE_SELECT_10:
        DPRINTF("Mode Select(10) (len %lu)\n", (unsigned long)r->req.cmd.xfer);
        break;
    case UNMAP:
        DPRINTF("Unmap (len %lu)\n", (unsigned long)r->req.cmd.xfer);
        break;
    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
        DPRINTF("Verify (bytchk %d)\n", (req->cmd.buf[1] >> 1) & 3);
        if (req->cmd.buf[1] & 6) {
            goto illegal_request;
        }
        break;
    case WRITE_SAME_10:
    case WRITE_SAME_16:
        DPRINTF("WRITE SAME %d (len %lu)\n",
                req->cmd.buf[0] == WRITE_SAME_10 ? 10 : 16,
                (unsigned long)r->req.cmd.xfer);
        break;
    default:
        DPRINTF("Unknown SCSI command (%2.2x=%s)\n", buf[0],
                scsi_command_name(buf[0]));
        scsi_check_condition(r, SENSE_CODE(INVALID_OPCODE));
        return 0;
    }
    assert(!r->req.aiocb);
    r->iov.iov_len = MIN(r->buflen, req->cmd.xfer);
    if (r->iov.iov_len == 0) {
        scsi_req_complete(&r->req, GOOD);
    }
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        assert(r->iov.iov_len == req->cmd.xfer);
        return -r->iov.iov_len;
    } else {
        return r->iov.iov_len;
    }

illegal_request:
    /* Only queue INVALID_FIELD if the handler did not already queue a
     * more specific sense condition. */
    if (r->req.status == -1) {
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
    }
    return 0;

illegal_lba:
    scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
    return 0;
}

/* Execute a scsi command.  Returns the length of the data expected by the
   command.
   This will be Positive for data transfers from the device
   (eg. disk reads), negative for transfers to the device (eg. disk writes),
   and zero if the command does not transfer any data.  */

static int32_t scsi_disk_dma_command(SCSIRequest *req, uint8_t *buf)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
    uint32_t len;
    uint8_t command;

    command = buf[0];

    if (!blk_is_available(s->qdev.conf.blk)) {
        scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
        return 0;
    }

    /* Transfer length from the CDB, in logical blocks. */
    len = scsi_data_cdb_xfer(r->req.cmd.buf);
    switch (command) {
    case READ_6:
    case READ_10:
    case READ_12:
    case READ_16:
        DPRINTF("Read (sector %" PRId64 ", count %u)\n", r->req.cmd.lba, len);
        /* Protection information is not supported.  For SCSI versions 2 and
         * older (as determined by snooping the guest's INQUIRY commands),
         * there is no RD/WR/VRPROTECT, so skip this check in these versions.
         */
        if (s->qdev.scsi_version > 2 && (r->req.cmd.buf[1] & 0xe0)) {
            goto illegal_request;
        }
        if (!check_lba_range(s, r->req.cmd.lba, len)) {
            goto illegal_lba;
        }
        /* Convert to 512-byte qemu sectors (see SCSIDiskReq). */
        r->sector = r->req.cmd.lba * (s->qdev.blocksize / 512);
        r->sector_count = len * (s->qdev.blocksize / 512);
        break;
    case WRITE_6:
    case WRITE_10:
    case WRITE_12:
    case WRITE_16:
    case WRITE_VERIFY_10:
    case WRITE_VERIFY_12:
    case WRITE_VERIFY_16:
        if (blk_is_read_only(s->qdev.conf.blk)) {
            scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
            return 0;
        }
        DPRINTF("Write %s(sector %" PRId64 ", count %u)\n",
                (command & 0xe) == 0xe ? "And Verify " : "",
                r->req.cmd.lba, len);
        /* fall through */
    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
        /* We get here only for BYTCHK == 0x01 and only for scsi-block.
         * As far as DMA is concerned, we can treat it the same as a write;
         * scsi_block_do_sgio will send VERIFY commands.
         */
        if (s->qdev.scsi_version > 2 && (r->req.cmd.buf[1] & 0xe0)) {
            goto illegal_request;
        }
        if (!check_lba_range(s, r->req.cmd.lba, len)) {
            goto illegal_lba;
        }
        r->sector = r->req.cmd.lba * (s->qdev.blocksize / 512);
        r->sector_count = len * (s->qdev.blocksize / 512);
        break;
    default:
        abort();
    illegal_request:
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        return 0;
    illegal_lba:
        scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
        return 0;
    }
    r->need_fua_emulation = sdc->need_fua_emulation(&r->req.cmd);
    if (r->sector_count == 0) {
        /* Zero-length transfers complete immediately. */
        scsi_req_complete(&r->req, GOOD);
    }
    assert(r->iov.iov_len == 0);
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        return -r->sector_count * 512;
    } else {
        return r->sector_count * 512;
    }
}

/*
 * Device reset: cancel in-flight requests with RESET sense, re-read the
 * geometry, and restore the default tray and SCSI-version state.
 */
static void scsi_disk_reset(DeviceState *dev)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev.qdev, dev);
    uint64_t nb_sectors;

    scsi_device_purge_requests(&s->qdev, SENSE_CODE(RESET));

    blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
    nb_sectors /= s->qdev.blocksize / 512;
    if (nb_sectors) {
        /* max_lba is the address of the last sector. */
        nb_sectors--;
    }
    s->qdev.max_lba = nb_sectors;
    /* reset tray statuses */
    s->tray_locked = 0;
    s->tray_open = 0;

    s->qdev.scsi_version = s->qdev.default_scsi_version;
}

/* Block-backend resize callback: notify the guest of the new capacity. */
static void scsi_disk_resize_cb(void *opaque)
{
    SCSIDiskState *s = opaque;

    /* SPC lists this sense code as available only for
     * direct-access devices.
     */
    if (s->qdev.type == TYPE_DISK) {
        scsi_device_report_change(&s->qdev, SENSE_CODE(CAPACITY_CHANGED));
    }
}

static void scsi_cd_change_media_cb(void *opaque, bool load, Error **errp)
{
    SCSIDiskState *s = opaque;

    /*
     * When a CD gets changed, we have to report an ejected state and
     * then a loaded state to guests so that they detect tray
     * open/close and media change events.  Guests that do not use
     * GET_EVENT_STATUS_NOTIFICATION to detect such tray open/close
     * states rely on this behavior.
     *
     * media_changed governs the state machine used for unit attention
     * report.  media_event is used by GET EVENT STATUS NOTIFICATION.
     */
    s->media_changed = load;
    s->tray_open = !load;
    scsi_device_set_ua(&s->qdev, SENSE_CODE(UNIT_ATTENTION_NO_MEDIUM));
    s->media_event = true;
    s->eject_request = false;
}

/* Guest requested an eject; a forced eject also unlocks the tray. */
static void scsi_cd_eject_request_cb(void *opaque, bool force)
{
    SCSIDiskState *s = opaque;

    s->eject_request = true;
    if (force) {
        s->tray_locked = false;
    }
}

static bool scsi_cd_is_tray_open(void *opaque)
{
    return ((SCSIDiskState *)opaque)->tray_open;
}

static bool scsi_cd_is_medium_locked(void *opaque)
{
    return ((SCSIDiskState *)opaque)->tray_locked;
}

/* Backend callbacks for devices with removable media. */
static const BlockDevOps scsi_disk_removable_block_ops = {
    .change_media_cb = scsi_cd_change_media_cb,
    .eject_request_cb = scsi_cd_eject_request_cb,
    .is_tray_open = scsi_cd_is_tray_open,
    .is_medium_locked = scsi_cd_is_medium_locked,

    .resize_cb = scsi_disk_resize_cb,
};

/* Backend callbacks for fixed media. */
static const BlockDevOps scsi_disk_block_ops = {
    .resize_cb = scsi_disk_resize_cb,
};

/* After a unit attention has been delivered, advance the media-change
 * state machine: an ejected report is followed by MEDIUM CHANGED. */
static void scsi_disk_unit_attention_reported(SCSIDevice *dev)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
    if (s->media_changed) {
        s->media_changed = false;
        scsi_device_set_ua(&s->qdev, SENSE_CODE(MEDIUM_CHANGED));
    }
}

/* Realize code shared by scsi-hd, scsi-cd, scsi-block and the legacy
 * scsi-disk: validates the backend and block-size configuration and
 * installs the appropriate BlockDevOps on the BlockBackend. */
static void scsi_realize(SCSIDevice *dev, Error **errp)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);

    if (!s->qdev.conf.blk) {
        error_setg(errp, "drive property not set");
        return;
    }

    /* Fixed devices must have a medium present at realize time. */
    if (!(s->features & (1 << SCSI_DISK_F_REMOVABLE)) &&
        !blk_is_inserted(s->qdev.conf.blk)) {
        error_setg(errp, "Device needs media, but drive is empty");
        return;
    }

    blkconf_blocksizes(&s->qdev.conf);

    if (s->qdev.conf.logical_block_size >
        s->qdev.conf.physical_block_size) {
        error_setg(errp,
                   "logical_block_size > physical_block_size not supported");
        return;
    }

    if (dev->type == TYPE_DISK) {
        if (!blkconf_geometry(&dev->conf, NULL, 65535, 255, 255, errp)) {
            return;
        }
    }
    if (!blkconf_apply_backend_options(&dev->conf,
                                       blk_is_read_only(s->qdev.conf.blk),
                                       dev->type == TYPE_DISK, errp)) {
        return;
    }

    if (s->qdev.conf.discard_granularity == -1) {
        s->qdev.conf.discard_granularity =
            MAX(s->qdev.conf.logical_block_size, DEFAULT_DISCARD_GRANULARITY);
    }

    if (!s->version) {
        s->version = g_strdup(qemu_hw_version());
    }
    if (!s->vendor) {
        s->vendor = g_strdup("QEMU");
    }

    /* SG character devices belong to scsi-generic/scsi-block, not here. */
    if (blk_is_sg(s->qdev.conf.blk)) {
        error_setg(errp, "unwanted /dev/sg*");
        return;
    }

    if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) &&
        !(s->features & (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS))) {
        blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_removable_block_ops, s);
    } else {
        blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_block_ops, s);
    }
    blk_set_guest_block_size(s->qdev.conf.blk, s->qdev.blocksize);

    blk_iostatus_enable(s->qdev.conf.blk);
}

static void scsi_hd_realize(SCSIDevice *dev, Error **errp)
{
    SCSIDiskState *s =
        DO_UPCAST(SCSIDiskState, qdev, dev);
    /* can happen for devices without drive. The error message for missing
     * backend will be issued in scsi_realize
     */
    if (s->qdev.conf.blk) {
        blkconf_blocksizes(&s->qdev.conf);
    }
    s->qdev.blocksize = s->qdev.conf.logical_block_size;
    s->qdev.type = TYPE_DISK;
    if (!s->product) {
        s->product = g_strdup("QEMU HARDDISK");
    }
    scsi_realize(&s->qdev, errp);
}

static void scsi_cd_realize(SCSIDevice *dev, Error **errp)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
    int ret;

    if (!dev->conf.blk) {
        /* Anonymous BlockBackend for an empty drive. As we put it into
         * dev->conf, qdev takes care of detaching on unplug. */
        dev->conf.blk = blk_new(0, BLK_PERM_ALL);
        ret = blk_attach_dev(dev->conf.blk, &dev->qdev);
        assert(ret == 0);
    }

    /* CD-ROMs always use 2048-byte logical blocks. */
    s->qdev.blocksize = 2048;
    s->qdev.type = TYPE_ROM;
    s->features |= 1 << SCSI_DISK_F_REMOVABLE;
    if (!s->product) {
        s->product = g_strdup("QEMU CD-ROM");
    }
    scsi_realize(&s->qdev, errp);
}

/* Legacy "scsi-disk" device: chooses disk or CD-ROM behaviour based on
 * the backing drive's media type. */
static void scsi_disk_realize(SCSIDevice *dev, Error **errp)
{
    DriveInfo *dinfo;
    Error *local_err = NULL;

    if (!dev->conf.blk) {
        /* Let scsi_realize() produce the "drive property not set" error. */
        scsi_realize(dev, &local_err);
        assert(local_err);
        error_propagate(errp, local_err);
        return;
    }

    dinfo = blk_legacy_dinfo(dev->conf.blk);
    if (dinfo && dinfo->media_cd) {
        scsi_cd_realize(dev, errp);
    } else {
        scsi_hd_realize(dev, errp);
    }
}

/* Request ops for commands emulated entirely inside QEMU. */
static const SCSIReqOps scsi_disk_emulate_reqops = {
    .size = sizeof(SCSIDiskReq),
    .free_req = scsi_free_request,
    .send_command = scsi_disk_emulate_command,
    .read_data = scsi_disk_emulate_read_data,
    .write_data = scsi_disk_emulate_write_data,
    .get_buf = scsi_get_buf,
};

/* Request ops for READ/WRITE/VERIFY commands that go through the DMA
 * helpers and hit the block backend. */
static const SCSIReqOps scsi_disk_dma_reqops = {
    .size = sizeof(SCSIDiskReq),
    .free_req = scsi_free_request,
    .send_command = scsi_disk_dma_command,
    .read_data = scsi_read_data,
    .write_data = scsi_write_data,
    .get_buf = scsi_get_buf,
    .load_request = scsi_disk_load_request,
    .save_request = scsi_disk_save_request,
};

/* Per-opcode dispatch table; opcodes without an entry fall back to the
 * emulation ops in scsi_new_request(). */
static const SCSIReqOps *const scsi_disk_reqops_dispatch[256] = {
    [TEST_UNIT_READY] = &scsi_disk_emulate_reqops,
    [INQUIRY] = &scsi_disk_emulate_reqops,
    [MODE_SENSE] = &scsi_disk_emulate_reqops,
    [MODE_SENSE_10] = &scsi_disk_emulate_reqops,
    [START_STOP] = &scsi_disk_emulate_reqops,
    [ALLOW_MEDIUM_REMOVAL] = &scsi_disk_emulate_reqops,
    [READ_CAPACITY_10] = &scsi_disk_emulate_reqops,
    [READ_TOC] = &scsi_disk_emulate_reqops,
    [READ_DVD_STRUCTURE] = &scsi_disk_emulate_reqops,
    [READ_DISC_INFORMATION] = &scsi_disk_emulate_reqops,
    [GET_CONFIGURATION] = &scsi_disk_emulate_reqops,
    [GET_EVENT_STATUS_NOTIFICATION] = &scsi_disk_emulate_reqops,
    [MECHANISM_STATUS] = &scsi_disk_emulate_reqops,
    [SERVICE_ACTION_IN_16] = &scsi_disk_emulate_reqops,
    [REQUEST_SENSE] = &scsi_disk_emulate_reqops,
    [SYNCHRONIZE_CACHE] = &scsi_disk_emulate_reqops,
    [SEEK_10] = &scsi_disk_emulate_reqops,
    [MODE_SELECT] = &scsi_disk_emulate_reqops,
    [MODE_SELECT_10] = &scsi_disk_emulate_reqops,
    [UNMAP] = &scsi_disk_emulate_reqops,
    [WRITE_SAME_10] = &scsi_disk_emulate_reqops,
    [WRITE_SAME_16] = &scsi_disk_emulate_reqops,
    [VERIFY_10] = &scsi_disk_emulate_reqops,
    [VERIFY_12] = &scsi_disk_emulate_reqops,
    [VERIFY_16] = &scsi_disk_emulate_reqops,

    [READ_6] = &scsi_disk_dma_reqops,
    [READ_10] = &scsi_disk_dma_reqops,
    [READ_12] = &scsi_disk_dma_reqops,
    [READ_16] = &scsi_disk_dma_reqops,
    [WRITE_6] = &scsi_disk_dma_reqops,
    [WRITE_10] = &scsi_disk_dma_reqops,
    [WRITE_12] = &scsi_disk_dma_reqops,
    [WRITE_16] = &scsi_disk_dma_reqops,
    [WRITE_VERIFY_10] = &scsi_disk_dma_reqops,
    [WRITE_VERIFY_12] = &scsi_disk_dma_reqops,
    [WRITE_VERIFY_16] = &scsi_disk_dma_reqops,
};

static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun,
                                     uint8_t *buf, void *hba_private)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);
    SCSIRequest *req;
    const SCSIReqOps *ops;
    uint8_t command;

    command = buf[0];
    ops = scsi_disk_reqops_dispatch[command];
    if (!ops) {
        /* Unknown opcodes go to the emulation path, which rejects them
         * with the appropriate sense data. */
        ops = &scsi_disk_emulate_reqops;
    }
    req = scsi_req_alloc(ops, &s->qdev, tag, lun, hba_private);

#ifdef DEBUG_SCSI
    DPRINTF("Command: lun=%d tag=0x%x data=0x%02x", lun, tag, buf[0]);
    {
        int i;
        for (i = 1; i < scsi_cdb_length(buf); i++) {
            printf(" 0x%02x", buf[i]);
        }
        printf("\n");
    }
#endif

    return req;
}

#ifdef __linux__
/* Issue a host INQUIRY to learn the passthrough device's peripheral type
 * and whether its medium is removable.  Returns 0 on success, -1 on
 * failure. */
static int get_device_type(SCSIDiskState *s)
{
    uint8_t cmd[16];
    uint8_t buf[36];
    int ret;

    memset(cmd, 0, sizeof(cmd));
    memset(buf, 0, sizeof(buf));
    cmd[0] = INQUIRY;
    cmd[4] = sizeof(buf);

    ret = scsi_SG_IO_FROM_DEV(s->qdev.conf.blk, cmd, sizeof(cmd),
                              buf, sizeof(buf));
    if (ret < 0) {
        return -1;
    }
    s->qdev.type = buf[0];
    /* Bit 7 of INQUIRY byte 1 is the RMB (removable medium) flag. */
    if (buf[1] & 0x80) {
        s->features |= 1 << SCSI_DISK_F_REMOVABLE;
    }
    return 0;
}

static void scsi_block_realize(SCSIDevice *dev, Error **errp)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
    int sg_version;
    int rc;

    if (!s->qdev.conf.blk) {
        error_setg(errp, "drive property not set");
        return;
    }

    /* check we are using a driver managing SG_IO (version 3 and after) */
    rc = blk_ioctl(s->qdev.conf.blk, SG_GET_VERSION_NUM, &sg_version);
    if (rc < 0) {
        error_setg_errno(errp, -rc, "cannot get SG_IO version number");
        if (rc != -EPERM) {
            error_append_hint(errp, "Is this a SCSI device?\n");
        }
        return;
    }
    if (sg_version < 30000) {
        error_setg(errp, "scsi generic interface too old");
        return;
    }

    /* get device type from INQUIRY data */
    rc = get_device_type(s);
    if (rc < 0) {
        error_setg(errp, "INQUIRY failed");
        return;
    }

    /* Make a guess for the block size; we'll fix it when the guest sends
     * READ CAPACITY.  If they don't, they likely would assume these sizes
     * anyway. (TODO: check in /sys).
     */
    if (s->qdev.type == TYPE_ROM || s->qdev.type == TYPE_WORM) {
        s->qdev.blocksize = 2048;
    } else {
        s->qdev.blocksize = 512;
    }

    /* Makes the scsi-block device not removable by using HMP and QMP eject
     * command.
     */
    s->features |= (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS);

    scsi_realize(&s->qdev, errp);
    scsi_generic_read_device_inquiry(&s->qdev);
}

/* A scsi-block request that is submitted to the host via SG_IO instead
 * of the regular block layer. */
typedef struct SCSIBlockReq {
    SCSIDiskReq req;
    sg_io_hdr_t io_header;

    /* Selected bytes of the original CDB, copied into our own CDB. */
    uint8_t cmd, cdb1, group_number;

    /* CDB passed to SG_IO. */
    uint8_t cdb[16];
} SCSIBlockReq;

/* Submit one segment of a scsi-block DMA request via SG_IO, rebuilding
 * the CDB so its LBA and transfer length describe just this segment. */
static BlockAIOCB *scsi_block_do_sgio(SCSIBlockReq *req,
                                      int64_t offset, QEMUIOVector *iov,
                                      int direction,
                                      BlockCompletionFunc *cb, void *opaque)
{
    sg_io_hdr_t *io_header = &req->io_header;
    SCSIDiskReq *r = &req->req;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    int nb_logical_blocks;
    uint64_t lba;
    BlockAIOCB *aiocb;

    /* This is not supported yet.  It can only happen if the guest does
     * reads and writes that are not aligned to one logical sectors
     * _and_ cover multiple MemoryRegions.
     */
    assert(offset % s->qdev.blocksize == 0);
    assert(iov->size % s->qdev.blocksize == 0);

    io_header->interface_id = 'S';

    /* The data transfer comes from the QEMUIOVector.
     */
    io_header->dxfer_direction = direction;
    io_header->dxfer_len = iov->size;
    io_header->dxferp = (void *)iov->iov;
    io_header->iovec_count = iov->niov;
    assert(io_header->iovec_count == iov->niov); /* no overflow! */

    /* Build a new CDB with the LBA and length patched in, in case
     * DMA helpers split the transfer in multiple segments.  Do not
     * build a CDB smaller than what the guest wanted, and only build
     * a larger one if strictly necessary.
     */
    io_header->cmdp = req->cdb;
    lba = offset / s->qdev.blocksize;
    nb_logical_blocks = io_header->dxfer_len / s->qdev.blocksize;

    if ((req->cmd >> 5) == 0 && lba <= 0x1ffff) {
        /* 6-byte CDB */
        stl_be_p(&req->cdb[0], lba | (req->cmd << 24));
        req->cdb[4] = nb_logical_blocks;
        req->cdb[5] = 0;
        io_header->cmd_len = 6;
    } else if ((req->cmd >> 5) <= 1 && lba <= 0xffffffffULL) {
        /* 10-byte CDB */
        req->cdb[0] = (req->cmd & 0x1f) | 0x20;
        req->cdb[1] = req->cdb1;
        stl_be_p(&req->cdb[2], lba);
        req->cdb[6] = req->group_number;
        stw_be_p(&req->cdb[7], nb_logical_blocks);
        req->cdb[9] = 0;
        io_header->cmd_len = 10;
    } else if ((req->cmd >> 5) != 4 && lba <= 0xffffffffULL) {
        /* 12-byte CDB */
        req->cdb[0] = (req->cmd & 0x1f) | 0xA0;
        req->cdb[1] = req->cdb1;
        stl_be_p(&req->cdb[2], lba);
        stl_be_p(&req->cdb[6], nb_logical_blocks);
        req->cdb[10] = req->group_number;
        req->cdb[11] = 0;
        io_header->cmd_len = 12;
    } else {
        /* 16-byte CDB */
        req->cdb[0] = (req->cmd & 0x1f) | 0x80;
        req->cdb[1] = req->cdb1;
        stq_be_p(&req->cdb[2], lba);
        stl_be_p(&req->cdb[10], nb_logical_blocks);
        req->cdb[14] = req->group_number;
        req->cdb[15] = 0;
        io_header->cmd_len = 16;
    }

    /* The rest is as in scsi-generic.
     */
    io_header->mx_sb_len = sizeof(r->req.sense);
    io_header->sbp = r->req.sense;
    io_header->timeout = UINT_MAX;
    io_header->usr_ptr = r;
    io_header->flags |= SG_FLAG_DIRECT_IO;

    aiocb = blk_aio_ioctl(s->qdev.conf.blk, SG_IO, io_header, cb, opaque);
    assert(aiocb != NULL);
    return aiocb;
}

/* SG_IO passes FUA through to the real device, so no emulation needed. */
static bool scsi_block_no_fua(SCSICommand *cmd)
{
    return false;
}

static BlockAIOCB *scsi_block_dma_readv(int64_t offset,
                                        QEMUIOVector *iov,
                                        BlockCompletionFunc *cb, void *cb_opaque,
                                        void *opaque)
{
    SCSIBlockReq *r = opaque;
    return scsi_block_do_sgio(r, offset, iov,
                              SG_DXFER_FROM_DEV, cb, cb_opaque);
}

static BlockAIOCB *scsi_block_dma_writev(int64_t offset,
                                         QEMUIOVector *iov,
                                         BlockCompletionFunc *cb, void *cb_opaque,
                                         void *opaque)
{
    SCSIBlockReq *r = opaque;
    return scsi_block_do_sgio(r, offset, iov,
                              SG_DXFER_TO_DEV, cb, cb_opaque);
}

/* Decide whether a CDB must bypass the DMA helpers and be sent to the
 * device verbatim (scsi-generic style) instead. */
static bool scsi_block_is_passthrough(SCSIDiskState *s, uint8_t *buf)
{
    switch (buf[0]) {
    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
        /* Check if BYTCHK == 0x01 (data-out buffer contains data
         * for the number of logical blocks specified in the length
         * field).  For other modes, do not use scatter/gather operation.
         */
        if ((buf[1] & 6) == 2) {
            return false;
        }
        break;

    case READ_6:
    case READ_10:
    case READ_12:
    case READ_16:
    case WRITE_6:
    case WRITE_10:
    case WRITE_12:
    case WRITE_16:
    case WRITE_VERIFY_10:
    case WRITE_VERIFY_12:
    case WRITE_VERIFY_16:
        /* MMC writing cannot be done via DMA helpers, because it sometimes
         * involves writing beyond the maximum LBA or to negative LBA (lead-in).
         * We might use scsi_block_dma_reqops as long as no writing commands are
         * seen, but performance usually isn't paramount on optical media.  So,
         * just make scsi-block operate the same as scsi-generic for them.
         */
        if (s->qdev.type != TYPE_ROM) {
            return false;
        }
        break;

    default:
        break;
    }

    return true;
}


/* send_command handler for scsi-block DMA requests: stash the CDB bytes
 * needed to rebuild per-segment CDBs in scsi_block_do_sgio(), then defer
 * to the common scsi_disk_dma_command() checks. */
static int32_t scsi_block_dma_command(SCSIRequest *req, uint8_t *buf)
{
    SCSIBlockReq *r = (SCSIBlockReq *)req;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);

    /* The top three opcode bits select the CDB size (SPC group code). */
    r->cmd = req->cmd.buf[0];
    switch (r->cmd >> 5) {
    case 0:
        /* 6-byte CDB. */
        r->cdb1 = r->group_number = 0;
        break;
    case 1:
        /* 10-byte CDB. */
        r->cdb1 = req->cmd.buf[1];
        r->group_number = req->cmd.buf[6];
        break;
    case 4:
        /* 12-byte CDB. */
        r->cdb1 = req->cmd.buf[1];
        r->group_number = req->cmd.buf[10];
        break;
    case 5:
        /* 16-byte CDB. */
        r->cdb1 = req->cmd.buf[1];
        r->group_number = req->cmd.buf[14];
        break;
    default:
        abort();
    }

    /* Protection information is not supported.  For SCSI versions 2 and
     * older (as determined by snooping the guest's INQUIRY commands),
     * there is no RD/WR/VRPROTECT, so skip this check in these versions.
     */
    if (s->qdev.scsi_version > 2 && (req->cmd.buf[1] & 0xe0)) {
        scsi_check_condition(&r->req, SENSE_CODE(INVALID_FIELD));
        return 0;
    }

    /* Have the request status filled in directly from the SG_IO header. */
    r->req.status = &r->io_header.status;
    return scsi_disk_dma_command(req, buf);
}

static const SCSIReqOps scsi_block_dma_reqops = {
    .size = sizeof(SCSIBlockReq),
    .free_req = scsi_free_request,
    .send_command = scsi_block_dma_command,
    .read_data = scsi_read_data,
    .write_data = scsi_write_data,
    .get_buf = scsi_get_buf,
    .load_request = scsi_disk_load_request,
    .save_request = scsi_disk_save_request,
};

static SCSIRequest *scsi_block_new_request(SCSIDevice *d, uint32_t tag,
                                           uint32_t lun, uint8_t *buf,
                                           void *hba_private)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);

    if (scsi_block_is_passthrough(s, buf)) {
        return scsi_req_alloc(&scsi_generic_req_ops, &s->qdev, tag, lun,
                              hba_private);
    } else {
        return scsi_req_alloc(&scsi_block_dma_reqops, &s->qdev, tag, lun,
                              hba_private);
    }
}

static int scsi_block_parse_cdb(SCSIDevice *d, SCSICommand *cmd,
                                uint8_t *buf, void *hba_private)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);

    if (scsi_block_is_passthrough(s, buf)) {
        return scsi_bus_parse_cdb(&s->qdev, cmd, buf, hba_private);
    } else {
        return scsi_req_parse_cdb(&s->qdev, cmd, buf);
    }
}

#endif

/* Default DMA read helper: plain block-layer preadv. */
static
BlockAIOCB *scsi_dma_readv(int64_t offset, QEMUIOVector *iov,
                           BlockCompletionFunc *cb, void *cb_opaque,
                           void *opaque)
{
    SCSIDiskReq *r = opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    return blk_aio_preadv(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque);
}

/* Default DMA write helper: plain block-layer pwritev. */
static
BlockAIOCB *scsi_dma_writev(int64_t offset, QEMUIOVector *iov,
                            BlockCompletionFunc *cb, void *cb_opaque,
                            void *opaque)
{
    SCSIDiskReq *r = opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    return blk_aio_pwritev(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque);
}

/* Class defaults shared by all scsi-disk flavours; scsi-block overrides
 * the DMA and FUA hooks in its own class_init. */
static void scsi_disk_base_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass);

    dc->fw_name = "disk";
    dc->reset = scsi_disk_reset;
    sdc->dma_readv = scsi_dma_readv;
    sdc->dma_writev = scsi_dma_writev;
    sdc->need_fua_emulation = scsi_is_cmd_fua;
}

static const TypeInfo scsi_disk_base_info = {
    .name = TYPE_SCSI_DISK_BASE,
    .parent = TYPE_SCSI_DEVICE,
    .class_init = scsi_disk_base_class_initfn,
    .instance_size = sizeof(SCSIDiskState),
    .class_size = sizeof(SCSIDiskClass),
    .abstract = true,
};

/* Properties shared by scsi-hd, scsi-cd and the legacy scsi-disk. */
#define DEFINE_SCSI_DISK_PROPERTIES() \
    DEFINE_BLOCK_PROPERTIES(SCSIDiskState, qdev.conf), \
    DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf), \
    DEFINE_PROP_STRING("ver", SCSIDiskState, version), \
    DEFINE_PROP_STRING("serial", SCSIDiskState, serial), \
    DEFINE_PROP_STRING("vendor", SCSIDiskState, vendor), \
    DEFINE_PROP_STRING("product", SCSIDiskState, product)

static Property scsi_hd_properties[] = {
    DEFINE_SCSI_DISK_PROPERTIES(),
    DEFINE_PROP_BIT("removable", SCSIDiskState, features,
                    SCSI_DISK_F_REMOVABLE, false),
    DEFINE_PROP_BIT("dpofua", SCSIDiskState, features,
                    SCSI_DISK_F_DPOFUA, false),
    DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
    DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
    DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
    DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size,
                       DEFAULT_MAX_UNMAP_SIZE),
    DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
                       DEFAULT_MAX_IO_SIZE),
    DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState,
                       rotation_rate, 0),
    DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version,
                      5),
    DEFINE_BLOCK_CHS_PROPERTIES(SCSIDiskState, qdev.conf),
    DEFINE_PROP_END_OF_LIST(),
};

/* Migration state: the tray/media flags must travel with the device so
 * the unit-attention state machine survives migration. */
static const VMStateDescription vmstate_scsi_disk_state = {
    .name = "scsi-disk",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_SCSI_DEVICE(qdev, SCSIDiskState),
        VMSTATE_BOOL(media_changed, SCSIDiskState),
        VMSTATE_BOOL(media_event, SCSIDiskState),
        VMSTATE_BOOL(eject_request, SCSIDiskState),
        VMSTATE_BOOL(tray_open, SCSIDiskState),
        VMSTATE_BOOL(tray_locked, SCSIDiskState),
        VMSTATE_END_OF_LIST()
    }
};

static void scsi_hd_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);

    sc->realize = scsi_hd_realize;
    sc->alloc_req = scsi_new_request;
    sc->unit_attention_reported = scsi_disk_unit_attention_reported;
    dc->desc = "virtual SCSI disk";
    dc->props = scsi_hd_properties;
    dc->vmsd = &vmstate_scsi_disk_state;
}

static const TypeInfo scsi_hd_info = {
    .name = "scsi-hd",
    .parent = TYPE_SCSI_DISK_BASE,
    .class_init = scsi_hd_class_initfn,
};

static Property scsi_cd_properties[] = {
    DEFINE_SCSI_DISK_PROPERTIES(),
    DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
    DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
    DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
    DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
                       DEFAULT_MAX_IO_SIZE),
    DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version,
                      5),
    DEFINE_PROP_END_OF_LIST(),
};

static void scsi_cd_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDeviceClass *sc =
        SCSI_DEVICE_CLASS(klass);

    sc->realize = scsi_cd_realize;
    sc->alloc_req = scsi_new_request;
    sc->unit_attention_reported = scsi_disk_unit_attention_reported;
    dc->desc = "virtual SCSI CD-ROM";
    dc->props = scsi_cd_properties;
    dc->vmsd = &vmstate_scsi_disk_state;
}

static const TypeInfo scsi_cd_info = {
    .name = "scsi-cd",
    .parent = TYPE_SCSI_DISK_BASE,
    .class_init = scsi_cd_class_initfn,
};

#ifdef __linux__
/* scsi-block takes its identity (INQUIRY data, geometry, ...) from the
 * host device, so it has far fewer properties than scsi-hd/scsi-cd. */
static Property scsi_block_properties[] = {
    DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf),        \
    DEFINE_PROP_DRIVE("drive", SCSIDiskState, qdev.conf.blk),
    DEFINE_PROP_BOOL("share-rw", SCSIDiskState, qdev.conf.share_rw, false),
    DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0),
    DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size,
                       DEFAULT_MAX_UNMAP_SIZE),
    DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
                       DEFAULT_MAX_IO_SIZE),
    DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version,
                      -1),
    DEFINE_PROP_END_OF_LIST(),
};

static void scsi_block_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);
    SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass);

    sc->realize = scsi_block_realize;
    sc->alloc_req = scsi_block_new_request;
    sc->parse_cdb = scsi_block_parse_cdb;
    /* Route DMA through SG_IO and leave FUA to the real device. */
    sdc->dma_readv = scsi_block_dma_readv;
    sdc->dma_writev = scsi_block_dma_writev;
    sdc->need_fua_emulation = scsi_block_no_fua;
    dc->desc = "SCSI block device passthrough";
    dc->props = scsi_block_properties;
    dc->vmsd = &vmstate_scsi_disk_state;
}

static const TypeInfo scsi_block_info = {
    .name = "scsi-block",
    .parent = TYPE_SCSI_DISK_BASE,
    .class_init = scsi_block_class_initfn,
};
#endif

/* Legacy "scsi-disk" device (disk or CD-ROM depending on the drive). */
static Property scsi_disk_properties[] = {
    DEFINE_SCSI_DISK_PROPERTIES(),
    DEFINE_PROP_BIT("removable", SCSIDiskState, features,
                    SCSI_DISK_F_REMOVABLE, false),
    DEFINE_PROP_BIT("dpofua", SCSIDiskState, features,
                    SCSI_DISK_F_DPOFUA, false),
    DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
    DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
    DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
    DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size,
                       DEFAULT_MAX_UNMAP_SIZE),
    DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
                       DEFAULT_MAX_IO_SIZE),
    DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version,
                      5),
    DEFINE_PROP_END_OF_LIST(),
};

static void scsi_disk_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);

    sc->realize = scsi_disk_realize;
    sc->alloc_req = scsi_new_request;
    sc->unit_attention_reported = scsi_disk_unit_attention_reported;
    dc->fw_name = "disk";
    dc->desc = "virtual SCSI disk or CD-ROM (legacy)";
    dc->reset = scsi_disk_reset;
    dc->props = scsi_disk_properties;
    dc->vmsd = &vmstate_scsi_disk_state;
}

static const TypeInfo scsi_disk_info = {
    .name = "scsi-disk",
    .parent = TYPE_SCSI_DISK_BASE,
    .class_init = scsi_disk_class_initfn,
};

static void scsi_disk_register_types(void)
{
    type_register_static(&scsi_disk_base_info);
    type_register_static(&scsi_hd_info);
    type_register_static(&scsi_cd_info);
#ifdef __linux__
    type_register_static(&scsi_block_info);
#endif
    type_register_static(&scsi_disk_info);
}

type_init(scsi_disk_register_types)