1 /* 2 * SCSI Device emulation 3 * 4 * Copyright (c) 2006 CodeSourcery. 5 * Based on code by Fabrice Bellard 6 * 7 * Written by Paul Brook 8 * Modifications: 9 * 2009-Dec-12 Artyom Tarasenko : implemented stamdard inquiry for the case 10 * when the allocation length of CDB is smaller 11 * than 36. 12 * 2009-Oct-13 Artyom Tarasenko : implemented the block descriptor in the 13 * MODE SENSE response. 14 * 15 * This code is licensed under the LGPL. 16 * 17 * Note that this file only handles the SCSI architecture model and device 18 * commands. Emulation of interface/link layer protocols is handled by 19 * the host adapter emulator. 20 */ 21 22 //#define DEBUG_SCSI 23 24 #ifdef DEBUG_SCSI 25 #define DPRINTF(fmt, ...) \ 26 do { printf("scsi-disk: " fmt , ## __VA_ARGS__); } while (0) 27 #else 28 #define DPRINTF(fmt, ...) do {} while(0) 29 #endif 30 31 #include "qemu/osdep.h" 32 #include "qemu/units.h" 33 #include "qapi/error.h" 34 #include "qemu/error-report.h" 35 #include "hw/scsi/scsi.h" 36 #include "scsi/constants.h" 37 #include "sysemu/sysemu.h" 38 #include "sysemu/block-backend.h" 39 #include "sysemu/blockdev.h" 40 #include "hw/block/block.h" 41 #include "sysemu/dma.h" 42 #include "qemu/cutils.h" 43 44 #ifdef __linux 45 #include <scsi/sg.h> 46 #endif 47 48 #define SCSI_WRITE_SAME_MAX (512 * KiB) 49 #define SCSI_DMA_BUF_SIZE (128 * KiB) 50 #define SCSI_MAX_INQUIRY_LEN 256 51 #define SCSI_MAX_MODE_LEN 256 52 53 #define DEFAULT_DISCARD_GRANULARITY (4 * KiB) 54 #define DEFAULT_MAX_UNMAP_SIZE (1 * GiB) 55 #define DEFAULT_MAX_IO_SIZE INT_MAX /* 2 GB - 1 block */ 56 57 #define TYPE_SCSI_DISK_BASE "scsi-disk-base" 58 59 #define SCSI_DISK_BASE(obj) \ 60 OBJECT_CHECK(SCSIDiskState, (obj), TYPE_SCSI_DISK_BASE) 61 #define SCSI_DISK_BASE_CLASS(klass) \ 62 OBJECT_CLASS_CHECK(SCSIDiskClass, (klass), TYPE_SCSI_DISK_BASE) 63 #define SCSI_DISK_BASE_GET_CLASS(obj) \ 64 OBJECT_GET_CLASS(SCSIDiskClass, (obj), TYPE_SCSI_DISK_BASE) 65 66 typedef struct SCSIDiskClass { 67 SCSIDeviceClass parent_class; 68 DMAIOFunc *dma_readv; 69 DMAIOFunc *dma_writev; 70 bool (*need_fua_emulation)(SCSICommand *cmd); 71 } SCSIDiskClass; 72 73 typedef struct SCSIDiskReq { 74 SCSIRequest req; 75 /* Both sector and sector_count are in terms of qemu 512 byte blocks. */ 76 uint64_t sector; 77 uint32_t sector_count; 78 uint32_t buflen; 79 bool started; 80 bool need_fua_emulation; 81 struct iovec iov; 82 QEMUIOVector qiov; 83 BlockAcctCookie acct; 84 unsigned char *status; 85 } SCSIDiskReq; 86 87 #define SCSI_DISK_F_REMOVABLE 0 88 #define SCSI_DISK_F_DPOFUA 1 89 #define SCSI_DISK_F_NO_REMOVABLE_DEVOPS 2 90 91 typedef struct SCSIDiskState 92 { 93 SCSIDevice qdev; 94 uint32_t features; 95 bool media_changed; 96 bool media_event; 97 bool eject_request; 98 uint16_t port_index; 99 uint64_t max_unmap_size; 100 uint64_t max_io_size; 101 QEMUBH *bh; 102 char *version; 103 char *serial; 104 char *vendor; 105 char *product; 106 bool tray_open; 107 bool tray_locked; 108 /* 109 * 0x0000 - rotation rate not reported 110 * 0x0001 - non-rotating medium (SSD) 111 * 0x0002-0x0400 - reserved 112 * 0x0401-0xffe - rotations per minute 113 * 0xffff - reserved 114 */ 115 uint16_t rotation_rate; 116 } SCSIDiskState; 117 118 static bool scsi_handle_rw_error(SCSIDiskReq *r, int error, bool acct_failed); 119 120 static void scsi_free_request(SCSIRequest *req) 121 { 122 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 123 124 qemu_vfree(r->iov.iov_base); 125 } 126 127 /* Helper function for command completion with sense. */ 128 static void scsi_check_condition(SCSIDiskReq *r, SCSISense sense) 129 { 130 DPRINTF("Command complete tag=0x%x sense=%d/%d/%d\n", 131 r->req.tag, sense.key, sense.asc, sense.ascq); 132 scsi_req_build_sense(&r->req, sense); 133 scsi_req_complete(&r->req, CHECK_CONDITION); 134 } 135 136 static void scsi_init_iovec(SCSIDiskReq *r, size_t size) 137 { 138 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 139 140 if (!r->iov.iov_base) { 141 r->buflen = size; 142 r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen); 143 } 144 r->iov.iov_len = MIN(r->sector_count * 512, r->buflen); 145 qemu_iovec_init_external(&r->qiov, &r->iov, 1); 146 } 147 148 static void scsi_disk_save_request(QEMUFile *f, SCSIRequest *req) 149 { 150 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 151 152 qemu_put_be64s(f, &r->sector); 153 qemu_put_be32s(f, &r->sector_count); 154 qemu_put_be32s(f, &r->buflen); 155 if (r->buflen) { 156 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { 157 qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len); 158 } else if (!req->retry) { 159 uint32_t len = r->iov.iov_len; 160 qemu_put_be32s(f, &len); 161 qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len); 162 } 163 } 164 } 165 166 static void scsi_disk_load_request(QEMUFile *f, SCSIRequest *req) 167 { 168 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 169 170 qemu_get_be64s(f, &r->sector); 171 qemu_get_be32s(f, &r->sector_count); 172 qemu_get_be32s(f, &r->buflen); 173 if (r->buflen) { 174 scsi_init_iovec(r, r->buflen); 175 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { 176 qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len); 177 } else if (!r->req.retry) { 178 uint32_t len; 179 qemu_get_be32s(f, &len); 180 r->iov.iov_len = len; 181 assert(r->iov.iov_len <= r->buflen); 182 qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len); 183 } 184 } 185 186 qemu_iovec_init_external(&r->qiov, &r->iov, 1); 187 } 188 189 static bool scsi_disk_req_check_error(SCSIDiskReq *r, int ret, bool acct_failed) 190 { 191 if (r->req.io_canceled) { 192 scsi_req_cancel_complete(&r->req); 193 return true; 194 } 195 196 if (ret < 0 || (r->status && *r->status)) { 197 return scsi_handle_rw_error(r, -ret, acct_failed); 198 } 199 200 return false; 201 } 202 203 static void scsi_aio_complete(void *opaque, int ret) 204 { 205 SCSIDiskReq *r = (SCSIDiskReq *)opaque; 206 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 207 208 assert(r->req.aiocb != NULL); 209 r->req.aiocb = NULL; 210 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); 211 if (scsi_disk_req_check_error(r, ret, true)) { 212 goto done; 213 } 214 215 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); 216 scsi_req_complete(&r->req, GOOD); 217 218 done: 219 aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); 220 scsi_req_unref(&r->req); 221 } 222 223 static bool scsi_is_cmd_fua(SCSICommand *cmd) 224 { 225 switch (cmd->buf[0]) { 226 case READ_10: 227 case READ_12: 228 case READ_16: 229 case WRITE_10: 230 case WRITE_12: 231 case WRITE_16: 232 return (cmd->buf[1] & 8) != 0; 233 234 case VERIFY_10: 235 case VERIFY_12: 236 case VERIFY_16: 237 case WRITE_VERIFY_10: 238 case WRITE_VERIFY_12: 239 case WRITE_VERIFY_16: 240 return true; 241 242 case READ_6: 243 case WRITE_6: 244 default: 245 return false; 246 } 247 } 248 249 static void scsi_write_do_fua(SCSIDiskReq *r) 250 { 251 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 252 253 assert(r->req.aiocb == NULL); 254 assert(!r->req.io_canceled); 255 256 if (r->need_fua_emulation) { 257 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0, 258 BLOCK_ACCT_FLUSH); 259 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r); 260 return; 261 } 262 263 scsi_req_complete(&r->req, GOOD); 264 scsi_req_unref(&r->req); 265 } 266 267 static void scsi_dma_complete_noio(SCSIDiskReq *r, int ret) 268 { 269 assert(r->req.aiocb == NULL); 270 if (scsi_disk_req_check_error(r, ret, false)) { 271 goto done; 272 } 273 274 r->sector += r->sector_count; 275 r->sector_count = 0; 276 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { 277 scsi_write_do_fua(r); 278 return; 279 } else { 280 scsi_req_complete(&r->req, GOOD); 281 } 282 283 done: 284 scsi_req_unref(&r->req); 285 } 286 287 static void scsi_dma_complete(void *opaque, int ret) 288 { 289 SCSIDiskReq *r = (SCSIDiskReq *)opaque; 290 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 291 292 assert(r->req.aiocb != NULL); 293 r->req.aiocb = NULL; 294 295 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); 296 if (ret < 0) { 297 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct); 298 } else { 299 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); 300 } 301 scsi_dma_complete_noio(r, ret); 302 aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); 303 } 304 305 static void scsi_read_complete(void * opaque, int ret) 306 { 307 SCSIDiskReq *r = (SCSIDiskReq *)opaque; 308 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 309 int n; 310 311 assert(r->req.aiocb != NULL); 312 r->req.aiocb = NULL; 313 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); 314 if (scsi_disk_req_check_error(r, ret, true)) { 315 goto done; 316 } 317 318 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); 319 DPRINTF("Data ready tag=0x%x len=%zd\n", r->req.tag, r->qiov.size); 320 321 n = r->qiov.size / 512; 322 r->sector += n; 323 r->sector_count -= n; 324 scsi_req_data(&r->req, r->qiov.size); 325 326 done: 327 scsi_req_unref(&r->req); 328 aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); 329 } 330 331 /* Actually issue a read to the block device. */ 332 static void scsi_do_read(SCSIDiskReq *r, int ret) 333 { 334 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 335 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s)); 336 337 assert (r->req.aiocb == NULL); 338 if (scsi_disk_req_check_error(r, ret, false)) { 339 goto done; 340 } 341 342 /* The request is used as the AIO opaque value, so add a ref. */ 343 scsi_req_ref(&r->req); 344 345 if (r->req.sg) { 346 dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_READ); 347 r->req.resid -= r->req.sg->size; 348 r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk), 349 r->req.sg, r->sector << BDRV_SECTOR_BITS, 350 BDRV_SECTOR_SIZE, 351 sdc->dma_readv, r, scsi_dma_complete, r, 352 DMA_DIRECTION_FROM_DEVICE); 353 } else { 354 scsi_init_iovec(r, SCSI_DMA_BUF_SIZE); 355 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 356 r->qiov.size, BLOCK_ACCT_READ); 357 r->req.aiocb = sdc->dma_readv(r->sector << BDRV_SECTOR_BITS, &r->qiov, 358 scsi_read_complete, r, r); 359 } 360 361 done: 362 scsi_req_unref(&r->req); 363 } 364 365 static void scsi_do_read_cb(void *opaque, int ret) 366 { 367 SCSIDiskReq *r = (SCSIDiskReq *)opaque; 368 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 369 370 assert (r->req.aiocb != NULL); 371 r->req.aiocb = NULL; 372 373 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); 374 if (ret < 0) { 375 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct); 376 } else { 377 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); 378 } 379 scsi_do_read(opaque, ret); 380 aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); 381 } 382 383 /* Read more data from scsi device into buffer. */ 384 static void scsi_read_data(SCSIRequest *req) 385 { 386 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 387 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 388 bool first; 389 390 DPRINTF("Read sector_count=%d\n", r->sector_count); 391 if (r->sector_count == 0) { 392 /* This also clears the sense buffer for REQUEST SENSE. */ 393 scsi_req_complete(&r->req, GOOD); 394 return; 395 } 396 397 /* No data transfer may already be in progress */ 398 assert(r->req.aiocb == NULL); 399 400 /* The request is used as the AIO opaque value, so add a ref. */ 401 scsi_req_ref(&r->req); 402 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { 403 DPRINTF("Data transfer direction invalid\n"); 404 scsi_read_complete(r, -EINVAL); 405 return; 406 } 407 408 if (!blk_is_available(req->dev->conf.blk)) { 409 scsi_read_complete(r, -ENOMEDIUM); 410 return; 411 } 412 413 first = !r->started; 414 r->started = true; 415 if (first && r->need_fua_emulation) { 416 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0, 417 BLOCK_ACCT_FLUSH); 418 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_do_read_cb, r); 419 } else { 420 scsi_do_read(r, 0); 421 } 422 } 423 424 /* 425 * scsi_handle_rw_error has two return values. False means that the error 426 * must be ignored, true means that the error has been processed and the 427 * caller should not do anything else for this request. Note that 428 * scsi_handle_rw_error always manages its reference counts, independent 429 * of the return value. 430 */ 431 static bool scsi_handle_rw_error(SCSIDiskReq *r, int error, bool acct_failed) 432 { 433 bool is_read = (r->req.cmd.mode == SCSI_XFER_FROM_DEV); 434 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 435 BlockErrorAction action = blk_get_error_action(s->qdev.conf.blk, 436 is_read, error); 437 438 if (action == BLOCK_ERROR_ACTION_REPORT) { 439 if (acct_failed) { 440 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct); 441 } 442 switch (error) { 443 case 0: 444 /* A passthrough command has run and has produced sense data; check 445 * whether the error has to be handled by the guest or should rather 446 * pause the host. 447 */ 448 assert(r->status && *r->status); 449 error = scsi_sense_buf_to_errno(r->req.sense, sizeof(r->req.sense)); 450 if (error == ECANCELED || error == EAGAIN || error == ENOTCONN || 451 error == 0) { 452 /* These errors are handled by guest. */ 453 scsi_req_complete(&r->req, *r->status); 454 return true; 455 } 456 break; 457 case ENOMEDIUM: 458 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM)); 459 break; 460 case ENOMEM: 461 scsi_check_condition(r, SENSE_CODE(TARGET_FAILURE)); 462 break; 463 case EINVAL: 464 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 465 break; 466 case ENOSPC: 467 scsi_check_condition(r, SENSE_CODE(SPACE_ALLOC_FAILED)); 468 break; 469 default: 470 scsi_check_condition(r, SENSE_CODE(IO_ERROR)); 471 break; 472 } 473 } 474 475 blk_error_action(s->qdev.conf.blk, action, is_read, error); 476 if (action == BLOCK_ERROR_ACTION_IGNORE) { 477 scsi_req_complete(&r->req, 0); 478 return true; 479 } 480 481 if (action == BLOCK_ERROR_ACTION_STOP) { 482 scsi_req_retry(&r->req); 483 } 484 return false; 485 } 486 487 static void scsi_write_complete_noio(SCSIDiskReq *r, int ret) 488 { 489 uint32_t n; 490 491 assert (r->req.aiocb == NULL); 492 if (scsi_disk_req_check_error(r, ret, false)) { 493 goto done; 494 } 495 496 n = r->qiov.size / 512; 497 r->sector += n; 498 r->sector_count -= n; 499 if (r->sector_count == 0) { 500 scsi_write_do_fua(r); 501 return; 502 } else { 503 scsi_init_iovec(r, SCSI_DMA_BUF_SIZE); 504 DPRINTF("Write complete tag=0x%x more=%zd\n", r->req.tag, r->qiov.size); 505 scsi_req_data(&r->req, r->qiov.size); 506 } 507 508 done: 509 scsi_req_unref(&r->req); 510 } 511 512 static void scsi_write_complete(void * opaque, int ret) 513 { 514 SCSIDiskReq *r = (SCSIDiskReq *)opaque; 515 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 516 517 assert (r->req.aiocb != NULL); 518 r->req.aiocb = NULL; 519 520 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); 521 if (ret < 0) { 522 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct); 523 } else { 524 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); 525 } 526 scsi_write_complete_noio(r, ret); 527 aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); 528 } 529 530 static void scsi_write_data(SCSIRequest *req) 531 { 532 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 533 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 534 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s)); 535 536 /* No data transfer may already be in progress */ 537 assert(r->req.aiocb == NULL); 538 539 /* The request is used as the AIO opaque value, so add a ref. */ 540 scsi_req_ref(&r->req); 541 if (r->req.cmd.mode != SCSI_XFER_TO_DEV) { 542 DPRINTF("Data transfer direction invalid\n"); 543 scsi_write_complete_noio(r, -EINVAL); 544 return; 545 } 546 547 if (!r->req.sg && !r->qiov.size) { 548 /* Called for the first time. Ask the driver to send us more data. */ 549 r->started = true; 550 scsi_write_complete_noio(r, 0); 551 return; 552 } 553 if (!blk_is_available(req->dev->conf.blk)) { 554 scsi_write_complete_noio(r, -ENOMEDIUM); 555 return; 556 } 557 558 if (r->req.cmd.buf[0] == VERIFY_10 || r->req.cmd.buf[0] == VERIFY_12 || 559 r->req.cmd.buf[0] == VERIFY_16) { 560 if (r->req.sg) { 561 scsi_dma_complete_noio(r, 0); 562 } else { 563 scsi_write_complete_noio(r, 0); 564 } 565 return; 566 } 567 568 if (r->req.sg) { 569 dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_WRITE); 570 r->req.resid -= r->req.sg->size; 571 r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk), 572 r->req.sg, r->sector << BDRV_SECTOR_BITS, 573 BDRV_SECTOR_SIZE, 574 sdc->dma_writev, r, scsi_dma_complete, r, 575 DMA_DIRECTION_TO_DEVICE); 576 } else { 577 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 578 r->qiov.size, BLOCK_ACCT_WRITE); 579 r->req.aiocb = sdc->dma_writev(r->sector << BDRV_SECTOR_BITS, &r->qiov, 580 scsi_write_complete, r, r); 581 } 582 } 583 584 /* Return a pointer to the data buffer. */ 585 static uint8_t *scsi_get_buf(SCSIRequest *req) 586 { 587 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 588 589 return (uint8_t *)r->iov.iov_base; 590 } 591 592 int scsi_disk_emulate_vpd_page(SCSIRequest *req, uint8_t *outbuf) 593 { 594 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 595 uint8_t page_code = req->cmd.buf[2]; 596 int start, buflen = 0; 597 598 outbuf[buflen++] = s->qdev.type & 0x1f; 599 outbuf[buflen++] = page_code; 600 outbuf[buflen++] = 0x00; 601 outbuf[buflen++] = 0x00; 602 start = buflen; 603 604 switch (page_code) { 605 case 0x00: /* Supported page codes, mandatory */ 606 { 607 DPRINTF("Inquiry EVPD[Supported pages] " 608 "buffer size %zd\n", req->cmd.xfer); 609 outbuf[buflen++] = 0x00; /* list of supported pages (this page) */ 610 if (s->serial) { 611 outbuf[buflen++] = 0x80; /* unit serial number */ 612 } 613 outbuf[buflen++] = 0x83; /* device identification */ 614 if (s->qdev.type == TYPE_DISK) { 615 outbuf[buflen++] = 0xb0; /* block limits */ 616 outbuf[buflen++] = 0xb1; /* block device characteristics */ 617 outbuf[buflen++] = 0xb2; /* thin provisioning */ 618 } 619 break; 620 } 621 case 0x80: /* Device serial number, optional */ 622 { 623 int l; 624 625 if (!s->serial) { 626 DPRINTF("Inquiry (EVPD[Serial number] not supported\n"); 627 return -1; 628 } 629 630 l = strlen(s->serial); 631 if (l > 36) { 632 l = 36; 633 } 634 635 DPRINTF("Inquiry EVPD[Serial number] " 636 "buffer size %zd\n", req->cmd.xfer); 637 memcpy(outbuf + buflen, s->serial, l); 638 buflen += l; 639 break; 640 } 641 642 case 0x83: /* Device identification page, mandatory */ 643 { 644 const char *str = s->serial ?: blk_name(s->qdev.conf.blk); 645 int max_len = s->serial ? 20 : 255 - 8; 646 int id_len = strlen(str); 647 648 if (id_len > max_len) { 649 id_len = max_len; 650 } 651 DPRINTF("Inquiry EVPD[Device identification] " 652 "buffer size %zd\n", req->cmd.xfer); 653 654 outbuf[buflen++] = 0x2; /* ASCII */ 655 outbuf[buflen++] = 0; /* not officially assigned */ 656 outbuf[buflen++] = 0; /* reserved */ 657 outbuf[buflen++] = id_len; /* length of data following */ 658 memcpy(outbuf + buflen, str, id_len); 659 buflen += id_len; 660 661 if (s->qdev.wwn) { 662 outbuf[buflen++] = 0x1; /* Binary */ 663 outbuf[buflen++] = 0x3; /* NAA */ 664 outbuf[buflen++] = 0; /* reserved */ 665 outbuf[buflen++] = 8; 666 stq_be_p(&outbuf[buflen], s->qdev.wwn); 667 buflen += 8; 668 } 669 670 if (s->qdev.port_wwn) { 671 outbuf[buflen++] = 0x61; /* SAS / Binary */ 672 outbuf[buflen++] = 0x93; /* PIV / Target port / NAA */ 673 outbuf[buflen++] = 0; /* reserved */ 674 outbuf[buflen++] = 8; 675 stq_be_p(&outbuf[buflen], s->qdev.port_wwn); 676 buflen += 8; 677 } 678 679 if (s->port_index) { 680 outbuf[buflen++] = 0x61; /* SAS / Binary */ 681 682 /* PIV/Target port/relative target port */ 683 outbuf[buflen++] = 0x94; 684 685 outbuf[buflen++] = 0; /* reserved */ 686 outbuf[buflen++] = 4; 687 stw_be_p(&outbuf[buflen + 2], s->port_index); 688 buflen += 4; 689 } 690 break; 691 } 692 case 0xb0: /* block limits */ 693 { 694 unsigned int unmap_sectors = 695 s->qdev.conf.discard_granularity / s->qdev.blocksize; 696 unsigned int min_io_size = 697 s->qdev.conf.min_io_size / s->qdev.blocksize; 698 unsigned int opt_io_size = 699 s->qdev.conf.opt_io_size / s->qdev.blocksize; 700 unsigned int max_unmap_sectors = 701 s->max_unmap_size / s->qdev.blocksize; 702 unsigned int max_io_sectors = 703 s->max_io_size / s->qdev.blocksize; 704 705 if (s->qdev.type == TYPE_ROM) { 706 DPRINTF("Inquiry (EVPD[%02X] not supported for CDROM\n", 707 page_code); 708 return -1; 709 } 710 if (s->qdev.type == TYPE_DISK) { 711 int max_transfer_blk = blk_get_max_transfer(s->qdev.conf.blk); 712 int max_io_sectors_blk = 713 max_transfer_blk / s->qdev.blocksize; 714 715 max_io_sectors = 716 MIN_NON_ZERO(max_io_sectors_blk, max_io_sectors); 717 718 /* min_io_size and opt_io_size can't be greater than 719 * max_io_sectors */ 720 if (min_io_size) { 721 min_io_size = MIN(min_io_size, max_io_sectors); 722 } 723 if (opt_io_size) { 724 opt_io_size = MIN(opt_io_size, max_io_sectors); 725 } 726 } 727 /* required VPD size with unmap support */ 728 buflen = 0x40; 729 memset(outbuf + 4, 0, buflen - 4); 730 731 outbuf[4] = 0x1; /* wsnz */ 732 733 /* optimal transfer length granularity */ 734 outbuf[6] = (min_io_size >> 8) & 0xff; 735 outbuf[7] = min_io_size & 0xff; 736 737 /* maximum transfer length */ 738 outbuf[8] = (max_io_sectors >> 24) & 0xff; 739 outbuf[9] = (max_io_sectors >> 16) & 0xff; 740 outbuf[10] = (max_io_sectors >> 8) & 0xff; 741 outbuf[11] = max_io_sectors & 0xff; 742 743 /* optimal transfer length */ 744 outbuf[12] = (opt_io_size >> 24) & 0xff; 745 outbuf[13] = (opt_io_size >> 16) & 0xff; 746 outbuf[14] = (opt_io_size >> 8) & 0xff; 747 outbuf[15] = opt_io_size & 0xff; 748 749 /* max unmap LBA count, default is 1GB */ 750 outbuf[20] = (max_unmap_sectors >> 24) & 0xff; 751 outbuf[21] = (max_unmap_sectors >> 16) & 0xff; 752 outbuf[22] = (max_unmap_sectors >> 8) & 0xff; 753 outbuf[23] = max_unmap_sectors & 0xff; 754 755 /* max unmap descriptors, 255 fit in 4 kb with an 8-byte header */ 756 outbuf[24] = 0; 757 outbuf[25] = 0; 758 outbuf[26] = 0; 759 outbuf[27] = 255; 760 761 /* optimal unmap granularity */ 762 outbuf[28] = (unmap_sectors >> 24) & 0xff; 763 outbuf[29] = (unmap_sectors >> 16) & 0xff; 764 outbuf[30] = (unmap_sectors >> 8) & 0xff; 765 outbuf[31] = unmap_sectors & 0xff; 766 767 /* max write same size */ 768 outbuf[36] = 0; 769 outbuf[37] = 0; 770 outbuf[38] = 0; 771 outbuf[39] = 0; 772 773 outbuf[40] = (max_io_sectors >> 24) & 0xff; 774 outbuf[41] = (max_io_sectors >> 16) & 0xff; 775 outbuf[42] = (max_io_sectors >> 8) & 0xff; 776 outbuf[43] = max_io_sectors & 0xff; 777 break; 778 } 779 case 0xb1: /* block device characteristics */ 780 { 781 buflen = 0x40; 782 outbuf[4] = (s->rotation_rate >> 8) & 0xff; 783 outbuf[5] = s->rotation_rate & 0xff; 784 outbuf[6] = 0; /* PRODUCT TYPE */ 785 outbuf[7] = 0; /* WABEREQ | WACEREQ | NOMINAL FORM FACTOR */ 786 outbuf[8] = 0; /* VBULS */ 787 break; 788 } 789 case 0xb2: /* thin provisioning */ 790 { 791 buflen = 8; 792 outbuf[4] = 0; 793 outbuf[5] = 0xe0; /* unmap & write_same 10/16 all supported */ 794 outbuf[6] = s->qdev.conf.discard_granularity ? 2 : 1; 795 outbuf[7] = 0; 796 break; 797 } 798 default: 799 return -1; 800 } 801 /* done with EVPD */ 802 assert(buflen - start <= 255); 803 outbuf[start - 1] = buflen - start; 804 return buflen; 805 } 806 807 static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf) 808 { 809 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 810 int buflen = 0; 811 812 if (req->cmd.buf[1] & 0x1) { 813 /* Vital product data */ 814 return scsi_disk_emulate_vpd_page(req, outbuf); 815 } 816 817 /* Standard INQUIRY data */ 818 if (req->cmd.buf[2] != 0) { 819 return -1; 820 } 821 822 /* PAGE CODE == 0 */ 823 buflen = req->cmd.xfer; 824 if (buflen > SCSI_MAX_INQUIRY_LEN) { 825 buflen = SCSI_MAX_INQUIRY_LEN; 826 } 827 828 outbuf[0] = s->qdev.type & 0x1f; 829 outbuf[1] = (s->features & (1 << SCSI_DISK_F_REMOVABLE)) ? 0x80 : 0; 830 831 strpadcpy((char *) &outbuf[16], 16, s->product, ' '); 832 strpadcpy((char *) &outbuf[8], 8, s->vendor, ' '); 833 834 memset(&outbuf[32], 0, 4); 835 memcpy(&outbuf[32], s->version, MIN(4, strlen(s->version))); 836 /* 837 * We claim conformance to SPC-3, which is required for guests 838 * to ask for modern features like READ CAPACITY(16) or the 839 * block characteristics VPD page by default. Not all of SPC-3 840 * is actually implemented, but we're good enough. 841 */ 842 outbuf[2] = s->qdev.default_scsi_version; 843 outbuf[3] = 2 | 0x10; /* Format 2, HiSup */ 844 845 if (buflen > 36) { 846 outbuf[4] = buflen - 5; /* Additional Length = (Len - 1) - 4 */ 847 } else { 848 /* If the allocation length of CDB is too small, 849 the additional length is not adjusted */ 850 outbuf[4] = 36 - 5; 851 } 852 853 /* Sync data transfer and TCQ. */ 854 outbuf[7] = 0x10 | (req->bus->info->tcq ? 0x02 : 0); 855 return buflen; 856 } 857 858 static inline bool media_is_dvd(SCSIDiskState *s) 859 { 860 uint64_t nb_sectors; 861 if (s->qdev.type != TYPE_ROM) { 862 return false; 863 } 864 if (!blk_is_available(s->qdev.conf.blk)) { 865 return false; 866 } 867 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 868 return nb_sectors > CD_MAX_SECTORS; 869 } 870 871 static inline bool media_is_cd(SCSIDiskState *s) 872 { 873 uint64_t nb_sectors; 874 if (s->qdev.type != TYPE_ROM) { 875 return false; 876 } 877 if (!blk_is_available(s->qdev.conf.blk)) { 878 return false; 879 } 880 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 881 return nb_sectors <= CD_MAX_SECTORS; 882 } 883 884 static int scsi_read_disc_information(SCSIDiskState *s, SCSIDiskReq *r, 885 uint8_t *outbuf) 886 { 887 uint8_t type = r->req.cmd.buf[1] & 7; 888 889 if (s->qdev.type != TYPE_ROM) { 890 return -1; 891 } 892 893 /* Types 1/2 are only defined for Blu-Ray. */ 894 if (type != 0) { 895 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 896 return -1; 897 } 898 899 memset(outbuf, 0, 34); 900 outbuf[1] = 32; 901 outbuf[2] = 0xe; /* last session complete, disc finalized */ 902 outbuf[3] = 1; /* first track on disc */ 903 outbuf[4] = 1; /* # of sessions */ 904 outbuf[5] = 1; /* first track of last session */ 905 outbuf[6] = 1; /* last track of last session */ 906 outbuf[7] = 0x20; /* unrestricted use */ 907 outbuf[8] = 0x00; /* CD-ROM or DVD-ROM */ 908 /* 9-10-11: most significant byte corresponding bytes 4-5-6 */ 909 /* 12-23: not meaningful for CD-ROM or DVD-ROM */ 910 /* 24-31: disc bar code */ 911 /* 32: disc application code */ 912 /* 33: number of OPC tables */ 913 914 return 34; 915 } 916 917 static int scsi_read_dvd_structure(SCSIDiskState *s, SCSIDiskReq *r, 918 uint8_t *outbuf) 919 { 920 static const int rds_caps_size[5] = { 921 [0] = 2048 + 4, 922 [1] = 4 + 4, 923 [3] = 188 + 4, 924 [4] = 2048 + 4, 925 }; 926 927 uint8_t media = r->req.cmd.buf[1]; 928 uint8_t layer = r->req.cmd.buf[6]; 929 uint8_t format = r->req.cmd.buf[7]; 930 int size = -1; 931 932 if (s->qdev.type != TYPE_ROM) { 933 return -1; 934 } 935 if (media != 0) { 936 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 937 return -1; 938 } 939 940 if (format != 0xff) { 941 if (!blk_is_available(s->qdev.conf.blk)) { 942 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM)); 943 return -1; 944 } 945 if (media_is_cd(s)) { 946 scsi_check_condition(r, SENSE_CODE(INCOMPATIBLE_FORMAT)); 947 return -1; 948 } 949 if (format >= ARRAY_SIZE(rds_caps_size)) { 950 return -1; 951 } 952 size = rds_caps_size[format]; 953 memset(outbuf, 0, size); 954 } 955 956 switch (format) { 957 case 0x00: { 958 /* Physical format information */ 959 uint64_t nb_sectors; 960 if (layer != 0) { 961 goto fail; 962 } 963 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 964 965 outbuf[4] = 1; /* DVD-ROM, part version 1 */ 966 outbuf[5] = 0xf; /* 120mm disc, minimum rate unspecified */ 967 outbuf[6] = 1; /* one layer, read-only (per MMC-2 spec) */ 968 outbuf[7] = 0; /* default densities */ 969 970 stl_be_p(&outbuf[12], (nb_sectors >> 2) - 1); /* end sector */ 971 stl_be_p(&outbuf[16], (nb_sectors >> 2) - 1); /* l0 end sector */ 972 break; 973 } 974 975 case 0x01: /* DVD copyright information, all zeros */ 976 break; 977 978 case 0x03: /* BCA information - invalid field for no BCA info */ 979 return -1; 980 981 case 0x04: /* DVD disc manufacturing information, all zeros */ 982 break; 983 984 case 0xff: { /* List capabilities */ 985 int i; 986 size = 4; 987 for (i = 0; i < ARRAY_SIZE(rds_caps_size); i++) { 988 if (!rds_caps_size[i]) { 989 continue; 990 } 991 outbuf[size] = i; 992 outbuf[size + 1] = 0x40; /* Not writable, readable */ 993 stw_be_p(&outbuf[size + 2], rds_caps_size[i]); 994 size += 4; 995 } 996 break; 997 } 998 999 default: 1000 return -1; 1001 } 1002 1003 /* Size of buffer, not including 2 byte size field */ 1004 stw_be_p(outbuf, size - 2); 1005 return size; 1006 1007 fail: 1008 return -1; 1009 } 1010 1011 static int scsi_event_status_media(SCSIDiskState *s, uint8_t *outbuf) 1012 { 1013 uint8_t event_code, media_status; 1014 1015 media_status = 0; 1016 if (s->tray_open) { 1017 media_status = MS_TRAY_OPEN; 1018 } else if (blk_is_inserted(s->qdev.conf.blk)) { 1019 media_status = MS_MEDIA_PRESENT; 1020 } 1021 1022 /* Event notification descriptor */ 1023 event_code = MEC_NO_CHANGE; 1024 if (media_status != MS_TRAY_OPEN) { 1025 if (s->media_event) { 1026 event_code = MEC_NEW_MEDIA; 1027 s->media_event = false; 1028 } else if (s->eject_request) { 1029 event_code = MEC_EJECT_REQUESTED; 1030 s->eject_request = false; 1031 } 1032 } 1033 1034 outbuf[0] = event_code; 1035 outbuf[1] = media_status; 1036 1037 /* These fields are reserved, just clear them. */ 1038 outbuf[2] = 0; 1039 outbuf[3] = 0; 1040 return 4; 1041 } 1042 1043 static int scsi_get_event_status_notification(SCSIDiskState *s, SCSIDiskReq *r, 1044 uint8_t *outbuf) 1045 { 1046 int size; 1047 uint8_t *buf = r->req.cmd.buf; 1048 uint8_t notification_class_request = buf[4]; 1049 if (s->qdev.type != TYPE_ROM) { 1050 return -1; 1051 } 1052 if ((buf[1] & 1) == 0) { 1053 /* asynchronous */ 1054 return -1; 1055 } 1056 1057 size = 4; 1058 outbuf[0] = outbuf[1] = 0; 1059 outbuf[3] = 1 << GESN_MEDIA; /* supported events */ 1060 if (notification_class_request & (1 << GESN_MEDIA)) { 1061 outbuf[2] = GESN_MEDIA; 1062 size += scsi_event_status_media(s, &outbuf[size]); 1063 } else { 1064 outbuf[2] = 0x80; 1065 } 1066 stw_be_p(outbuf, size - 4); 1067 return size; 1068 } 1069 1070 static int scsi_get_configuration(SCSIDiskState *s, uint8_t *outbuf) 1071 { 1072 int current; 1073 1074 if (s->qdev.type != TYPE_ROM) { 1075 return -1; 1076 } 1077 1078 if (media_is_dvd(s)) { 1079 current = MMC_PROFILE_DVD_ROM; 1080 } else if (media_is_cd(s)) { 1081 current = MMC_PROFILE_CD_ROM; 1082 } else { 1083 current = MMC_PROFILE_NONE; 1084 } 1085 1086 memset(outbuf, 0, 40); 1087 stl_be_p(&outbuf[0], 36); /* Bytes after the data length field */ 1088 stw_be_p(&outbuf[6], current); 1089 /* outbuf[8] - outbuf[19]: Feature 0 - Profile list */ 1090 outbuf[10] = 0x03; /* persistent, current */ 1091 outbuf[11] = 8; /* two profiles */ 1092 stw_be_p(&outbuf[12], MMC_PROFILE_DVD_ROM); 1093 outbuf[14] = (current == MMC_PROFILE_DVD_ROM); 1094 stw_be_p(&outbuf[16], MMC_PROFILE_CD_ROM); 1095 outbuf[18] = (current == MMC_PROFILE_CD_ROM); 1096 /* outbuf[20] - outbuf[31]: Feature 1 - Core feature */ 1097 stw_be_p(&outbuf[20], 1); 1098 outbuf[22] = 0x08 | 0x03; /* version 2, persistent, current */ 1099 outbuf[23] = 8; 1100 stl_be_p(&outbuf[24], 1); /* SCSI */ 1101 outbuf[28] = 1; /* DBE = 1, mandatory */ 1102 /* outbuf[32] - outbuf[39]: Feature 3 - Removable media feature */ 1103 stw_be_p(&outbuf[32], 3); 1104 outbuf[34] = 0x08 | 0x03; /* version 2, persistent, current */ 1105 outbuf[35] = 4; 1106 outbuf[36] = 0x39; /* tray, load=1, eject=1, unlocked at powerup, lock=1 */ 1107 /* TODO: Random readable, CD read, DVD read, drive serial number, 1108 power management */ 1109 return 40; 1110 } 1111 1112 static int scsi_emulate_mechanism_status(SCSIDiskState *s, uint8_t *outbuf) 1113 { 1114 if (s->qdev.type != TYPE_ROM) { 1115 return -1; 1116 } 1117 memset(outbuf, 0, 8); 1118 outbuf[5] = 1; /* CD-ROM */ 1119 return 8; 1120 } 1121 1122 static int mode_sense_page(SCSIDiskState *s, int page, uint8_t **p_outbuf, 1123 int page_control) 1124 { 1125 static const int mode_sense_valid[0x3f] = { 1126 [MODE_PAGE_HD_GEOMETRY] = (1 << TYPE_DISK), 1127 [MODE_PAGE_FLEXIBLE_DISK_GEOMETRY] = (1 << TYPE_DISK), 1128 [MODE_PAGE_CACHING] = (1 << TYPE_DISK) | (1 << TYPE_ROM), 1129 [MODE_PAGE_R_W_ERROR] = (1 << TYPE_DISK) | (1 << TYPE_ROM), 1130 [MODE_PAGE_AUDIO_CTL] = (1 << TYPE_ROM), 1131 [MODE_PAGE_CAPABILITIES] = (1 << TYPE_ROM), 1132 }; 1133 1134 uint8_t *p = *p_outbuf + 2; 1135 int length; 1136 1137 if ((mode_sense_valid[page] & (1 << s->qdev.type)) == 0) { 1138 return -1; 1139 } 1140 1141 /* 1142 * If Changeable Values are requested, a mask denoting those mode parameters 1143 * that are changeable shall be returned. As we currently don't support 1144 * parameter changes via MODE_SELECT all bits are returned set to zero. 1145 * The buffer was already menset to zero by the caller of this function. 1146 * 1147 * The offsets here are off by two compared to the descriptions in the 1148 * SCSI specs, because those include a 2-byte header. This is unfortunate, 1149 * but it is done so that offsets are consistent within our implementation 1150 * of MODE SENSE and MODE SELECT. MODE SELECT has to deal with both 1151 * 2-byte and 4-byte headers. 1152 */ 1153 switch (page) { 1154 case MODE_PAGE_HD_GEOMETRY: 1155 length = 0x16; 1156 if (page_control == 1) { /* Changeable Values */ 1157 break; 1158 } 1159 /* if a geometry hint is available, use it */ 1160 p[0] = (s->qdev.conf.cyls >> 16) & 0xff; 1161 p[1] = (s->qdev.conf.cyls >> 8) & 0xff; 1162 p[2] = s->qdev.conf.cyls & 0xff; 1163 p[3] = s->qdev.conf.heads & 0xff; 1164 /* Write precomp start cylinder, disabled */ 1165 p[4] = (s->qdev.conf.cyls >> 16) & 0xff; 1166 p[5] = (s->qdev.conf.cyls >> 8) & 0xff; 1167 p[6] = s->qdev.conf.cyls & 0xff; 1168 /* Reduced current start cylinder, disabled */ 1169 p[7] = (s->qdev.conf.cyls >> 16) & 0xff; 1170 p[8] = (s->qdev.conf.cyls >> 8) & 0xff; 1171 p[9] = s->qdev.conf.cyls & 0xff; 1172 /* Device step rate [ns], 200ns */ 1173 p[10] = 0; 1174 p[11] = 200; 1175 /* Landing zone cylinder */ 1176 p[12] = 0xff; 1177 p[13] = 0xff; 1178 p[14] = 0xff; 1179 /* Medium rotation rate [rpm], 5400 rpm */ 1180 p[18] = (5400 >> 8) & 0xff; 1181 p[19] = 5400 & 0xff; 1182 break; 1183 1184 case MODE_PAGE_FLEXIBLE_DISK_GEOMETRY: 1185 length = 0x1e; 1186 if (page_control == 1) { /* Changeable Values */ 1187 break; 1188 } 1189 /* Transfer rate [kbit/s], 5Mbit/s */ 1190 p[0] = 5000 >> 8; 1191 p[1] = 5000 & 0xff; 1192 /* if a geometry hint is available, use it */ 1193 p[2] = s->qdev.conf.heads & 0xff; 1194 p[3] = s->qdev.conf.secs & 0xff; 1195 p[4] = s->qdev.blocksize >> 8; 1196 p[6] = (s->qdev.conf.cyls >> 8) & 0xff; 1197 p[7] = s->qdev.conf.cyls & 0xff; 1198 /* Write precomp start cylinder, disabled */ 1199 p[8] = (s->qdev.conf.cyls >> 8) & 0xff; 1200 p[9] = s->qdev.conf.cyls & 0xff; 1201 /* Reduced current start cylinder, disabled */ 1202 p[10] = (s->qdev.conf.cyls >> 8) & 0xff; 1203 p[11] = s->qdev.conf.cyls & 0xff; 1204 /* Device step rate [100us], 100us */ 1205 p[12] = 0; 1206 p[13] = 1; 1207 /* Device step pulse width [us], 1us */ 1208 p[14] = 1; 1209 /* Device head settle delay [100us], 100us */ 1210 p[15] = 0; 1211 p[16] = 1; 1212 /* Motor on delay [0.1s], 0.1s */ 1213 p[17] = 1; 1214 /* Motor off delay [0.1s], 0.1s */ 1215 p[18] = 1; 1216 /* Medium rotation rate [rpm], 5400 rpm */ 1217 p[26] = (5400 >> 8) & 0xff; 1218 p[27] = 5400 & 0xff; 1219 break; 1220 1221 case MODE_PAGE_CACHING: 1222 length = 0x12; 1223 if (page_control == 1 || /* Changeable Values */ 1224 blk_enable_write_cache(s->qdev.conf.blk)) { 1225 p[0] = 4; /* WCE */ 1226 } 1227 break; 1228 1229 case MODE_PAGE_R_W_ERROR: 1230 length = 10; 1231 if (page_control == 1) { /* Changeable Values */ 1232 break; 1233 } 1234 p[0] = 0x80; /* Automatic Write Reallocation Enabled */ 1235 if (s->qdev.type == TYPE_ROM) { 1236 p[1] = 0x20; /* Read Retry Count */ 1237 } 1238 break; 1239 1240 case MODE_PAGE_AUDIO_CTL: 1241 length = 14; 1242 break; 1243 1244 case MODE_PAGE_CAPABILITIES: 1245 length = 0x14; 1246 if (page_control == 1) { /* Changeable Values */ 1247 break; 1248 } 1249 1250 p[0] = 0x3b; /* CD-R & CD-RW read */ 1251 p[1] = 0; /* Writing not supported */ 1252 p[2] = 0x7f; /* Audio, composite, digital out, 1253 mode 2 form 1&2, multi session */ 1254 p[3] = 0xff; /* CD DA, DA accurate, RW supported, 1255 RW corrected, C2 errors, ISRC, 1256 UPC, Bar code */ 1257 p[4] = 0x2d | (s->tray_locked ? 2 : 0); 1258 /* Locking supported, jumper present, eject, tray */ 1259 p[5] = 0; /* no volume & mute control, no 1260 changer */ 1261 p[6] = (50 * 176) >> 8; /* 50x read speed */ 1262 p[7] = (50 * 176) & 0xff; 1263 p[8] = 2 >> 8; /* Two volume levels */ 1264 p[9] = 2 & 0xff; 1265 p[10] = 2048 >> 8; /* 2M buffer */ 1266 p[11] = 2048 & 0xff; 1267 p[12] = (16 * 176) >> 8; /* 16x read speed current */ 1268 p[13] = (16 * 176) & 0xff; 1269 p[16] = (16 * 176) >> 8; /* 16x write speed */ 1270 p[17] = (16 * 176) & 0xff; 1271 p[18] = (16 * 176) >> 8; /* 16x write speed current */ 1272 p[19] = (16 * 176) & 0xff; 1273 break; 1274 1275 default: 1276 return -1; 1277 } 1278 1279 assert(length < 256); 1280 (*p_outbuf)[0] = page; 1281 (*p_outbuf)[1] = length; 1282 *p_outbuf += length + 2; 1283 return length + 2; 1284 } 1285 1286 static int scsi_disk_emulate_mode_sense(SCSIDiskReq *r, uint8_t *outbuf) 1287 { 1288 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1289 uint64_t nb_sectors; 1290 bool dbd; 1291 int page, buflen, ret, page_control; 1292 uint8_t *p; 1293 uint8_t dev_specific_param; 1294 1295 dbd = (r->req.cmd.buf[1] & 0x8) != 0; 1296 page = r->req.cmd.buf[2] & 0x3f; 1297 page_control = (r->req.cmd.buf[2] & 0xc0) >> 6; 1298 DPRINTF("Mode Sense(%d) (page %d, xfer %zd, page_control %d)\n", 1299 (r->req.cmd.buf[0] == MODE_SENSE) ? 6 : 10, page, r->req.cmd.xfer, page_control); 1300 memset(outbuf, 0, r->req.cmd.xfer); 1301 p = outbuf; 1302 1303 if (s->qdev.type == TYPE_DISK) { 1304 dev_specific_param = s->features & (1 << SCSI_DISK_F_DPOFUA) ? 0x10 : 0; 1305 if (blk_is_read_only(s->qdev.conf.blk)) { 1306 dev_specific_param |= 0x80; /* Readonly. */ 1307 } 1308 } else { 1309 /* MMC prescribes that CD/DVD drives have no block descriptors, 1310 * and defines no device-specific parameter. */ 1311 dev_specific_param = 0x00; 1312 dbd = true; 1313 } 1314 1315 if (r->req.cmd.buf[0] == MODE_SENSE) { 1316 p[1] = 0; /* Default media type. */ 1317 p[2] = dev_specific_param; 1318 p[3] = 0; /* Block descriptor length. */ 1319 p += 4; 1320 } else { /* MODE_SENSE_10 */ 1321 p[2] = 0; /* Default media type. */ 1322 p[3] = dev_specific_param; 1323 p[6] = p[7] = 0; /* Block descriptor length. */ 1324 p += 8; 1325 } 1326 1327 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 1328 if (!dbd && nb_sectors) { 1329 if (r->req.cmd.buf[0] == MODE_SENSE) { 1330 outbuf[3] = 8; /* Block descriptor length */ 1331 } else { /* MODE_SENSE_10 */ 1332 outbuf[7] = 8; /* Block descriptor length */ 1333 } 1334 nb_sectors /= (s->qdev.blocksize / 512); 1335 if (nb_sectors > 0xffffff) { 1336 nb_sectors = 0; 1337 } 1338 p[0] = 0; /* media density code */ 1339 p[1] = (nb_sectors >> 16) & 0xff; 1340 p[2] = (nb_sectors >> 8) & 0xff; 1341 p[3] = nb_sectors & 0xff; 1342 p[4] = 0; /* reserved */ 1343 p[5] = 0; /* bytes 5-7 are the sector size in bytes */ 1344 p[6] = s->qdev.blocksize >> 8; 1345 p[7] = 0; 1346 p += 8; 1347 } 1348 1349 if (page_control == 3) { 1350 /* Saved Values */ 1351 scsi_check_condition(r, SENSE_CODE(SAVING_PARAMS_NOT_SUPPORTED)); 1352 return -1; 1353 } 1354 1355 if (page == 0x3f) { 1356 for (page = 0; page <= 0x3e; page++) { 1357 mode_sense_page(s, page, &p, page_control); 1358 } 1359 } else { 1360 ret = mode_sense_page(s, page, &p, page_control); 1361 if (ret == -1) { 1362 return -1; 1363 } 1364 } 1365 1366 buflen = p - outbuf; 1367 /* 1368 * The mode data length field specifies the length in bytes of the 1369 * following data that is available to be transferred. The mode data 1370 * length does not include itself. 1371 */ 1372 if (r->req.cmd.buf[0] == MODE_SENSE) { 1373 outbuf[0] = buflen - 1; 1374 } else { /* MODE_SENSE_10 */ 1375 outbuf[0] = ((buflen - 2) >> 8) & 0xff; 1376 outbuf[1] = (buflen - 2) & 0xff; 1377 } 1378 return buflen; 1379 } 1380 1381 static int scsi_disk_emulate_read_toc(SCSIRequest *req, uint8_t *outbuf) 1382 { 1383 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 1384 int start_track, format, msf, toclen; 1385 uint64_t nb_sectors; 1386 1387 msf = req->cmd.buf[1] & 2; 1388 format = req->cmd.buf[2] & 0xf; 1389 start_track = req->cmd.buf[6]; 1390 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 1391 DPRINTF("Read TOC (track %d format %d msf %d)\n", start_track, format, msf >> 1); 1392 nb_sectors /= s->qdev.blocksize / 512; 1393 switch (format) { 1394 case 0: 1395 toclen = cdrom_read_toc(nb_sectors, outbuf, msf, start_track); 1396 break; 1397 case 1: 1398 /* multi session : only a single session defined */ 1399 toclen = 12; 1400 memset(outbuf, 0, 12); 1401 outbuf[1] = 0x0a; 1402 outbuf[2] = 0x01; 1403 outbuf[3] = 0x01; 1404 break; 1405 case 2: 1406 toclen = cdrom_read_toc_raw(nb_sectors, outbuf, msf, start_track); 1407 break; 1408 default: 1409 return -1; 1410 } 1411 return toclen; 1412 } 1413 1414 static int scsi_disk_emulate_start_stop(SCSIDiskReq *r) 1415 { 1416 SCSIRequest *req = &r->req; 1417 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 1418 bool start = req->cmd.buf[4] & 1; 1419 bool loej = req->cmd.buf[4] & 2; /* load on start, eject on !start */ 1420 int pwrcnd = req->cmd.buf[4] & 0xf0; 1421 1422 if (pwrcnd) { 1423 /* eject/load only happens for power condition == 0 */ 1424 return 0; 1425 } 1426 1427 if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) && loej) { 1428 if (!start && !s->tray_open && s->tray_locked) { 1429 scsi_check_condition(r, 1430 blk_is_inserted(s->qdev.conf.blk) 1431 ? SENSE_CODE(ILLEGAL_REQ_REMOVAL_PREVENTED) 1432 : SENSE_CODE(NOT_READY_REMOVAL_PREVENTED)); 1433 return -1; 1434 } 1435 1436 if (s->tray_open != !start) { 1437 blk_eject(s->qdev.conf.blk, !start); 1438 s->tray_open = !start; 1439 } 1440 } 1441 return 0; 1442 } 1443 1444 static void scsi_disk_emulate_read_data(SCSIRequest *req) 1445 { 1446 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 1447 int buflen = r->iov.iov_len; 1448 1449 if (buflen) { 1450 DPRINTF("Read buf_len=%d\n", buflen); 1451 r->iov.iov_len = 0; 1452 r->started = true; 1453 scsi_req_data(&r->req, buflen); 1454 return; 1455 } 1456 1457 /* This also clears the sense buffer for REQUEST SENSE. */ 1458 scsi_req_complete(&r->req, GOOD); 1459 } 1460 1461 static int scsi_disk_check_mode_select(SCSIDiskState *s, int page, 1462 uint8_t *inbuf, int inlen) 1463 { 1464 uint8_t mode_current[SCSI_MAX_MODE_LEN]; 1465 uint8_t mode_changeable[SCSI_MAX_MODE_LEN]; 1466 uint8_t *p; 1467 int len, expected_len, changeable_len, i; 1468 1469 /* The input buffer does not include the page header, so it is 1470 * off by 2 bytes. 1471 */ 1472 expected_len = inlen + 2; 1473 if (expected_len > SCSI_MAX_MODE_LEN) { 1474 return -1; 1475 } 1476 1477 p = mode_current; 1478 memset(mode_current, 0, inlen + 2); 1479 len = mode_sense_page(s, page, &p, 0); 1480 if (len < 0 || len != expected_len) { 1481 return -1; 1482 } 1483 1484 p = mode_changeable; 1485 memset(mode_changeable, 0, inlen + 2); 1486 changeable_len = mode_sense_page(s, page, &p, 1); 1487 assert(changeable_len == len); 1488 1489 /* Check that unchangeable bits are the same as what MODE SENSE 1490 * would return. 1491 */ 1492 for (i = 2; i < len; i++) { 1493 if (((mode_current[i] ^ inbuf[i - 2]) & ~mode_changeable[i]) != 0) { 1494 return -1; 1495 } 1496 } 1497 return 0; 1498 } 1499 1500 static void scsi_disk_apply_mode_select(SCSIDiskState *s, int page, uint8_t *p) 1501 { 1502 switch (page) { 1503 case MODE_PAGE_CACHING: 1504 blk_set_enable_write_cache(s->qdev.conf.blk, (p[0] & 4) != 0); 1505 break; 1506 1507 default: 1508 break; 1509 } 1510 } 1511 1512 static int mode_select_pages(SCSIDiskReq *r, uint8_t *p, int len, bool change) 1513 { 1514 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1515 1516 while (len > 0) { 1517 int page, subpage, page_len; 1518 1519 /* Parse both possible formats for the mode page headers. */ 1520 page = p[0] & 0x3f; 1521 if (p[0] & 0x40) { 1522 if (len < 4) { 1523 goto invalid_param_len; 1524 } 1525 subpage = p[1]; 1526 page_len = lduw_be_p(&p[2]); 1527 p += 4; 1528 len -= 4; 1529 } else { 1530 if (len < 2) { 1531 goto invalid_param_len; 1532 } 1533 subpage = 0; 1534 page_len = p[1]; 1535 p += 2; 1536 len -= 2; 1537 } 1538 1539 if (subpage) { 1540 goto invalid_param; 1541 } 1542 if (page_len > len) { 1543 goto invalid_param_len; 1544 } 1545 1546 if (!change) { 1547 if (scsi_disk_check_mode_select(s, page, p, page_len) < 0) { 1548 goto invalid_param; 1549 } 1550 } else { 1551 scsi_disk_apply_mode_select(s, page, p); 1552 } 1553 1554 p += page_len; 1555 len -= page_len; 1556 } 1557 return 0; 1558 1559 invalid_param: 1560 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM)); 1561 return -1; 1562 1563 invalid_param_len: 1564 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN)); 1565 return -1; 1566 } 1567 1568 static void scsi_disk_emulate_mode_select(SCSIDiskReq *r, uint8_t *inbuf) 1569 { 1570 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1571 uint8_t *p = inbuf; 1572 int cmd = r->req.cmd.buf[0]; 1573 int len = r->req.cmd.xfer; 1574 int hdr_len = (cmd == MODE_SELECT ? 4 : 8); 1575 int bd_len; 1576 int pass; 1577 1578 /* We only support PF=1, SP=0. */ 1579 if ((r->req.cmd.buf[1] & 0x11) != 0x10) { 1580 goto invalid_field; 1581 } 1582 1583 if (len < hdr_len) { 1584 goto invalid_param_len; 1585 } 1586 1587 bd_len = (cmd == MODE_SELECT ? p[3] : lduw_be_p(&p[6])); 1588 len -= hdr_len; 1589 p += hdr_len; 1590 if (len < bd_len) { 1591 goto invalid_param_len; 1592 } 1593 if (bd_len != 0 && bd_len != 8) { 1594 goto invalid_param; 1595 } 1596 1597 len -= bd_len; 1598 p += bd_len; 1599 1600 /* Ensure no change is made if there is an error! */ 1601 for (pass = 0; pass < 2; pass++) { 1602 if (mode_select_pages(r, p, len, pass == 1) < 0) { 1603 assert(pass == 0); 1604 return; 1605 } 1606 } 1607 if (!blk_enable_write_cache(s->qdev.conf.blk)) { 1608 /* The request is used as the AIO opaque value, so add a ref. */ 1609 scsi_req_ref(&r->req); 1610 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0, 1611 BLOCK_ACCT_FLUSH); 1612 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r); 1613 return; 1614 } 1615 1616 scsi_req_complete(&r->req, GOOD); 1617 return; 1618 1619 invalid_param: 1620 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM)); 1621 return; 1622 1623 invalid_param_len: 1624 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN)); 1625 return; 1626 1627 invalid_field: 1628 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 1629 } 1630 1631 static inline bool check_lba_range(SCSIDiskState *s, 1632 uint64_t sector_num, uint32_t nb_sectors) 1633 { 1634 /* 1635 * The first line tests that no overflow happens when computing the last 1636 * sector. The second line tests that the last accessed sector is in 1637 * range. 1638 * 1639 * Careful, the computations should not underflow for nb_sectors == 0, 1640 * and a 0-block read to the first LBA beyond the end of device is 1641 * valid. 1642 */ 1643 return (sector_num <= sector_num + nb_sectors && 1644 sector_num + nb_sectors <= s->qdev.max_lba + 1); 1645 } 1646 1647 typedef struct UnmapCBData { 1648 SCSIDiskReq *r; 1649 uint8_t *inbuf; 1650 int count; 1651 } UnmapCBData; 1652 1653 static void scsi_unmap_complete(void *opaque, int ret); 1654 1655 static void scsi_unmap_complete_noio(UnmapCBData *data, int ret) 1656 { 1657 SCSIDiskReq *r = data->r; 1658 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1659 uint64_t sector_num; 1660 uint32_t nb_sectors; 1661 1662 assert(r->req.aiocb == NULL); 1663 if (scsi_disk_req_check_error(r, ret, false)) { 1664 goto done; 1665 } 1666 1667 if (data->count > 0) { 1668 sector_num = ldq_be_p(&data->inbuf[0]); 1669 nb_sectors = ldl_be_p(&data->inbuf[8]) & 0xffffffffULL; 1670 if (!check_lba_range(s, sector_num, nb_sectors)) { 1671 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE)); 1672 goto done; 1673 } 1674 1675 r->req.aiocb = blk_aio_pdiscard(s->qdev.conf.blk, 1676 sector_num * s->qdev.blocksize, 1677 nb_sectors * s->qdev.blocksize, 1678 scsi_unmap_complete, data); 1679 data->count--; 1680 data->inbuf += 16; 1681 return; 1682 } 1683 1684 scsi_req_complete(&r->req, GOOD); 1685 1686 done: 1687 scsi_req_unref(&r->req); 1688 g_free(data); 1689 } 1690 1691 static void scsi_unmap_complete(void *opaque, int ret) 1692 { 1693 UnmapCBData *data = opaque; 1694 SCSIDiskReq *r = data->r; 1695 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1696 1697 assert(r->req.aiocb != NULL); 1698 r->req.aiocb = NULL; 1699 1700 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); 1701 scsi_unmap_complete_noio(data, ret); 1702 aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); 1703 } 1704 1705 static void scsi_disk_emulate_unmap(SCSIDiskReq *r, uint8_t *inbuf) 1706 { 1707 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1708 uint8_t *p = inbuf; 1709 int len = r->req.cmd.xfer; 1710 UnmapCBData *data; 1711 1712 /* Reject ANCHOR=1. */ 1713 if (r->req.cmd.buf[1] & 0x1) { 1714 goto invalid_field; 1715 } 1716 1717 if (len < 8) { 1718 goto invalid_param_len; 1719 } 1720 if (len < lduw_be_p(&p[0]) + 2) { 1721 goto invalid_param_len; 1722 } 1723 if (len < lduw_be_p(&p[2]) + 8) { 1724 goto invalid_param_len; 1725 } 1726 if (lduw_be_p(&p[2]) & 15) { 1727 goto invalid_param_len; 1728 } 1729 1730 if (blk_is_read_only(s->qdev.conf.blk)) { 1731 scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED)); 1732 return; 1733 } 1734 1735 data = g_new0(UnmapCBData, 1); 1736 data->r = r; 1737 data->inbuf = &p[8]; 1738 data->count = lduw_be_p(&p[2]) >> 4; 1739 1740 /* The matching unref is in scsi_unmap_complete, before data is freed. */ 1741 scsi_req_ref(&r->req); 1742 scsi_unmap_complete_noio(data, 0); 1743 return; 1744 1745 invalid_param_len: 1746 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN)); 1747 return; 1748 1749 invalid_field: 1750 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 1751 } 1752 1753 typedef struct WriteSameCBData { 1754 SCSIDiskReq *r; 1755 int64_t sector; 1756 int nb_sectors; 1757 QEMUIOVector qiov; 1758 struct iovec iov; 1759 } WriteSameCBData; 1760 1761 static void scsi_write_same_complete(void *opaque, int ret) 1762 { 1763 WriteSameCBData *data = opaque; 1764 SCSIDiskReq *r = data->r; 1765 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1766 1767 assert(r->req.aiocb != NULL); 1768 r->req.aiocb = NULL; 1769 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); 1770 if (scsi_disk_req_check_error(r, ret, true)) { 1771 goto done; 1772 } 1773 1774 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); 1775 1776 data->nb_sectors -= data->iov.iov_len / 512; 1777 data->sector += data->iov.iov_len / 512; 1778 data->iov.iov_len = MIN(data->nb_sectors * 512, data->iov.iov_len); 1779 if (data->iov.iov_len) { 1780 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 1781 data->iov.iov_len, BLOCK_ACCT_WRITE); 1782 /* Reinitialize qiov, to handle unaligned WRITE SAME request 1783 * where final qiov may need smaller size */ 1784 qemu_iovec_init_external(&data->qiov, &data->iov, 1); 1785 r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk, 1786 data->sector << BDRV_SECTOR_BITS, 1787 &data->qiov, 0, 1788 scsi_write_same_complete, data); 1789 aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); 1790 return; 1791 } 1792 1793 scsi_req_complete(&r->req, GOOD); 1794 1795 done: 1796 scsi_req_unref(&r->req); 1797 qemu_vfree(data->iov.iov_base); 1798 g_free(data); 1799 aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); 1800 } 1801 1802 static void scsi_disk_emulate_write_same(SCSIDiskReq *r, uint8_t *inbuf) 1803 { 1804 SCSIRequest *req = &r->req; 1805 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 1806 uint32_t nb_sectors = scsi_data_cdb_xfer(r->req.cmd.buf); 1807 WriteSameCBData *data; 1808 uint8_t *buf; 1809 int i; 1810 1811 /* Fail if PBDATA=1 or LBDATA=1 or ANCHOR=1. */ 1812 if (nb_sectors == 0 || (req->cmd.buf[1] & 0x16)) { 1813 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 1814 return; 1815 } 1816 1817 if (blk_is_read_only(s->qdev.conf.blk)) { 1818 scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED)); 1819 return; 1820 } 1821 if (!check_lba_range(s, r->req.cmd.lba, nb_sectors)) { 1822 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE)); 1823 return; 1824 } 1825 1826 if ((req->cmd.buf[1] & 0x1) || buffer_is_zero(inbuf, s->qdev.blocksize)) { 1827 int flags = (req->cmd.buf[1] & 0x8) ? BDRV_REQ_MAY_UNMAP : 0; 1828 1829 /* The request is used as the AIO opaque value, so add a ref. */ 1830 scsi_req_ref(&r->req); 1831 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 1832 nb_sectors * s->qdev.blocksize, 1833 BLOCK_ACCT_WRITE); 1834 r->req.aiocb = blk_aio_pwrite_zeroes(s->qdev.conf.blk, 1835 r->req.cmd.lba * s->qdev.blocksize, 1836 nb_sectors * s->qdev.blocksize, 1837 flags, scsi_aio_complete, r); 1838 return; 1839 } 1840 1841 data = g_new0(WriteSameCBData, 1); 1842 data->r = r; 1843 data->sector = r->req.cmd.lba * (s->qdev.blocksize / 512); 1844 data->nb_sectors = nb_sectors * (s->qdev.blocksize / 512); 1845 data->iov.iov_len = MIN(data->nb_sectors * 512, SCSI_WRITE_SAME_MAX); 1846 data->iov.iov_base = buf = blk_blockalign(s->qdev.conf.blk, 1847 data->iov.iov_len); 1848 qemu_iovec_init_external(&data->qiov, &data->iov, 1); 1849 1850 for (i = 0; i < data->iov.iov_len; i += s->qdev.blocksize) { 1851 memcpy(&buf[i], inbuf, s->qdev.blocksize); 1852 } 1853 1854 scsi_req_ref(&r->req); 1855 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 1856 data->iov.iov_len, BLOCK_ACCT_WRITE); 1857 r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk, 1858 data->sector << BDRV_SECTOR_BITS, 1859 &data->qiov, 0, 1860 scsi_write_same_complete, data); 1861 } 1862 1863 static void scsi_disk_emulate_write_data(SCSIRequest *req) 1864 { 1865 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 1866 1867 if (r->iov.iov_len) { 1868 int buflen = r->iov.iov_len; 1869 DPRINTF("Write buf_len=%d\n", buflen); 1870 r->iov.iov_len = 0; 1871 scsi_req_data(&r->req, buflen); 1872 return; 1873 } 1874 1875 switch (req->cmd.buf[0]) { 1876 case MODE_SELECT: 1877 case MODE_SELECT_10: 1878 /* This also clears the sense buffer for REQUEST SENSE. */ 1879 scsi_disk_emulate_mode_select(r, r->iov.iov_base); 1880 break; 1881 1882 case UNMAP: 1883 scsi_disk_emulate_unmap(r, r->iov.iov_base); 1884 break; 1885 1886 case VERIFY_10: 1887 case VERIFY_12: 1888 case VERIFY_16: 1889 if (r->req.status == -1) { 1890 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 1891 } 1892 break; 1893 1894 case WRITE_SAME_10: 1895 case WRITE_SAME_16: 1896 scsi_disk_emulate_write_same(r, r->iov.iov_base); 1897 break; 1898 1899 default: 1900 abort(); 1901 } 1902 } 1903 1904 static int32_t scsi_disk_emulate_command(SCSIRequest *req, uint8_t *buf) 1905 { 1906 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 1907 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 1908 uint64_t nb_sectors; 1909 uint8_t *outbuf; 1910 int buflen; 1911 1912 switch (req->cmd.buf[0]) { 1913 case INQUIRY: 1914 case MODE_SENSE: 1915 case MODE_SENSE_10: 1916 case RESERVE: 1917 case RESERVE_10: 1918 case RELEASE: 1919 case RELEASE_10: 1920 case START_STOP: 1921 case ALLOW_MEDIUM_REMOVAL: 1922 case GET_CONFIGURATION: 1923 case GET_EVENT_STATUS_NOTIFICATION: 1924 case MECHANISM_STATUS: 1925 case REQUEST_SENSE: 1926 break; 1927 1928 default: 1929 if (!blk_is_available(s->qdev.conf.blk)) { 1930 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM)); 1931 return 0; 1932 } 1933 break; 1934 } 1935 1936 /* 1937 * FIXME: we shouldn't return anything bigger than 4k, but the code 1938 * requires the buffer to be as big as req->cmd.xfer in several 1939 * places. So, do not allow CDBs with a very large ALLOCATION 1940 * LENGTH. The real fix would be to modify scsi_read_data and 1941 * dma_buf_read, so that they return data beyond the buflen 1942 * as all zeros. 1943 */ 1944 if (req->cmd.xfer > 65536) { 1945 goto illegal_request; 1946 } 1947 r->buflen = MAX(4096, req->cmd.xfer); 1948 1949 if (!r->iov.iov_base) { 1950 r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen); 1951 } 1952 1953 buflen = req->cmd.xfer; 1954 outbuf = r->iov.iov_base; 1955 memset(outbuf, 0, r->buflen); 1956 switch (req->cmd.buf[0]) { 1957 case TEST_UNIT_READY: 1958 assert(blk_is_available(s->qdev.conf.blk)); 1959 break; 1960 case INQUIRY: 1961 buflen = scsi_disk_emulate_inquiry(req, outbuf); 1962 if (buflen < 0) { 1963 goto illegal_request; 1964 } 1965 break; 1966 case MODE_SENSE: 1967 case MODE_SENSE_10: 1968 buflen = scsi_disk_emulate_mode_sense(r, outbuf); 1969 if (buflen < 0) { 1970 goto illegal_request; 1971 } 1972 break; 1973 case READ_TOC: 1974 buflen = scsi_disk_emulate_read_toc(req, outbuf); 1975 if (buflen < 0) { 1976 goto illegal_request; 1977 } 1978 break; 1979 case RESERVE: 1980 if (req->cmd.buf[1] & 1) { 1981 goto illegal_request; 1982 } 1983 break; 1984 case RESERVE_10: 1985 if (req->cmd.buf[1] & 3) { 1986 goto illegal_request; 1987 } 1988 break; 1989 case RELEASE: 1990 if (req->cmd.buf[1] & 1) { 1991 goto illegal_request; 1992 } 1993 break; 1994 case RELEASE_10: 1995 if (req->cmd.buf[1] & 3) { 1996 goto illegal_request; 1997 } 1998 break; 1999 case START_STOP: 2000 if (scsi_disk_emulate_start_stop(r) < 0) { 2001 return 0; 2002 } 2003 break; 2004 case ALLOW_MEDIUM_REMOVAL: 2005 s->tray_locked = req->cmd.buf[4] & 1; 2006 blk_lock_medium(s->qdev.conf.blk, req->cmd.buf[4] & 1); 2007 break; 2008 case READ_CAPACITY_10: 2009 /* The normal LEN field for this command is zero. */ 2010 memset(outbuf, 0, 8); 2011 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 2012 if (!nb_sectors) { 2013 scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY)); 2014 return 0; 2015 } 2016 if ((req->cmd.buf[8] & 1) == 0 && req->cmd.lba) { 2017 goto illegal_request; 2018 } 2019 nb_sectors /= s->qdev.blocksize / 512; 2020 /* Returned value is the address of the last sector. */ 2021 nb_sectors--; 2022 /* Remember the new size for read/write sanity checking. */ 2023 s->qdev.max_lba = nb_sectors; 2024 /* Clip to 2TB, instead of returning capacity modulo 2TB. */ 2025 if (nb_sectors > UINT32_MAX) { 2026 nb_sectors = UINT32_MAX; 2027 } 2028 outbuf[0] = (nb_sectors >> 24) & 0xff; 2029 outbuf[1] = (nb_sectors >> 16) & 0xff; 2030 outbuf[2] = (nb_sectors >> 8) & 0xff; 2031 outbuf[3] = nb_sectors & 0xff; 2032 outbuf[4] = 0; 2033 outbuf[5] = 0; 2034 outbuf[6] = s->qdev.blocksize >> 8; 2035 outbuf[7] = 0; 2036 break; 2037 case REQUEST_SENSE: 2038 /* Just return "NO SENSE". */ 2039 buflen = scsi_convert_sense(NULL, 0, outbuf, r->buflen, 2040 (req->cmd.buf[1] & 1) == 0); 2041 if (buflen < 0) { 2042 goto illegal_request; 2043 } 2044 break; 2045 case MECHANISM_STATUS: 2046 buflen = scsi_emulate_mechanism_status(s, outbuf); 2047 if (buflen < 0) { 2048 goto illegal_request; 2049 } 2050 break; 2051 case GET_CONFIGURATION: 2052 buflen = scsi_get_configuration(s, outbuf); 2053 if (buflen < 0) { 2054 goto illegal_request; 2055 } 2056 break; 2057 case GET_EVENT_STATUS_NOTIFICATION: 2058 buflen = scsi_get_event_status_notification(s, r, outbuf); 2059 if (buflen < 0) { 2060 goto illegal_request; 2061 } 2062 break; 2063 case READ_DISC_INFORMATION: 2064 buflen = scsi_read_disc_information(s, r, outbuf); 2065 if (buflen < 0) { 2066 goto illegal_request; 2067 } 2068 break; 2069 case READ_DVD_STRUCTURE: 2070 buflen = scsi_read_dvd_structure(s, r, outbuf); 2071 if (buflen < 0) { 2072 goto illegal_request; 2073 } 2074 break; 2075 case SERVICE_ACTION_IN_16: 2076 /* Service Action In subcommands. */ 2077 if ((req->cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) { 2078 DPRINTF("SAI READ CAPACITY(16)\n"); 2079 memset(outbuf, 0, req->cmd.xfer); 2080 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 2081 if (!nb_sectors) { 2082 scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY)); 2083 return 0; 2084 } 2085 if ((req->cmd.buf[14] & 1) == 0 && req->cmd.lba) { 2086 goto illegal_request; 2087 } 2088 nb_sectors /= s->qdev.blocksize / 512; 2089 /* Returned value is the address of the last sector. */ 2090 nb_sectors--; 2091 /* Remember the new size for read/write sanity checking. */ 2092 s->qdev.max_lba = nb_sectors; 2093 outbuf[0] = (nb_sectors >> 56) & 0xff; 2094 outbuf[1] = (nb_sectors >> 48) & 0xff; 2095 outbuf[2] = (nb_sectors >> 40) & 0xff; 2096 outbuf[3] = (nb_sectors >> 32) & 0xff; 2097 outbuf[4] = (nb_sectors >> 24) & 0xff; 2098 outbuf[5] = (nb_sectors >> 16) & 0xff; 2099 outbuf[6] = (nb_sectors >> 8) & 0xff; 2100 outbuf[7] = nb_sectors & 0xff; 2101 outbuf[8] = 0; 2102 outbuf[9] = 0; 2103 outbuf[10] = s->qdev.blocksize >> 8; 2104 outbuf[11] = 0; 2105 outbuf[12] = 0; 2106 outbuf[13] = get_physical_block_exp(&s->qdev.conf); 2107 2108 /* set TPE bit if the format supports discard */ 2109 if (s->qdev.conf.discard_granularity) { 2110 outbuf[14] = 0x80; 2111 } 2112 2113 /* Protection, exponent and lowest lba field left blank. */ 2114 break; 2115 } 2116 DPRINTF("Unsupported Service Action In\n"); 2117 goto illegal_request; 2118 case SYNCHRONIZE_CACHE: 2119 /* The request is used as the AIO opaque value, so add a ref. */ 2120 scsi_req_ref(&r->req); 2121 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0, 2122 BLOCK_ACCT_FLUSH); 2123 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r); 2124 return 0; 2125 case SEEK_10: 2126 DPRINTF("Seek(10) (sector %" PRId64 ")\n", r->req.cmd.lba); 2127 if (r->req.cmd.lba > s->qdev.max_lba) { 2128 goto illegal_lba; 2129 } 2130 break; 2131 case MODE_SELECT: 2132 DPRINTF("Mode Select(6) (len %lu)\n", (unsigned long)r->req.cmd.xfer); 2133 break; 2134 case MODE_SELECT_10: 2135 DPRINTF("Mode Select(10) (len %lu)\n", (unsigned long)r->req.cmd.xfer); 2136 break; 2137 case UNMAP: 2138 DPRINTF("Unmap (len %lu)\n", (unsigned long)r->req.cmd.xfer); 2139 break; 2140 case VERIFY_10: 2141 case VERIFY_12: 2142 case VERIFY_16: 2143 DPRINTF("Verify (bytchk %d)\n", (req->cmd.buf[1] >> 1) & 3); 2144 if (req->cmd.buf[1] & 6) { 2145 goto illegal_request; 2146 } 2147 break; 2148 case WRITE_SAME_10: 2149 case WRITE_SAME_16: 2150 DPRINTF("WRITE SAME %d (len %lu)\n", 2151 req->cmd.buf[0] == WRITE_SAME_10 ? 10 : 16, 2152 (unsigned long)r->req.cmd.xfer); 2153 break; 2154 default: 2155 DPRINTF("Unknown SCSI command (%2.2x=%s)\n", buf[0], 2156 scsi_command_name(buf[0])); 2157 scsi_check_condition(r, SENSE_CODE(INVALID_OPCODE)); 2158 return 0; 2159 } 2160 assert(!r->req.aiocb); 2161 r->iov.iov_len = MIN(r->buflen, req->cmd.xfer); 2162 if (r->iov.iov_len == 0) { 2163 scsi_req_complete(&r->req, GOOD); 2164 } 2165 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { 2166 assert(r->iov.iov_len == req->cmd.xfer); 2167 return -r->iov.iov_len; 2168 } else { 2169 return r->iov.iov_len; 2170 } 2171 2172 illegal_request: 2173 if (r->req.status == -1) { 2174 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 2175 } 2176 return 0; 2177 2178 illegal_lba: 2179 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE)); 2180 return 0; 2181 } 2182 2183 /* Execute a scsi command. Returns the length of the data expected by the 2184 command. This will be Positive for data transfers from the device 2185 (eg. disk reads), negative for transfers to the device (eg. disk writes), 2186 and zero if the command does not transfer any data. */ 2187 2188 static int32_t scsi_disk_dma_command(SCSIRequest *req, uint8_t *buf) 2189 { 2190 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 2191 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 2192 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s)); 2193 uint32_t len; 2194 uint8_t command; 2195 2196 command = buf[0]; 2197 2198 if (!blk_is_available(s->qdev.conf.blk)) { 2199 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM)); 2200 return 0; 2201 } 2202 2203 len = scsi_data_cdb_xfer(r->req.cmd.buf); 2204 switch (command) { 2205 case READ_6: 2206 case READ_10: 2207 case READ_12: 2208 case READ_16: 2209 DPRINTF("Read (sector %" PRId64 ", count %u)\n", r->req.cmd.lba, len); 2210 /* Protection information is not supported. For SCSI versions 2 and 2211 * older (as determined by snooping the guest's INQUIRY commands), 2212 * there is no RD/WR/VRPROTECT, so skip this check in these versions. 2213 */ 2214 if (s->qdev.scsi_version > 2 && (r->req.cmd.buf[1] & 0xe0)) { 2215 goto illegal_request; 2216 } 2217 if (!check_lba_range(s, r->req.cmd.lba, len)) { 2218 goto illegal_lba; 2219 } 2220 r->sector = r->req.cmd.lba * (s->qdev.blocksize / 512); 2221 r->sector_count = len * (s->qdev.blocksize / 512); 2222 break; 2223 case WRITE_6: 2224 case WRITE_10: 2225 case WRITE_12: 2226 case WRITE_16: 2227 case WRITE_VERIFY_10: 2228 case WRITE_VERIFY_12: 2229 case WRITE_VERIFY_16: 2230 if (blk_is_read_only(s->qdev.conf.blk)) { 2231 scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED)); 2232 return 0; 2233 } 2234 DPRINTF("Write %s(sector %" PRId64 ", count %u)\n", 2235 (command & 0xe) == 0xe ? "And Verify " : "", 2236 r->req.cmd.lba, len); 2237 /* fall through */ 2238 case VERIFY_10: 2239 case VERIFY_12: 2240 case VERIFY_16: 2241 /* We get here only for BYTCHK == 0x01 and only for scsi-block. 2242 * As far as DMA is concerned, we can treat it the same as a write; 2243 * scsi_block_do_sgio will send VERIFY commands. 2244 */ 2245 if (s->qdev.scsi_version > 2 && (r->req.cmd.buf[1] & 0xe0)) { 2246 goto illegal_request; 2247 } 2248 if (!check_lba_range(s, r->req.cmd.lba, len)) { 2249 goto illegal_lba; 2250 } 2251 r->sector = r->req.cmd.lba * (s->qdev.blocksize / 512); 2252 r->sector_count = len * (s->qdev.blocksize / 512); 2253 break; 2254 default: 2255 abort(); 2256 illegal_request: 2257 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 2258 return 0; 2259 illegal_lba: 2260 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE)); 2261 return 0; 2262 } 2263 r->need_fua_emulation = sdc->need_fua_emulation(&r->req.cmd); 2264 if (r->sector_count == 0) { 2265 scsi_req_complete(&r->req, GOOD); 2266 } 2267 assert(r->iov.iov_len == 0); 2268 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { 2269 return -r->sector_count * 512; 2270 } else { 2271 return r->sector_count * 512; 2272 } 2273 } 2274 2275 static void scsi_disk_reset(DeviceState *dev) 2276 { 2277 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev.qdev, dev); 2278 uint64_t nb_sectors; 2279 2280 scsi_device_purge_requests(&s->qdev, SENSE_CODE(RESET)); 2281 2282 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 2283 nb_sectors /= s->qdev.blocksize / 512; 2284 if (nb_sectors) { 2285 nb_sectors--; 2286 } 2287 s->qdev.max_lba = nb_sectors; 2288 /* reset tray statuses */ 2289 s->tray_locked = 0; 2290 s->tray_open = 0; 2291 2292 s->qdev.scsi_version = s->qdev.default_scsi_version; 2293 } 2294 2295 static void scsi_disk_resize_cb(void *opaque) 2296 { 2297 SCSIDiskState *s = opaque; 2298 2299 /* SPC lists this sense code as available only for 2300 * direct-access devices. 2301 */ 2302 if (s->qdev.type == TYPE_DISK) { 2303 scsi_device_report_change(&s->qdev, SENSE_CODE(CAPACITY_CHANGED)); 2304 } 2305 } 2306 2307 static void scsi_cd_change_media_cb(void *opaque, bool load, Error **errp) 2308 { 2309 SCSIDiskState *s = opaque; 2310 2311 /* 2312 * When a CD gets changed, we have to report an ejected state and 2313 * then a loaded state to guests so that they detect tray 2314 * open/close and media change events. Guests that do not use 2315 * GET_EVENT_STATUS_NOTIFICATION to detect such tray open/close 2316 * states rely on this behavior. 2317 * 2318 * media_changed governs the state machine used for unit attention 2319 * report. media_event is used by GET EVENT STATUS NOTIFICATION. 2320 */ 2321 s->media_changed = load; 2322 s->tray_open = !load; 2323 scsi_device_set_ua(&s->qdev, SENSE_CODE(UNIT_ATTENTION_NO_MEDIUM)); 2324 s->media_event = true; 2325 s->eject_request = false; 2326 } 2327 2328 static void scsi_cd_eject_request_cb(void *opaque, bool force) 2329 { 2330 SCSIDiskState *s = opaque; 2331 2332 s->eject_request = true; 2333 if (force) { 2334 s->tray_locked = false; 2335 } 2336 } 2337 2338 static bool scsi_cd_is_tray_open(void *opaque) 2339 { 2340 return ((SCSIDiskState *)opaque)->tray_open; 2341 } 2342 2343 static bool scsi_cd_is_medium_locked(void *opaque) 2344 { 2345 return ((SCSIDiskState *)opaque)->tray_locked; 2346 } 2347 2348 static const BlockDevOps scsi_disk_removable_block_ops = { 2349 .change_media_cb = scsi_cd_change_media_cb, 2350 .eject_request_cb = scsi_cd_eject_request_cb, 2351 .is_tray_open = scsi_cd_is_tray_open, 2352 .is_medium_locked = scsi_cd_is_medium_locked, 2353 2354 .resize_cb = scsi_disk_resize_cb, 2355 }; 2356 2357 static const BlockDevOps scsi_disk_block_ops = { 2358 .resize_cb = scsi_disk_resize_cb, 2359 }; 2360 2361 static void scsi_disk_unit_attention_reported(SCSIDevice *dev) 2362 { 2363 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2364 if (s->media_changed) { 2365 s->media_changed = false; 2366 scsi_device_set_ua(&s->qdev, SENSE_CODE(MEDIUM_CHANGED)); 2367 } 2368 } 2369 2370 static void scsi_realize(SCSIDevice *dev, Error **errp) 2371 { 2372 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2373 2374 if (!s->qdev.conf.blk) { 2375 error_setg(errp, "drive property not set"); 2376 return; 2377 } 2378 2379 if (!(s->features & (1 << SCSI_DISK_F_REMOVABLE)) && 2380 !blk_is_inserted(s->qdev.conf.blk)) { 2381 error_setg(errp, "Device needs media, but drive is empty"); 2382 return; 2383 } 2384 2385 blkconf_blocksizes(&s->qdev.conf); 2386 2387 if (s->qdev.conf.logical_block_size > 2388 s->qdev.conf.physical_block_size) { 2389 error_setg(errp, 2390 "logical_block_size > physical_block_size not supported"); 2391 return; 2392 } 2393 2394 if (dev->type == TYPE_DISK) { 2395 if (!blkconf_geometry(&dev->conf, NULL, 65535, 255, 255, errp)) { 2396 return; 2397 } 2398 } 2399 if (!blkconf_apply_backend_options(&dev->conf, 2400 blk_is_read_only(s->qdev.conf.blk), 2401 dev->type == TYPE_DISK, errp)) { 2402 return; 2403 } 2404 2405 if (s->qdev.conf.discard_granularity == -1) { 2406 s->qdev.conf.discard_granularity = 2407 MAX(s->qdev.conf.logical_block_size, DEFAULT_DISCARD_GRANULARITY); 2408 } 2409 2410 if (!s->version) { 2411 s->version = g_strdup(qemu_hw_version()); 2412 } 2413 if (!s->vendor) { 2414 s->vendor = g_strdup("QEMU"); 2415 } 2416 2417 if (blk_is_sg(s->qdev.conf.blk)) { 2418 error_setg(errp, "unwanted /dev/sg*"); 2419 return; 2420 } 2421 2422 if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) && 2423 !(s->features & (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS))) { 2424 blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_removable_block_ops, s); 2425 } else { 2426 blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_block_ops, s); 2427 } 2428 blk_set_guest_block_size(s->qdev.conf.blk, s->qdev.blocksize); 2429 2430 blk_iostatus_enable(s->qdev.conf.blk); 2431 } 2432 2433 static void scsi_hd_realize(SCSIDevice *dev, Error **errp) 2434 { 2435 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2436 /* can happen for devices without drive. The error message for missing 2437 * backend will be issued in scsi_realize 2438 */ 2439 if (s->qdev.conf.blk) { 2440 blkconf_blocksizes(&s->qdev.conf); 2441 } 2442 s->qdev.blocksize = s->qdev.conf.logical_block_size; 2443 s->qdev.type = TYPE_DISK; 2444 if (!s->product) { 2445 s->product = g_strdup("QEMU HARDDISK"); 2446 } 2447 scsi_realize(&s->qdev, errp); 2448 } 2449 2450 static void scsi_cd_realize(SCSIDevice *dev, Error **errp) 2451 { 2452 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2453 int ret; 2454 2455 if (!dev->conf.blk) { 2456 /* Anonymous BlockBackend for an empty drive. As we put it into 2457 * dev->conf, qdev takes care of detaching on unplug. */ 2458 dev->conf.blk = blk_new(0, BLK_PERM_ALL); 2459 ret = blk_attach_dev(dev->conf.blk, &dev->qdev); 2460 assert(ret == 0); 2461 } 2462 2463 s->qdev.blocksize = 2048; 2464 s->qdev.type = TYPE_ROM; 2465 s->features |= 1 << SCSI_DISK_F_REMOVABLE; 2466 if (!s->product) { 2467 s->product = g_strdup("QEMU CD-ROM"); 2468 } 2469 scsi_realize(&s->qdev, errp); 2470 } 2471 2472 static void scsi_disk_realize(SCSIDevice *dev, Error **errp) 2473 { 2474 DriveInfo *dinfo; 2475 Error *local_err = NULL; 2476 2477 if (!dev->conf.blk) { 2478 scsi_realize(dev, &local_err); 2479 assert(local_err); 2480 error_propagate(errp, local_err); 2481 return; 2482 } 2483 2484 dinfo = blk_legacy_dinfo(dev->conf.blk); 2485 if (dinfo && dinfo->media_cd) { 2486 scsi_cd_realize(dev, errp); 2487 } else { 2488 scsi_hd_realize(dev, errp); 2489 } 2490 } 2491 2492 static const SCSIReqOps scsi_disk_emulate_reqops = { 2493 .size = sizeof(SCSIDiskReq), 2494 .free_req = scsi_free_request, 2495 .send_command = scsi_disk_emulate_command, 2496 .read_data = scsi_disk_emulate_read_data, 2497 .write_data = scsi_disk_emulate_write_data, 2498 .get_buf = scsi_get_buf, 2499 }; 2500 2501 static const SCSIReqOps scsi_disk_dma_reqops = { 2502 .size = sizeof(SCSIDiskReq), 2503 .free_req = scsi_free_request, 2504 .send_command = scsi_disk_dma_command, 2505 .read_data = scsi_read_data, 2506 .write_data = scsi_write_data, 2507 .get_buf = scsi_get_buf, 2508 .load_request = scsi_disk_load_request, 2509 .save_request = scsi_disk_save_request, 2510 }; 2511 2512 static const SCSIReqOps *const scsi_disk_reqops_dispatch[256] = { 2513 [TEST_UNIT_READY] = &scsi_disk_emulate_reqops, 2514 [INQUIRY] = &scsi_disk_emulate_reqops, 2515 [MODE_SENSE] = &scsi_disk_emulate_reqops, 2516 [MODE_SENSE_10] = &scsi_disk_emulate_reqops, 2517 [START_STOP] = &scsi_disk_emulate_reqops, 2518 [ALLOW_MEDIUM_REMOVAL] = &scsi_disk_emulate_reqops, 2519 [READ_CAPACITY_10] = &scsi_disk_emulate_reqops, 2520 [READ_TOC] = &scsi_disk_emulate_reqops, 2521 [READ_DVD_STRUCTURE] = &scsi_disk_emulate_reqops, 2522 [READ_DISC_INFORMATION] = &scsi_disk_emulate_reqops, 2523 [GET_CONFIGURATION] = &scsi_disk_emulate_reqops, 2524 [GET_EVENT_STATUS_NOTIFICATION] = &scsi_disk_emulate_reqops, 2525 [MECHANISM_STATUS] = &scsi_disk_emulate_reqops, 2526 [SERVICE_ACTION_IN_16] = &scsi_disk_emulate_reqops, 2527 [REQUEST_SENSE] = &scsi_disk_emulate_reqops, 2528 [SYNCHRONIZE_CACHE] = &scsi_disk_emulate_reqops, 2529 [SEEK_10] = &scsi_disk_emulate_reqops, 2530 [MODE_SELECT] = &scsi_disk_emulate_reqops, 2531 [MODE_SELECT_10] = &scsi_disk_emulate_reqops, 2532 [UNMAP] = &scsi_disk_emulate_reqops, 2533 [WRITE_SAME_10] = &scsi_disk_emulate_reqops, 2534 [WRITE_SAME_16] = &scsi_disk_emulate_reqops, 2535 [VERIFY_10] = &scsi_disk_emulate_reqops, 2536 [VERIFY_12] = &scsi_disk_emulate_reqops, 2537 [VERIFY_16] = &scsi_disk_emulate_reqops, 2538 2539 [READ_6] = &scsi_disk_dma_reqops, 2540 [READ_10] = &scsi_disk_dma_reqops, 2541 [READ_12] = &scsi_disk_dma_reqops, 2542 [READ_16] = &scsi_disk_dma_reqops, 2543 [WRITE_6] = &scsi_disk_dma_reqops, 2544 [WRITE_10] = &scsi_disk_dma_reqops, 2545 [WRITE_12] = &scsi_disk_dma_reqops, 2546 [WRITE_16] = &scsi_disk_dma_reqops, 2547 [WRITE_VERIFY_10] = &scsi_disk_dma_reqops, 2548 [WRITE_VERIFY_12] = &scsi_disk_dma_reqops, 2549 [WRITE_VERIFY_16] = &scsi_disk_dma_reqops, 2550 }; 2551 2552 static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun, 2553 uint8_t *buf, void *hba_private) 2554 { 2555 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d); 2556 SCSIRequest *req; 2557 const SCSIReqOps *ops; 2558 uint8_t command; 2559 2560 command = buf[0]; 2561 ops = scsi_disk_reqops_dispatch[command]; 2562 if (!ops) { 2563 ops = &scsi_disk_emulate_reqops; 2564 } 2565 req = scsi_req_alloc(ops, &s->qdev, tag, lun, hba_private); 2566 2567 #ifdef DEBUG_SCSI 2568 DPRINTF("Command: lun=%d tag=0x%x data=0x%02x", lun, tag, buf[0]); 2569 { 2570 int i; 2571 for (i = 1; i < scsi_cdb_length(buf); i++) { 2572 printf(" 0x%02x", buf[i]); 2573 } 2574 printf("\n"); 2575 } 2576 #endif 2577 2578 return req; 2579 } 2580 2581 #ifdef __linux__ 2582 static int get_device_type(SCSIDiskState *s) 2583 { 2584 uint8_t cmd[16]; 2585 uint8_t buf[36]; 2586 int ret; 2587 2588 memset(cmd, 0, sizeof(cmd)); 2589 memset(buf, 0, sizeof(buf)); 2590 cmd[0] = INQUIRY; 2591 cmd[4] = sizeof(buf); 2592 2593 ret = scsi_SG_IO_FROM_DEV(s->qdev.conf.blk, cmd, sizeof(cmd), 2594 buf, sizeof(buf)); 2595 if (ret < 0) { 2596 return -1; 2597 } 2598 s->qdev.type = buf[0]; 2599 if (buf[1] & 0x80) { 2600 s->features |= 1 << SCSI_DISK_F_REMOVABLE; 2601 } 2602 return 0; 2603 } 2604 2605 static void scsi_block_realize(SCSIDevice *dev, Error **errp) 2606 { 2607 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2608 int sg_version; 2609 int rc; 2610 2611 if (!s->qdev.conf.blk) { 2612 error_setg(errp, "drive property not set"); 2613 return; 2614 } 2615 2616 if (s->rotation_rate) { 2617 error_report_once("rotation_rate is specified for scsi-block but is " 2618 "not implemented. This option is deprecated and will " 2619 "be removed in a future version"); 2620 } 2621 2622 /* check we are using a driver managing SG_IO (version 3 and after) */ 2623 rc = blk_ioctl(s->qdev.conf.blk, SG_GET_VERSION_NUM, &sg_version); 2624 if (rc < 0) { 2625 error_setg_errno(errp, -rc, "cannot get SG_IO version number"); 2626 if (rc != -EPERM) { 2627 error_append_hint(errp, "Is this a SCSI device?\n"); 2628 } 2629 return; 2630 } 2631 if (sg_version < 30000) { 2632 error_setg(errp, "scsi generic interface too old"); 2633 return; 2634 } 2635 2636 /* get device type from INQUIRY data */ 2637 rc = get_device_type(s); 2638 if (rc < 0) { 2639 error_setg(errp, "INQUIRY failed"); 2640 return; 2641 } 2642 2643 /* Make a guess for the block size, we'll fix it when the guest sends. 2644 * READ CAPACITY. If they don't, they likely would assume these sizes 2645 * anyway. (TODO: check in /sys). 2646 */ 2647 if (s->qdev.type == TYPE_ROM || s->qdev.type == TYPE_WORM) { 2648 s->qdev.blocksize = 2048; 2649 } else { 2650 s->qdev.blocksize = 512; 2651 } 2652 2653 /* Makes the scsi-block device not removable by using HMP and QMP eject 2654 * command. 2655 */ 2656 s->features |= (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS); 2657 2658 scsi_realize(&s->qdev, errp); 2659 scsi_generic_read_device_inquiry(&s->qdev); 2660 } 2661 2662 typedef struct SCSIBlockReq { 2663 SCSIDiskReq req; 2664 sg_io_hdr_t io_header; 2665 2666 /* Selected bytes of the original CDB, copied into our own CDB. */ 2667 uint8_t cmd, cdb1, group_number; 2668 2669 /* CDB passed to SG_IO. */ 2670 uint8_t cdb[16]; 2671 } SCSIBlockReq; 2672 2673 static BlockAIOCB *scsi_block_do_sgio(SCSIBlockReq *req, 2674 int64_t offset, QEMUIOVector *iov, 2675 int direction, 2676 BlockCompletionFunc *cb, void *opaque) 2677 { 2678 sg_io_hdr_t *io_header = &req->io_header; 2679 SCSIDiskReq *r = &req->req; 2680 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 2681 int nb_logical_blocks; 2682 uint64_t lba; 2683 BlockAIOCB *aiocb; 2684 2685 /* This is not supported yet. It can only happen if the guest does 2686 * reads and writes that are not aligned to one logical sectors 2687 * _and_ cover multiple MemoryRegions. 2688 */ 2689 assert(offset % s->qdev.blocksize == 0); 2690 assert(iov->size % s->qdev.blocksize == 0); 2691 2692 io_header->interface_id = 'S'; 2693 2694 /* The data transfer comes from the QEMUIOVector. */ 2695 io_header->dxfer_direction = direction; 2696 io_header->dxfer_len = iov->size; 2697 io_header->dxferp = (void *)iov->iov; 2698 io_header->iovec_count = iov->niov; 2699 assert(io_header->iovec_count == iov->niov); /* no overflow! */ 2700 2701 /* Build a new CDB with the LBA and length patched in, in case 2702 * DMA helpers split the transfer in multiple segments. Do not 2703 * build a CDB smaller than what the guest wanted, and only build 2704 * a larger one if strictly necessary. 2705 */ 2706 io_header->cmdp = req->cdb; 2707 lba = offset / s->qdev.blocksize; 2708 nb_logical_blocks = io_header->dxfer_len / s->qdev.blocksize; 2709 2710 if ((req->cmd >> 5) == 0 && lba <= 0x1ffff) { 2711 /* 6-byte CDB */ 2712 stl_be_p(&req->cdb[0], lba | (req->cmd << 24)); 2713 req->cdb[4] = nb_logical_blocks; 2714 req->cdb[5] = 0; 2715 io_header->cmd_len = 6; 2716 } else if ((req->cmd >> 5) <= 1 && lba <= 0xffffffffULL) { 2717 /* 10-byte CDB */ 2718 req->cdb[0] = (req->cmd & 0x1f) | 0x20; 2719 req->cdb[1] = req->cdb1; 2720 stl_be_p(&req->cdb[2], lba); 2721 req->cdb[6] = req->group_number; 2722 stw_be_p(&req->cdb[7], nb_logical_blocks); 2723 req->cdb[9] = 0; 2724 io_header->cmd_len = 10; 2725 } else if ((req->cmd >> 5) != 4 && lba <= 0xffffffffULL) { 2726 /* 12-byte CDB */ 2727 req->cdb[0] = (req->cmd & 0x1f) | 0xA0; 2728 req->cdb[1] = req->cdb1; 2729 stl_be_p(&req->cdb[2], lba); 2730 stl_be_p(&req->cdb[6], nb_logical_blocks); 2731 req->cdb[10] = req->group_number; 2732 req->cdb[11] = 0; 2733 io_header->cmd_len = 12; 2734 } else { 2735 /* 16-byte CDB */ 2736 req->cdb[0] = (req->cmd & 0x1f) | 0x80; 2737 req->cdb[1] = req->cdb1; 2738 stq_be_p(&req->cdb[2], lba); 2739 stl_be_p(&req->cdb[10], nb_logical_blocks); 2740 req->cdb[14] = req->group_number; 2741 req->cdb[15] = 0; 2742 io_header->cmd_len = 16; 2743 } 2744 2745 /* The rest is as in scsi-generic.c. */ 2746 io_header->mx_sb_len = sizeof(r->req.sense); 2747 io_header->sbp = r->req.sense; 2748 io_header->timeout = UINT_MAX; 2749 io_header->usr_ptr = r; 2750 io_header->flags |= SG_FLAG_DIRECT_IO; 2751 2752 aiocb = blk_aio_ioctl(s->qdev.conf.blk, SG_IO, io_header, cb, opaque); 2753 assert(aiocb != NULL); 2754 return aiocb; 2755 } 2756 2757 static bool scsi_block_no_fua(SCSICommand *cmd) 2758 { 2759 return false; 2760 } 2761 2762 static BlockAIOCB *scsi_block_dma_readv(int64_t offset, 2763 QEMUIOVector *iov, 2764 BlockCompletionFunc *cb, void *cb_opaque, 2765 void *opaque) 2766 { 2767 SCSIBlockReq *r = opaque; 2768 return scsi_block_do_sgio(r, offset, iov, 2769 SG_DXFER_FROM_DEV, cb, cb_opaque); 2770 } 2771 2772 static BlockAIOCB *scsi_block_dma_writev(int64_t offset, 2773 QEMUIOVector *iov, 2774 BlockCompletionFunc *cb, void *cb_opaque, 2775 void *opaque) 2776 { 2777 SCSIBlockReq *r = opaque; 2778 return scsi_block_do_sgio(r, offset, iov, 2779 SG_DXFER_TO_DEV, cb, cb_opaque); 2780 } 2781 2782 static bool scsi_block_is_passthrough(SCSIDiskState *s, uint8_t *buf) 2783 { 2784 switch (buf[0]) { 2785 case VERIFY_10: 2786 case VERIFY_12: 2787 case VERIFY_16: 2788 /* Check if BYTCHK == 0x01 (data-out buffer contains data 2789 * for the number of logical blocks specified in the length 2790 * field). For other modes, do not use scatter/gather operation. 2791 */ 2792 if ((buf[1] & 6) == 2) { 2793 return false; 2794 } 2795 break; 2796 2797 case READ_6: 2798 case READ_10: 2799 case READ_12: 2800 case READ_16: 2801 case WRITE_6: 2802 case WRITE_10: 2803 case WRITE_12: 2804 case WRITE_16: 2805 case WRITE_VERIFY_10: 2806 case WRITE_VERIFY_12: 2807 case WRITE_VERIFY_16: 2808 /* MMC writing cannot be done via DMA helpers, because it sometimes 2809 * involves writing beyond the maximum LBA or to negative LBA (lead-in). 2810 * We might use scsi_block_dma_reqops as long as no writing commands are 2811 * seen, but performance usually isn't paramount on optical media. So, 2812 * just make scsi-block operate the same as scsi-generic for them. 2813 */ 2814 if (s->qdev.type != TYPE_ROM) { 2815 return false; 2816 } 2817 break; 2818 2819 default: 2820 break; 2821 } 2822 2823 return true; 2824 } 2825 2826 2827 static int32_t scsi_block_dma_command(SCSIRequest *req, uint8_t *buf) 2828 { 2829 SCSIBlockReq *r = (SCSIBlockReq *)req; 2830 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 2831 2832 r->cmd = req->cmd.buf[0]; 2833 switch (r->cmd >> 5) { 2834 case 0: 2835 /* 6-byte CDB. */ 2836 r->cdb1 = r->group_number = 0; 2837 break; 2838 case 1: 2839 /* 10-byte CDB. */ 2840 r->cdb1 = req->cmd.buf[1]; 2841 r->group_number = req->cmd.buf[6]; 2842 break; 2843 case 4: 2844 /* 12-byte CDB. */ 2845 r->cdb1 = req->cmd.buf[1]; 2846 r->group_number = req->cmd.buf[10]; 2847 break; 2848 case 5: 2849 /* 16-byte CDB. */ 2850 r->cdb1 = req->cmd.buf[1]; 2851 r->group_number = req->cmd.buf[14]; 2852 break; 2853 default: 2854 abort(); 2855 } 2856 2857 /* Protection information is not supported. For SCSI versions 2 and 2858 * older (as determined by snooping the guest's INQUIRY commands), 2859 * there is no RD/WR/VRPROTECT, so skip this check in these versions. 2860 */ 2861 if (s->qdev.scsi_version > 2 && (req->cmd.buf[1] & 0xe0)) { 2862 scsi_check_condition(&r->req, SENSE_CODE(INVALID_FIELD)); 2863 return 0; 2864 } 2865 2866 r->req.status = &r->io_header.status; 2867 return scsi_disk_dma_command(req, buf); 2868 } 2869 2870 static const SCSIReqOps scsi_block_dma_reqops = { 2871 .size = sizeof(SCSIBlockReq), 2872 .free_req = scsi_free_request, 2873 .send_command = scsi_block_dma_command, 2874 .read_data = scsi_read_data, 2875 .write_data = scsi_write_data, 2876 .get_buf = scsi_get_buf, 2877 .load_request = scsi_disk_load_request, 2878 .save_request = scsi_disk_save_request, 2879 }; 2880 2881 static SCSIRequest *scsi_block_new_request(SCSIDevice *d, uint32_t tag, 2882 uint32_t lun, uint8_t *buf, 2883 void *hba_private) 2884 { 2885 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d); 2886 2887 if (scsi_block_is_passthrough(s, buf)) { 2888 return scsi_req_alloc(&scsi_generic_req_ops, &s->qdev, tag, lun, 2889 hba_private); 2890 } else { 2891 return scsi_req_alloc(&scsi_block_dma_reqops, &s->qdev, tag, lun, 2892 hba_private); 2893 } 2894 } 2895 2896 static int scsi_block_parse_cdb(SCSIDevice *d, SCSICommand *cmd, 2897 uint8_t *buf, void *hba_private) 2898 { 2899 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d); 2900 2901 if (scsi_block_is_passthrough(s, buf)) { 2902 return scsi_bus_parse_cdb(&s->qdev, cmd, buf, hba_private); 2903 } else { 2904 return scsi_req_parse_cdb(&s->qdev, cmd, buf); 2905 } 2906 } 2907 2908 #endif 2909 2910 static 2911 BlockAIOCB *scsi_dma_readv(int64_t offset, QEMUIOVector *iov, 2912 BlockCompletionFunc *cb, void *cb_opaque, 2913 void *opaque) 2914 { 2915 SCSIDiskReq *r = opaque; 2916 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 2917 return blk_aio_preadv(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque); 2918 } 2919 2920 static 2921 BlockAIOCB *scsi_dma_writev(int64_t offset, QEMUIOVector *iov, 2922 BlockCompletionFunc *cb, void *cb_opaque, 2923 void *opaque) 2924 { 2925 SCSIDiskReq *r = opaque; 2926 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 2927 return blk_aio_pwritev(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque); 2928 } 2929 2930 static void scsi_disk_base_class_initfn(ObjectClass *klass, void *data) 2931 { 2932 DeviceClass *dc = DEVICE_CLASS(klass); 2933 SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass); 2934 2935 dc->fw_name = "disk"; 2936 dc->reset = scsi_disk_reset; 2937 sdc->dma_readv = scsi_dma_readv; 2938 sdc->dma_writev = scsi_dma_writev; 2939 sdc->need_fua_emulation = scsi_is_cmd_fua; 2940 } 2941 2942 static const TypeInfo scsi_disk_base_info = { 2943 .name = TYPE_SCSI_DISK_BASE, 2944 .parent = TYPE_SCSI_DEVICE, 2945 .class_init = scsi_disk_base_class_initfn, 2946 .instance_size = sizeof(SCSIDiskState), 2947 .class_size = sizeof(SCSIDiskClass), 2948 .abstract = true, 2949 }; 2950 2951 #define DEFINE_SCSI_DISK_PROPERTIES() \ 2952 DEFINE_BLOCK_PROPERTIES(SCSIDiskState, qdev.conf), \ 2953 DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf), \ 2954 DEFINE_PROP_STRING("ver", SCSIDiskState, version), \ 2955 DEFINE_PROP_STRING("serial", SCSIDiskState, serial), \ 2956 DEFINE_PROP_STRING("vendor", SCSIDiskState, vendor), \ 2957 DEFINE_PROP_STRING("product", SCSIDiskState, product) 2958 2959 static Property scsi_hd_properties[] = { 2960 DEFINE_SCSI_DISK_PROPERTIES(), 2961 DEFINE_PROP_BIT("removable", SCSIDiskState, features, 2962 SCSI_DISK_F_REMOVABLE, false), 2963 DEFINE_PROP_BIT("dpofua", SCSIDiskState, features, 2964 SCSI_DISK_F_DPOFUA, false), 2965 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0), 2966 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0), 2967 DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0), 2968 DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size, 2969 DEFAULT_MAX_UNMAP_SIZE), 2970 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size, 2971 DEFAULT_MAX_IO_SIZE), 2972 DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0), 2973 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version, 2974 5), 2975 DEFINE_BLOCK_CHS_PROPERTIES(SCSIDiskState, qdev.conf), 2976 DEFINE_PROP_END_OF_LIST(), 2977 }; 2978 2979 static const VMStateDescription vmstate_scsi_disk_state = { 2980 .name = "scsi-disk", 2981 .version_id = 1, 2982 .minimum_version_id = 1, 2983 .fields = (VMStateField[]) { 2984 VMSTATE_SCSI_DEVICE(qdev, SCSIDiskState), 2985 VMSTATE_BOOL(media_changed, SCSIDiskState), 2986 VMSTATE_BOOL(media_event, SCSIDiskState), 2987 VMSTATE_BOOL(eject_request, SCSIDiskState), 2988 VMSTATE_BOOL(tray_open, SCSIDiskState), 2989 VMSTATE_BOOL(tray_locked, SCSIDiskState), 2990 VMSTATE_END_OF_LIST() 2991 } 2992 }; 2993 2994 static void scsi_hd_class_initfn(ObjectClass *klass, void *data) 2995 { 2996 DeviceClass *dc = DEVICE_CLASS(klass); 2997 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); 2998 2999 sc->realize = scsi_hd_realize; 3000 sc->alloc_req = scsi_new_request; 3001 sc->unit_attention_reported = scsi_disk_unit_attention_reported; 3002 dc->desc = "virtual SCSI disk"; 3003 dc->props = scsi_hd_properties; 3004 dc->vmsd = &vmstate_scsi_disk_state; 3005 } 3006 3007 static const TypeInfo scsi_hd_info = { 3008 .name = "scsi-hd", 3009 .parent = TYPE_SCSI_DISK_BASE, 3010 .class_init = scsi_hd_class_initfn, 3011 }; 3012 3013 static Property scsi_cd_properties[] = { 3014 DEFINE_SCSI_DISK_PROPERTIES(), 3015 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0), 3016 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0), 3017 DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0), 3018 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size, 3019 DEFAULT_MAX_IO_SIZE), 3020 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version, 3021 5), 3022 DEFINE_PROP_END_OF_LIST(), 3023 }; 3024 3025 static void scsi_cd_class_initfn(ObjectClass *klass, void *data) 3026 { 3027 DeviceClass *dc = DEVICE_CLASS(klass); 3028 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); 3029 3030 sc->realize = scsi_cd_realize; 3031 sc->alloc_req = scsi_new_request; 3032 sc->unit_attention_reported = scsi_disk_unit_attention_reported; 3033 dc->desc = "virtual SCSI CD-ROM"; 3034 dc->props = scsi_cd_properties; 3035 dc->vmsd = &vmstate_scsi_disk_state; 3036 } 3037 3038 static const TypeInfo scsi_cd_info = { 3039 .name = "scsi-cd", 3040 .parent = TYPE_SCSI_DISK_BASE, 3041 .class_init = scsi_cd_class_initfn, 3042 }; 3043 3044 #ifdef __linux__ 3045 static Property scsi_block_properties[] = { 3046 DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf), \ 3047 DEFINE_PROP_DRIVE("drive", SCSIDiskState, qdev.conf.blk), 3048 DEFINE_PROP_BOOL("share-rw", SCSIDiskState, qdev.conf.share_rw, false), 3049 DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0), 3050 DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size, 3051 DEFAULT_MAX_UNMAP_SIZE), 3052 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size, 3053 DEFAULT_MAX_IO_SIZE), 3054 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version, 3055 -1), 3056 DEFINE_PROP_END_OF_LIST(), 3057 }; 3058 3059 static void scsi_block_class_initfn(ObjectClass *klass, void *data) 3060 { 3061 DeviceClass *dc = DEVICE_CLASS(klass); 3062 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); 3063 SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass); 3064 3065 sc->realize = scsi_block_realize; 3066 sc->alloc_req = scsi_block_new_request; 3067 sc->parse_cdb = scsi_block_parse_cdb; 3068 sdc->dma_readv = scsi_block_dma_readv; 3069 sdc->dma_writev = scsi_block_dma_writev; 3070 sdc->need_fua_emulation = scsi_block_no_fua; 3071 dc->desc = "SCSI block device passthrough"; 3072 dc->props = scsi_block_properties; 3073 dc->vmsd = &vmstate_scsi_disk_state; 3074 } 3075 3076 static const TypeInfo scsi_block_info = { 3077 .name = "scsi-block", 3078 .parent = TYPE_SCSI_DISK_BASE, 3079 .class_init = scsi_block_class_initfn, 3080 }; 3081 #endif 3082 3083 static Property scsi_disk_properties[] = { 3084 DEFINE_SCSI_DISK_PROPERTIES(), 3085 DEFINE_PROP_BIT("removable", SCSIDiskState, features, 3086 SCSI_DISK_F_REMOVABLE, false), 3087 DEFINE_PROP_BIT("dpofua", SCSIDiskState, features, 3088 SCSI_DISK_F_DPOFUA, false), 3089 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0), 3090 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0), 3091 DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0), 3092 DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size, 3093 DEFAULT_MAX_UNMAP_SIZE), 3094 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size, 3095 DEFAULT_MAX_IO_SIZE), 3096 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version, 3097 5), 3098 DEFINE_PROP_END_OF_LIST(), 3099 }; 3100 3101 static void scsi_disk_class_initfn(ObjectClass *klass, void *data) 3102 { 3103 DeviceClass *dc = DEVICE_CLASS(klass); 3104 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); 3105 3106 sc->realize = scsi_disk_realize; 3107 sc->alloc_req = scsi_new_request; 3108 sc->unit_attention_reported = scsi_disk_unit_attention_reported; 3109 dc->fw_name = "disk"; 3110 dc->desc = "virtual SCSI disk or CD-ROM (legacy)"; 3111 dc->reset = scsi_disk_reset; 3112 dc->props = scsi_disk_properties; 3113 dc->vmsd = &vmstate_scsi_disk_state; 3114 } 3115 3116 static const TypeInfo scsi_disk_info = { 3117 .name = "scsi-disk", 3118 .parent = TYPE_SCSI_DISK_BASE, 3119 .class_init = scsi_disk_class_initfn, 3120 }; 3121 3122 static void scsi_disk_register_types(void) 3123 { 3124 type_register_static(&scsi_disk_base_info); 3125 type_register_static(&scsi_hd_info); 3126 type_register_static(&scsi_cd_info); 3127 #ifdef __linux__ 3128 type_register_static(&scsi_block_info); 3129 #endif 3130 type_register_static(&scsi_disk_info); 3131 } 3132 3133 type_init(scsi_disk_register_types) 3134