1 /* 2 * SCSI Device emulation 3 * 4 * Copyright (c) 2006 CodeSourcery. 5 * Based on code by Fabrice Bellard 6 * 7 * Written by Paul Brook 8 * Modifications: 9 * 2009-Dec-12 Artyom Tarasenko : implemented stamdard inquiry for the case 10 * when the allocation length of CDB is smaller 11 * than 36. 12 * 2009-Oct-13 Artyom Tarasenko : implemented the block descriptor in the 13 * MODE SENSE response. 14 * 15 * This code is licensed under the LGPL. 16 * 17 * Note that this file only handles the SCSI architecture model and device 18 * commands. Emulation of interface/link layer protocols is handled by 19 * the host adapter emulator. 20 */ 21 22 //#define DEBUG_SCSI 23 24 #ifdef DEBUG_SCSI 25 #define DPRINTF(fmt, ...) \ 26 do { printf("scsi-disk: " fmt , ## __VA_ARGS__); } while (0) 27 #else 28 #define DPRINTF(fmt, ...) do {} while(0) 29 #endif 30 31 #include "qemu/osdep.h" 32 #include "qemu/units.h" 33 #include "qapi/error.h" 34 #include "qemu/error-report.h" 35 #include "hw/scsi/scsi.h" 36 #include "scsi/constants.h" 37 #include "sysemu/sysemu.h" 38 #include "sysemu/block-backend.h" 39 #include "sysemu/blockdev.h" 40 #include "hw/block/block.h" 41 #include "sysemu/dma.h" 42 #include "qemu/cutils.h" 43 44 #ifdef __linux 45 #include <scsi/sg.h> 46 #endif 47 48 #define SCSI_WRITE_SAME_MAX (512 * KiB) 49 #define SCSI_DMA_BUF_SIZE (128 * KiB) 50 #define SCSI_MAX_INQUIRY_LEN 256 51 #define SCSI_MAX_MODE_LEN 256 52 53 #define DEFAULT_DISCARD_GRANULARITY (4 * KiB) 54 #define DEFAULT_MAX_UNMAP_SIZE (1 * GiB) 55 #define DEFAULT_MAX_IO_SIZE INT_MAX /* 2 GB - 1 block */ 56 57 #define TYPE_SCSI_DISK_BASE "scsi-disk-base" 58 59 #define SCSI_DISK_BASE(obj) \ 60 OBJECT_CHECK(SCSIDiskState, (obj), TYPE_SCSI_DISK_BASE) 61 #define SCSI_DISK_BASE_CLASS(klass) \ 62 OBJECT_CLASS_CHECK(SCSIDiskClass, (klass), TYPE_SCSI_DISK_BASE) 63 #define SCSI_DISK_BASE_GET_CLASS(obj) \ 64 OBJECT_GET_CLASS(SCSIDiskClass, (obj), TYPE_SCSI_DISK_BASE) 65 66 typedef struct SCSIDiskClass { 67 SCSIDeviceClass parent_class; 68 DMAIOFunc *dma_readv; 69 DMAIOFunc *dma_writev; 70 bool (*need_fua_emulation)(SCSICommand *cmd); 71 } SCSIDiskClass; 72 73 typedef struct SCSIDiskReq { 74 SCSIRequest req; 75 /* Both sector and sector_count are in terms of qemu 512 byte blocks. */ 76 uint64_t sector; 77 uint32_t sector_count; 78 uint32_t buflen; 79 bool started; 80 bool need_fua_emulation; 81 struct iovec iov; 82 QEMUIOVector qiov; 83 BlockAcctCookie acct; 84 unsigned char *status; 85 } SCSIDiskReq; 86 87 #define SCSI_DISK_F_REMOVABLE 0 88 #define SCSI_DISK_F_DPOFUA 1 89 #define SCSI_DISK_F_NO_REMOVABLE_DEVOPS 2 90 91 typedef struct SCSIDiskState 92 { 93 SCSIDevice qdev; 94 uint32_t features; 95 bool media_changed; 96 bool media_event; 97 bool eject_request; 98 uint16_t port_index; 99 uint64_t max_unmap_size; 100 uint64_t max_io_size; 101 QEMUBH *bh; 102 char *version; 103 char *serial; 104 char *vendor; 105 char *product; 106 bool tray_open; 107 bool tray_locked; 108 /* 109 * 0x0000 - rotation rate not reported 110 * 0x0001 - non-rotating medium (SSD) 111 * 0x0002-0x0400 - reserved 112 * 0x0401-0xffe - rotations per minute 113 * 0xffff - reserved 114 */ 115 uint16_t rotation_rate; 116 } SCSIDiskState; 117 118 static bool scsi_handle_rw_error(SCSIDiskReq *r, int error, bool acct_failed); 119 120 static void scsi_free_request(SCSIRequest *req) 121 { 122 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 123 124 qemu_vfree(r->iov.iov_base); 125 } 126 127 /* Helper function for command completion with sense. */ 128 static void scsi_check_condition(SCSIDiskReq *r, SCSISense sense) 129 { 130 DPRINTF("Command complete tag=0x%x sense=%d/%d/%d\n", 131 r->req.tag, sense.key, sense.asc, sense.ascq); 132 scsi_req_build_sense(&r->req, sense); 133 scsi_req_complete(&r->req, CHECK_CONDITION); 134 } 135 136 static void scsi_init_iovec(SCSIDiskReq *r, size_t size) 137 { 138 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 139 140 if (!r->iov.iov_base) { 141 r->buflen = size; 142 r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen); 143 } 144 r->iov.iov_len = MIN(r->sector_count * 512, r->buflen); 145 qemu_iovec_init_external(&r->qiov, &r->iov, 1); 146 } 147 148 static void scsi_disk_save_request(QEMUFile *f, SCSIRequest *req) 149 { 150 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 151 152 qemu_put_be64s(f, &r->sector); 153 qemu_put_be32s(f, &r->sector_count); 154 qemu_put_be32s(f, &r->buflen); 155 if (r->buflen) { 156 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { 157 qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len); 158 } else if (!req->retry) { 159 uint32_t len = r->iov.iov_len; 160 qemu_put_be32s(f, &len); 161 qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len); 162 } 163 } 164 } 165 166 static void scsi_disk_load_request(QEMUFile *f, SCSIRequest *req) 167 { 168 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 169 170 qemu_get_be64s(f, &r->sector); 171 qemu_get_be32s(f, &r->sector_count); 172 qemu_get_be32s(f, &r->buflen); 173 if (r->buflen) { 174 scsi_init_iovec(r, r->buflen); 175 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { 176 qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len); 177 } else if (!r->req.retry) { 178 uint32_t len; 179 qemu_get_be32s(f, &len); 180 r->iov.iov_len = len; 181 assert(r->iov.iov_len <= r->buflen); 182 qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len); 183 } 184 } 185 186 qemu_iovec_init_external(&r->qiov, &r->iov, 1); 187 } 188 189 static bool scsi_disk_req_check_error(SCSIDiskReq *r, int ret, bool acct_failed) 190 { 191 if (r->req.io_canceled) { 192 scsi_req_cancel_complete(&r->req); 193 return true; 194 } 195 196 if (ret < 0 || (r->status && *r->status)) { 197 return scsi_handle_rw_error(r, -ret, acct_failed); 198 } 199 200 return false; 201 } 202 203 static void scsi_aio_complete(void *opaque, int ret) 204 { 205 SCSIDiskReq *r = (SCSIDiskReq *)opaque; 206 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 207 208 assert(r->req.aiocb != NULL); 209 r->req.aiocb = NULL; 210 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); 211 if (scsi_disk_req_check_error(r, ret, true)) { 212 goto done; 213 } 214 215 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); 216 scsi_req_complete(&r->req, GOOD); 217 218 done: 219 aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); 220 scsi_req_unref(&r->req); 221 } 222 223 static bool scsi_is_cmd_fua(SCSICommand *cmd) 224 { 225 switch (cmd->buf[0]) { 226 case READ_10: 227 case READ_12: 228 case READ_16: 229 case WRITE_10: 230 case WRITE_12: 231 case WRITE_16: 232 return (cmd->buf[1] & 8) != 0; 233 234 case VERIFY_10: 235 case VERIFY_12: 236 case VERIFY_16: 237 case WRITE_VERIFY_10: 238 case WRITE_VERIFY_12: 239 case WRITE_VERIFY_16: 240 return true; 241 242 case READ_6: 243 case WRITE_6: 244 default: 245 return false; 246 } 247 } 248 249 static void scsi_write_do_fua(SCSIDiskReq *r) 250 { 251 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 252 253 assert(r->req.aiocb == NULL); 254 assert(!r->req.io_canceled); 255 256 if (r->need_fua_emulation) { 257 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0, 258 BLOCK_ACCT_FLUSH); 259 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r); 260 return; 261 } 262 263 scsi_req_complete(&r->req, GOOD); 264 scsi_req_unref(&r->req); 265 } 266 267 static void scsi_dma_complete_noio(SCSIDiskReq *r, int ret) 268 { 269 assert(r->req.aiocb == NULL); 270 if (scsi_disk_req_check_error(r, ret, false)) { 271 goto done; 272 } 273 274 r->sector += r->sector_count; 275 r->sector_count = 0; 276 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { 277 scsi_write_do_fua(r); 278 return; 279 } else { 280 scsi_req_complete(&r->req, GOOD); 281 } 282 283 done: 284 scsi_req_unref(&r->req); 285 } 286 287 static void scsi_dma_complete(void *opaque, int ret) 288 { 289 SCSIDiskReq *r = (SCSIDiskReq *)opaque; 290 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 291 292 assert(r->req.aiocb != NULL); 293 r->req.aiocb = NULL; 294 295 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); 296 if (ret < 0) { 297 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct); 298 } else { 299 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); 300 } 301 scsi_dma_complete_noio(r, ret); 302 aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); 303 } 304 305 static void scsi_read_complete(void * opaque, int ret) 306 { 307 SCSIDiskReq *r = (SCSIDiskReq *)opaque; 308 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 309 int n; 310 311 assert(r->req.aiocb != NULL); 312 r->req.aiocb = NULL; 313 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); 314 if (scsi_disk_req_check_error(r, ret, true)) { 315 goto done; 316 } 317 318 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); 319 DPRINTF("Data ready tag=0x%x len=%zd\n", r->req.tag, r->qiov.size); 320 321 n = r->qiov.size / 512; 322 r->sector += n; 323 r->sector_count -= n; 324 scsi_req_data(&r->req, r->qiov.size); 325 326 done: 327 scsi_req_unref(&r->req); 328 aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); 329 } 330 331 /* Actually issue a read to the block device. */ 332 static void scsi_do_read(SCSIDiskReq *r, int ret) 333 { 334 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 335 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s)); 336 337 assert (r->req.aiocb == NULL); 338 if (scsi_disk_req_check_error(r, ret, false)) { 339 goto done; 340 } 341 342 /* The request is used as the AIO opaque value, so add a ref. */ 343 scsi_req_ref(&r->req); 344 345 if (r->req.sg) { 346 dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_READ); 347 r->req.resid -= r->req.sg->size; 348 r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk), 349 r->req.sg, r->sector << BDRV_SECTOR_BITS, 350 BDRV_SECTOR_SIZE, 351 sdc->dma_readv, r, scsi_dma_complete, r, 352 DMA_DIRECTION_FROM_DEVICE); 353 } else { 354 scsi_init_iovec(r, SCSI_DMA_BUF_SIZE); 355 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 356 r->qiov.size, BLOCK_ACCT_READ); 357 r->req.aiocb = sdc->dma_readv(r->sector << BDRV_SECTOR_BITS, &r->qiov, 358 scsi_read_complete, r, r); 359 } 360 361 done: 362 scsi_req_unref(&r->req); 363 } 364 365 static void scsi_do_read_cb(void *opaque, int ret) 366 { 367 SCSIDiskReq *r = (SCSIDiskReq *)opaque; 368 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 369 370 assert (r->req.aiocb != NULL); 371 r->req.aiocb = NULL; 372 373 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); 374 if (ret < 0) { 375 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct); 376 } else { 377 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); 378 } 379 scsi_do_read(opaque, ret); 380 aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); 381 } 382 383 /* Read more data from scsi device into buffer. */ 384 static void scsi_read_data(SCSIRequest *req) 385 { 386 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 387 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 388 bool first; 389 390 DPRINTF("Read sector_count=%d\n", r->sector_count); 391 if (r->sector_count == 0) { 392 /* This also clears the sense buffer for REQUEST SENSE. */ 393 scsi_req_complete(&r->req, GOOD); 394 return; 395 } 396 397 /* No data transfer may already be in progress */ 398 assert(r->req.aiocb == NULL); 399 400 /* The request is used as the AIO opaque value, so add a ref. */ 401 scsi_req_ref(&r->req); 402 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { 403 DPRINTF("Data transfer direction invalid\n"); 404 scsi_read_complete(r, -EINVAL); 405 return; 406 } 407 408 if (!blk_is_available(req->dev->conf.blk)) { 409 scsi_read_complete(r, -ENOMEDIUM); 410 return; 411 } 412 413 first = !r->started; 414 r->started = true; 415 if (first && r->need_fua_emulation) { 416 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0, 417 BLOCK_ACCT_FLUSH); 418 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_do_read_cb, r); 419 } else { 420 scsi_do_read(r, 0); 421 } 422 } 423 424 /* 425 * scsi_handle_rw_error has two return values. False means that the error 426 * must be ignored, true means that the error has been processed and the 427 * caller should not do anything else for this request. Note that 428 * scsi_handle_rw_error always manages its reference counts, independent 429 * of the return value. 430 */ 431 static bool scsi_handle_rw_error(SCSIDiskReq *r, int error, bool acct_failed) 432 { 433 bool is_read = (r->req.cmd.mode == SCSI_XFER_FROM_DEV); 434 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 435 BlockErrorAction action = blk_get_error_action(s->qdev.conf.blk, 436 is_read, error); 437 438 if (action == BLOCK_ERROR_ACTION_REPORT) { 439 if (acct_failed) { 440 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct); 441 } 442 switch (error) { 443 case 0: 444 /* The command has run, no need to fake sense. */ 445 assert(r->status && *r->status); 446 scsi_req_complete(&r->req, *r->status); 447 break; 448 case ENOMEDIUM: 449 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM)); 450 break; 451 case ENOMEM: 452 scsi_check_condition(r, SENSE_CODE(TARGET_FAILURE)); 453 break; 454 case EINVAL: 455 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 456 break; 457 case ENOSPC: 458 scsi_check_condition(r, SENSE_CODE(SPACE_ALLOC_FAILED)); 459 break; 460 default: 461 scsi_check_condition(r, SENSE_CODE(IO_ERROR)); 462 break; 463 } 464 } 465 if (!error) { 466 assert(r->status && *r->status); 467 error = scsi_sense_buf_to_errno(r->req.sense, sizeof(r->req.sense)); 468 469 if (error == ECANCELED || error == EAGAIN || error == ENOTCONN || 470 error == 0) { 471 /* These errors are handled by guest. */ 472 scsi_req_complete(&r->req, *r->status); 473 return true; 474 } 475 } 476 477 blk_error_action(s->qdev.conf.blk, action, is_read, error); 478 if (action == BLOCK_ERROR_ACTION_STOP) { 479 scsi_req_retry(&r->req); 480 } 481 return action != BLOCK_ERROR_ACTION_IGNORE; 482 } 483 484 static void scsi_write_complete_noio(SCSIDiskReq *r, int ret) 485 { 486 uint32_t n; 487 488 assert (r->req.aiocb == NULL); 489 if (scsi_disk_req_check_error(r, ret, false)) { 490 goto done; 491 } 492 493 n = r->qiov.size / 512; 494 r->sector += n; 495 r->sector_count -= n; 496 if (r->sector_count == 0) { 497 scsi_write_do_fua(r); 498 return; 499 } else { 500 scsi_init_iovec(r, SCSI_DMA_BUF_SIZE); 501 DPRINTF("Write complete tag=0x%x more=%zd\n", r->req.tag, r->qiov.size); 502 scsi_req_data(&r->req, r->qiov.size); 503 } 504 505 done: 506 scsi_req_unref(&r->req); 507 } 508 509 static void scsi_write_complete(void * opaque, int ret) 510 { 511 SCSIDiskReq *r = (SCSIDiskReq *)opaque; 512 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 513 514 assert (r->req.aiocb != NULL); 515 r->req.aiocb = NULL; 516 517 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); 518 if (ret < 0) { 519 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct); 520 } else { 521 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); 522 } 523 scsi_write_complete_noio(r, ret); 524 aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); 525 } 526 527 static void scsi_write_data(SCSIRequest *req) 528 { 529 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 530 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 531 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s)); 532 533 /* No data transfer may already be in progress */ 534 assert(r->req.aiocb == NULL); 535 536 /* The request is used as the AIO opaque value, so add a ref. */ 537 scsi_req_ref(&r->req); 538 if (r->req.cmd.mode != SCSI_XFER_TO_DEV) { 539 DPRINTF("Data transfer direction invalid\n"); 540 scsi_write_complete_noio(r, -EINVAL); 541 return; 542 } 543 544 if (!r->req.sg && !r->qiov.size) { 545 /* Called for the first time. Ask the driver to send us more data. */ 546 r->started = true; 547 scsi_write_complete_noio(r, 0); 548 return; 549 } 550 if (!blk_is_available(req->dev->conf.blk)) { 551 scsi_write_complete_noio(r, -ENOMEDIUM); 552 return; 553 } 554 555 if (r->req.cmd.buf[0] == VERIFY_10 || r->req.cmd.buf[0] == VERIFY_12 || 556 r->req.cmd.buf[0] == VERIFY_16) { 557 if (r->req.sg) { 558 scsi_dma_complete_noio(r, 0); 559 } else { 560 scsi_write_complete_noio(r, 0); 561 } 562 return; 563 } 564 565 if (r->req.sg) { 566 dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_WRITE); 567 r->req.resid -= r->req.sg->size; 568 r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk), 569 r->req.sg, r->sector << BDRV_SECTOR_BITS, 570 BDRV_SECTOR_SIZE, 571 sdc->dma_writev, r, scsi_dma_complete, r, 572 DMA_DIRECTION_TO_DEVICE); 573 } else { 574 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 575 r->qiov.size, BLOCK_ACCT_WRITE); 576 r->req.aiocb = sdc->dma_writev(r->sector << BDRV_SECTOR_BITS, &r->qiov, 577 scsi_write_complete, r, r); 578 } 579 } 580 581 /* Return a pointer to the data buffer. */ 582 static uint8_t *scsi_get_buf(SCSIRequest *req) 583 { 584 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 585 586 return (uint8_t *)r->iov.iov_base; 587 } 588 589 int scsi_disk_emulate_vpd_page(SCSIRequest *req, uint8_t *outbuf) 590 { 591 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 592 uint8_t page_code = req->cmd.buf[2]; 593 int start, buflen = 0; 594 595 outbuf[buflen++] = s->qdev.type & 0x1f; 596 outbuf[buflen++] = page_code; 597 outbuf[buflen++] = 0x00; 598 outbuf[buflen++] = 0x00; 599 start = buflen; 600 601 switch (page_code) { 602 case 0x00: /* Supported page codes, mandatory */ 603 { 604 DPRINTF("Inquiry EVPD[Supported pages] " 605 "buffer size %zd\n", req->cmd.xfer); 606 outbuf[buflen++] = 0x00; /* list of supported pages (this page) */ 607 if (s->serial) { 608 outbuf[buflen++] = 0x80; /* unit serial number */ 609 } 610 outbuf[buflen++] = 0x83; /* device identification */ 611 if (s->qdev.type == TYPE_DISK) { 612 outbuf[buflen++] = 0xb0; /* block limits */ 613 outbuf[buflen++] = 0xb1; /* block device characteristics */ 614 outbuf[buflen++] = 0xb2; /* thin provisioning */ 615 } 616 break; 617 } 618 case 0x80: /* Device serial number, optional */ 619 { 620 int l; 621 622 if (!s->serial) { 623 DPRINTF("Inquiry (EVPD[Serial number] not supported\n"); 624 return -1; 625 } 626 627 l = strlen(s->serial); 628 if (l > 36) { 629 l = 36; 630 } 631 632 DPRINTF("Inquiry EVPD[Serial number] " 633 "buffer size %zd\n", req->cmd.xfer); 634 memcpy(outbuf + buflen, s->serial, l); 635 buflen += l; 636 break; 637 } 638 639 case 0x83: /* Device identification page, mandatory */ 640 { 641 const char *str = s->serial ?: blk_name(s->qdev.conf.blk); 642 int max_len = s->serial ? 20 : 255 - 8; 643 int id_len = strlen(str); 644 645 if (id_len > max_len) { 646 id_len = max_len; 647 } 648 DPRINTF("Inquiry EVPD[Device identification] " 649 "buffer size %zd\n", req->cmd.xfer); 650 651 outbuf[buflen++] = 0x2; /* ASCII */ 652 outbuf[buflen++] = 0; /* not officially assigned */ 653 outbuf[buflen++] = 0; /* reserved */ 654 outbuf[buflen++] = id_len; /* length of data following */ 655 memcpy(outbuf + buflen, str, id_len); 656 buflen += id_len; 657 658 if (s->qdev.wwn) { 659 outbuf[buflen++] = 0x1; /* Binary */ 660 outbuf[buflen++] = 0x3; /* NAA */ 661 outbuf[buflen++] = 0; /* reserved */ 662 outbuf[buflen++] = 8; 663 stq_be_p(&outbuf[buflen], s->qdev.wwn); 664 buflen += 8; 665 } 666 667 if (s->qdev.port_wwn) { 668 outbuf[buflen++] = 0x61; /* SAS / Binary */ 669 outbuf[buflen++] = 0x93; /* PIV / Target port / NAA */ 670 outbuf[buflen++] = 0; /* reserved */ 671 outbuf[buflen++] = 8; 672 stq_be_p(&outbuf[buflen], s->qdev.port_wwn); 673 buflen += 8; 674 } 675 676 if (s->port_index) { 677 outbuf[buflen++] = 0x61; /* SAS / Binary */ 678 679 /* PIV/Target port/relative target port */ 680 outbuf[buflen++] = 0x94; 681 682 outbuf[buflen++] = 0; /* reserved */ 683 outbuf[buflen++] = 4; 684 stw_be_p(&outbuf[buflen + 2], s->port_index); 685 buflen += 4; 686 } 687 break; 688 } 689 case 0xb0: /* block limits */ 690 { 691 unsigned int unmap_sectors = 692 s->qdev.conf.discard_granularity / s->qdev.blocksize; 693 unsigned int min_io_size = 694 s->qdev.conf.min_io_size / s->qdev.blocksize; 695 unsigned int opt_io_size = 696 s->qdev.conf.opt_io_size / s->qdev.blocksize; 697 unsigned int max_unmap_sectors = 698 s->max_unmap_size / s->qdev.blocksize; 699 unsigned int max_io_sectors = 700 s->max_io_size / s->qdev.blocksize; 701 702 if (s->qdev.type == TYPE_ROM) { 703 DPRINTF("Inquiry (EVPD[%02X] not supported for CDROM\n", 704 page_code); 705 return -1; 706 } 707 if (s->qdev.type == TYPE_DISK) { 708 int max_transfer_blk = blk_get_max_transfer(s->qdev.conf.blk); 709 int max_io_sectors_blk = 710 max_transfer_blk / s->qdev.blocksize; 711 712 max_io_sectors = 713 MIN_NON_ZERO(max_io_sectors_blk, max_io_sectors); 714 715 /* min_io_size and opt_io_size can't be greater than 716 * max_io_sectors */ 717 if (min_io_size) { 718 min_io_size = MIN(min_io_size, max_io_sectors); 719 } 720 if (opt_io_size) { 721 opt_io_size = MIN(opt_io_size, max_io_sectors); 722 } 723 } 724 /* required VPD size with unmap support */ 725 buflen = 0x40; 726 memset(outbuf + 4, 0, buflen - 4); 727 728 outbuf[4] = 0x1; /* wsnz */ 729 730 /* optimal transfer length granularity */ 731 outbuf[6] = (min_io_size >> 8) & 0xff; 732 outbuf[7] = min_io_size & 0xff; 733 734 /* maximum transfer length */ 735 outbuf[8] = (max_io_sectors >> 24) & 0xff; 736 outbuf[9] = (max_io_sectors >> 16) & 0xff; 737 outbuf[10] = (max_io_sectors >> 8) & 0xff; 738 outbuf[11] = max_io_sectors & 0xff; 739 740 /* optimal transfer length */ 741 outbuf[12] = (opt_io_size >> 24) & 0xff; 742 outbuf[13] = (opt_io_size >> 16) & 0xff; 743 outbuf[14] = (opt_io_size >> 8) & 0xff; 744 outbuf[15] = opt_io_size & 0xff; 745 746 /* max unmap LBA count, default is 1GB */ 747 outbuf[20] = (max_unmap_sectors >> 24) & 0xff; 748 outbuf[21] = (max_unmap_sectors >> 16) & 0xff; 749 outbuf[22] = (max_unmap_sectors >> 8) & 0xff; 750 outbuf[23] = max_unmap_sectors & 0xff; 751 752 /* max unmap descriptors, 255 fit in 4 kb with an 8-byte header */ 753 outbuf[24] = 0; 754 outbuf[25] = 0; 755 outbuf[26] = 0; 756 outbuf[27] = 255; 757 758 /* optimal unmap granularity */ 759 outbuf[28] = (unmap_sectors >> 24) & 0xff; 760 outbuf[29] = (unmap_sectors >> 16) & 0xff; 761 outbuf[30] = (unmap_sectors >> 8) & 0xff; 762 outbuf[31] = unmap_sectors & 0xff; 763 764 /* max write same size */ 765 outbuf[36] = 0; 766 outbuf[37] = 0; 767 outbuf[38] = 0; 768 outbuf[39] = 0; 769 770 outbuf[40] = (max_io_sectors >> 24) & 0xff; 771 outbuf[41] = (max_io_sectors >> 16) & 0xff; 772 outbuf[42] = (max_io_sectors >> 8) & 0xff; 773 outbuf[43] = max_io_sectors & 0xff; 774 break; 775 } 776 case 0xb1: /* block device characteristics */ 777 { 778 buflen = 0x40; 779 outbuf[4] = (s->rotation_rate >> 8) & 0xff; 780 outbuf[5] = s->rotation_rate & 0xff; 781 outbuf[6] = 0; /* PRODUCT TYPE */ 782 outbuf[7] = 0; /* WABEREQ | WACEREQ | NOMINAL FORM FACTOR */ 783 outbuf[8] = 0; /* VBULS */ 784 break; 785 } 786 case 0xb2: /* thin provisioning */ 787 { 788 buflen = 8; 789 outbuf[4] = 0; 790 outbuf[5] = 0xe0; /* unmap & write_same 10/16 all supported */ 791 outbuf[6] = s->qdev.conf.discard_granularity ? 2 : 1; 792 outbuf[7] = 0; 793 break; 794 } 795 default: 796 return -1; 797 } 798 /* done with EVPD */ 799 assert(buflen - start <= 255); 800 outbuf[start - 1] = buflen - start; 801 return buflen; 802 } 803 804 static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf) 805 { 806 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 807 int buflen = 0; 808 809 if (req->cmd.buf[1] & 0x1) { 810 /* Vital product data */ 811 return scsi_disk_emulate_vpd_page(req, outbuf); 812 } 813 814 /* Standard INQUIRY data */ 815 if (req->cmd.buf[2] != 0) { 816 return -1; 817 } 818 819 /* PAGE CODE == 0 */ 820 buflen = req->cmd.xfer; 821 if (buflen > SCSI_MAX_INQUIRY_LEN) { 822 buflen = SCSI_MAX_INQUIRY_LEN; 823 } 824 825 outbuf[0] = s->qdev.type & 0x1f; 826 outbuf[1] = (s->features & (1 << SCSI_DISK_F_REMOVABLE)) ? 0x80 : 0; 827 828 strpadcpy((char *) &outbuf[16], 16, s->product, ' '); 829 strpadcpy((char *) &outbuf[8], 8, s->vendor, ' '); 830 831 memset(&outbuf[32], 0, 4); 832 memcpy(&outbuf[32], s->version, MIN(4, strlen(s->version))); 833 /* 834 * We claim conformance to SPC-3, which is required for guests 835 * to ask for modern features like READ CAPACITY(16) or the 836 * block characteristics VPD page by default. Not all of SPC-3 837 * is actually implemented, but we're good enough. 838 */ 839 outbuf[2] = s->qdev.default_scsi_version; 840 outbuf[3] = 2 | 0x10; /* Format 2, HiSup */ 841 842 if (buflen > 36) { 843 outbuf[4] = buflen - 5; /* Additional Length = (Len - 1) - 4 */ 844 } else { 845 /* If the allocation length of CDB is too small, 846 the additional length is not adjusted */ 847 outbuf[4] = 36 - 5; 848 } 849 850 /* Sync data transfer and TCQ. */ 851 outbuf[7] = 0x10 | (req->bus->info->tcq ? 0x02 : 0); 852 return buflen; 853 } 854 855 static inline bool media_is_dvd(SCSIDiskState *s) 856 { 857 uint64_t nb_sectors; 858 if (s->qdev.type != TYPE_ROM) { 859 return false; 860 } 861 if (!blk_is_available(s->qdev.conf.blk)) { 862 return false; 863 } 864 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 865 return nb_sectors > CD_MAX_SECTORS; 866 } 867 868 static inline bool media_is_cd(SCSIDiskState *s) 869 { 870 uint64_t nb_sectors; 871 if (s->qdev.type != TYPE_ROM) { 872 return false; 873 } 874 if (!blk_is_available(s->qdev.conf.blk)) { 875 return false; 876 } 877 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 878 return nb_sectors <= CD_MAX_SECTORS; 879 } 880 881 static int scsi_read_disc_information(SCSIDiskState *s, SCSIDiskReq *r, 882 uint8_t *outbuf) 883 { 884 uint8_t type = r->req.cmd.buf[1] & 7; 885 886 if (s->qdev.type != TYPE_ROM) { 887 return -1; 888 } 889 890 /* Types 1/2 are only defined for Blu-Ray. */ 891 if (type != 0) { 892 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 893 return -1; 894 } 895 896 memset(outbuf, 0, 34); 897 outbuf[1] = 32; 898 outbuf[2] = 0xe; /* last session complete, disc finalized */ 899 outbuf[3] = 1; /* first track on disc */ 900 outbuf[4] = 1; /* # of sessions */ 901 outbuf[5] = 1; /* first track of last session */ 902 outbuf[6] = 1; /* last track of last session */ 903 outbuf[7] = 0x20; /* unrestricted use */ 904 outbuf[8] = 0x00; /* CD-ROM or DVD-ROM */ 905 /* 9-10-11: most significant byte corresponding bytes 4-5-6 */ 906 /* 12-23: not meaningful for CD-ROM or DVD-ROM */ 907 /* 24-31: disc bar code */ 908 /* 32: disc application code */ 909 /* 33: number of OPC tables */ 910 911 return 34; 912 } 913 914 static int scsi_read_dvd_structure(SCSIDiskState *s, SCSIDiskReq *r, 915 uint8_t *outbuf) 916 { 917 static const int rds_caps_size[5] = { 918 [0] = 2048 + 4, 919 [1] = 4 + 4, 920 [3] = 188 + 4, 921 [4] = 2048 + 4, 922 }; 923 924 uint8_t media = r->req.cmd.buf[1]; 925 uint8_t layer = r->req.cmd.buf[6]; 926 uint8_t format = r->req.cmd.buf[7]; 927 int size = -1; 928 929 if (s->qdev.type != TYPE_ROM) { 930 return -1; 931 } 932 if (media != 0) { 933 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 934 return -1; 935 } 936 937 if (format != 0xff) { 938 if (!blk_is_available(s->qdev.conf.blk)) { 939 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM)); 940 return -1; 941 } 942 if (media_is_cd(s)) { 943 scsi_check_condition(r, SENSE_CODE(INCOMPATIBLE_FORMAT)); 944 return -1; 945 } 946 if (format >= ARRAY_SIZE(rds_caps_size)) { 947 return -1; 948 } 949 size = rds_caps_size[format]; 950 memset(outbuf, 0, size); 951 } 952 953 switch (format) { 954 case 0x00: { 955 /* Physical format information */ 956 uint64_t nb_sectors; 957 if (layer != 0) { 958 goto fail; 959 } 960 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 961 962 outbuf[4] = 1; /* DVD-ROM, part version 1 */ 963 outbuf[5] = 0xf; /* 120mm disc, minimum rate unspecified */ 964 outbuf[6] = 1; /* one layer, read-only (per MMC-2 spec) */ 965 outbuf[7] = 0; /* default densities */ 966 967 stl_be_p(&outbuf[12], (nb_sectors >> 2) - 1); /* end sector */ 968 stl_be_p(&outbuf[16], (nb_sectors >> 2) - 1); /* l0 end sector */ 969 break; 970 } 971 972 case 0x01: /* DVD copyright information, all zeros */ 973 break; 974 975 case 0x03: /* BCA information - invalid field for no BCA info */ 976 return -1; 977 978 case 0x04: /* DVD disc manufacturing information, all zeros */ 979 break; 980 981 case 0xff: { /* List capabilities */ 982 int i; 983 size = 4; 984 for (i = 0; i < ARRAY_SIZE(rds_caps_size); i++) { 985 if (!rds_caps_size[i]) { 986 continue; 987 } 988 outbuf[size] = i; 989 outbuf[size + 1] = 0x40; /* Not writable, readable */ 990 stw_be_p(&outbuf[size + 2], rds_caps_size[i]); 991 size += 4; 992 } 993 break; 994 } 995 996 default: 997 return -1; 998 } 999 1000 /* Size of buffer, not including 2 byte size field */ 1001 stw_be_p(outbuf, size - 2); 1002 return size; 1003 1004 fail: 1005 return -1; 1006 } 1007 1008 static int scsi_event_status_media(SCSIDiskState *s, uint8_t *outbuf) 1009 { 1010 uint8_t event_code, media_status; 1011 1012 media_status = 0; 1013 if (s->tray_open) { 1014 media_status = MS_TRAY_OPEN; 1015 } else if (blk_is_inserted(s->qdev.conf.blk)) { 1016 media_status = MS_MEDIA_PRESENT; 1017 } 1018 1019 /* Event notification descriptor */ 1020 event_code = MEC_NO_CHANGE; 1021 if (media_status != MS_TRAY_OPEN) { 1022 if (s->media_event) { 1023 event_code = MEC_NEW_MEDIA; 1024 s->media_event = false; 1025 } else if (s->eject_request) { 1026 event_code = MEC_EJECT_REQUESTED; 1027 s->eject_request = false; 1028 } 1029 } 1030 1031 outbuf[0] = event_code; 1032 outbuf[1] = media_status; 1033 1034 /* These fields are reserved, just clear them. */ 1035 outbuf[2] = 0; 1036 outbuf[3] = 0; 1037 return 4; 1038 } 1039 1040 static int scsi_get_event_status_notification(SCSIDiskState *s, SCSIDiskReq *r, 1041 uint8_t *outbuf) 1042 { 1043 int size; 1044 uint8_t *buf = r->req.cmd.buf; 1045 uint8_t notification_class_request = buf[4]; 1046 if (s->qdev.type != TYPE_ROM) { 1047 return -1; 1048 } 1049 if ((buf[1] & 1) == 0) { 1050 /* asynchronous */ 1051 return -1; 1052 } 1053 1054 size = 4; 1055 outbuf[0] = outbuf[1] = 0; 1056 outbuf[3] = 1 << GESN_MEDIA; /* supported events */ 1057 if (notification_class_request & (1 << GESN_MEDIA)) { 1058 outbuf[2] = GESN_MEDIA; 1059 size += scsi_event_status_media(s, &outbuf[size]); 1060 } else { 1061 outbuf[2] = 0x80; 1062 } 1063 stw_be_p(outbuf, size - 4); 1064 return size; 1065 } 1066 1067 static int scsi_get_configuration(SCSIDiskState *s, uint8_t *outbuf) 1068 { 1069 int current; 1070 1071 if (s->qdev.type != TYPE_ROM) { 1072 return -1; 1073 } 1074 1075 if (media_is_dvd(s)) { 1076 current = MMC_PROFILE_DVD_ROM; 1077 } else if (media_is_cd(s)) { 1078 current = MMC_PROFILE_CD_ROM; 1079 } else { 1080 current = MMC_PROFILE_NONE; 1081 } 1082 1083 memset(outbuf, 0, 40); 1084 stl_be_p(&outbuf[0], 36); /* Bytes after the data length field */ 1085 stw_be_p(&outbuf[6], current); 1086 /* outbuf[8] - outbuf[19]: Feature 0 - Profile list */ 1087 outbuf[10] = 0x03; /* persistent, current */ 1088 outbuf[11] = 8; /* two profiles */ 1089 stw_be_p(&outbuf[12], MMC_PROFILE_DVD_ROM); 1090 outbuf[14] = (current == MMC_PROFILE_DVD_ROM); 1091 stw_be_p(&outbuf[16], MMC_PROFILE_CD_ROM); 1092 outbuf[18] = (current == MMC_PROFILE_CD_ROM); 1093 /* outbuf[20] - outbuf[31]: Feature 1 - Core feature */ 1094 stw_be_p(&outbuf[20], 1); 1095 outbuf[22] = 0x08 | 0x03; /* version 2, persistent, current */ 1096 outbuf[23] = 8; 1097 stl_be_p(&outbuf[24], 1); /* SCSI */ 1098 outbuf[28] = 1; /* DBE = 1, mandatory */ 1099 /* outbuf[32] - outbuf[39]: Feature 3 - Removable media feature */ 1100 stw_be_p(&outbuf[32], 3); 1101 outbuf[34] = 0x08 | 0x03; /* version 2, persistent, current */ 1102 outbuf[35] = 4; 1103 outbuf[36] = 0x39; /* tray, load=1, eject=1, unlocked at powerup, lock=1 */ 1104 /* TODO: Random readable, CD read, DVD read, drive serial number, 1105 power management */ 1106 return 40; 1107 } 1108 1109 static int scsi_emulate_mechanism_status(SCSIDiskState *s, uint8_t *outbuf) 1110 { 1111 if (s->qdev.type != TYPE_ROM) { 1112 return -1; 1113 } 1114 memset(outbuf, 0, 8); 1115 outbuf[5] = 1; /* CD-ROM */ 1116 return 8; 1117 } 1118 1119 static int mode_sense_page(SCSIDiskState *s, int page, uint8_t **p_outbuf, 1120 int page_control) 1121 { 1122 static const int mode_sense_valid[0x3f] = { 1123 [MODE_PAGE_HD_GEOMETRY] = (1 << TYPE_DISK), 1124 [MODE_PAGE_FLEXIBLE_DISK_GEOMETRY] = (1 << TYPE_DISK), 1125 [MODE_PAGE_CACHING] = (1 << TYPE_DISK) | (1 << TYPE_ROM), 1126 [MODE_PAGE_R_W_ERROR] = (1 << TYPE_DISK) | (1 << TYPE_ROM), 1127 [MODE_PAGE_AUDIO_CTL] = (1 << TYPE_ROM), 1128 [MODE_PAGE_CAPABILITIES] = (1 << TYPE_ROM), 1129 }; 1130 1131 uint8_t *p = *p_outbuf + 2; 1132 int length; 1133 1134 if ((mode_sense_valid[page] & (1 << s->qdev.type)) == 0) { 1135 return -1; 1136 } 1137 1138 /* 1139 * If Changeable Values are requested, a mask denoting those mode parameters 1140 * that are changeable shall be returned. As we currently don't support 1141 * parameter changes via MODE_SELECT all bits are returned set to zero. 1142 * The buffer was already menset to zero by the caller of this function. 1143 * 1144 * The offsets here are off by two compared to the descriptions in the 1145 * SCSI specs, because those include a 2-byte header. This is unfortunate, 1146 * but it is done so that offsets are consistent within our implementation 1147 * of MODE SENSE and MODE SELECT. MODE SELECT has to deal with both 1148 * 2-byte and 4-byte headers. 1149 */ 1150 switch (page) { 1151 case MODE_PAGE_HD_GEOMETRY: 1152 length = 0x16; 1153 if (page_control == 1) { /* Changeable Values */ 1154 break; 1155 } 1156 /* if a geometry hint is available, use it */ 1157 p[0] = (s->qdev.conf.cyls >> 16) & 0xff; 1158 p[1] = (s->qdev.conf.cyls >> 8) & 0xff; 1159 p[2] = s->qdev.conf.cyls & 0xff; 1160 p[3] = s->qdev.conf.heads & 0xff; 1161 /* Write precomp start cylinder, disabled */ 1162 p[4] = (s->qdev.conf.cyls >> 16) & 0xff; 1163 p[5] = (s->qdev.conf.cyls >> 8) & 0xff; 1164 p[6] = s->qdev.conf.cyls & 0xff; 1165 /* Reduced current start cylinder, disabled */ 1166 p[7] = (s->qdev.conf.cyls >> 16) & 0xff; 1167 p[8] = (s->qdev.conf.cyls >> 8) & 0xff; 1168 p[9] = s->qdev.conf.cyls & 0xff; 1169 /* Device step rate [ns], 200ns */ 1170 p[10] = 0; 1171 p[11] = 200; 1172 /* Landing zone cylinder */ 1173 p[12] = 0xff; 1174 p[13] = 0xff; 1175 p[14] = 0xff; 1176 /* Medium rotation rate [rpm], 5400 rpm */ 1177 p[18] = (5400 >> 8) & 0xff; 1178 p[19] = 5400 & 0xff; 1179 break; 1180 1181 case MODE_PAGE_FLEXIBLE_DISK_GEOMETRY: 1182 length = 0x1e; 1183 if (page_control == 1) { /* Changeable Values */ 1184 break; 1185 } 1186 /* Transfer rate [kbit/s], 5Mbit/s */ 1187 p[0] = 5000 >> 8; 1188 p[1] = 5000 & 0xff; 1189 /* if a geometry hint is available, use it */ 1190 p[2] = s->qdev.conf.heads & 0xff; 1191 p[3] = s->qdev.conf.secs & 0xff; 1192 p[4] = s->qdev.blocksize >> 8; 1193 p[6] = (s->qdev.conf.cyls >> 8) & 0xff; 1194 p[7] = s->qdev.conf.cyls & 0xff; 1195 /* Write precomp start cylinder, disabled */ 1196 p[8] = (s->qdev.conf.cyls >> 8) & 0xff; 1197 p[9] = s->qdev.conf.cyls & 0xff; 1198 /* Reduced current start cylinder, disabled */ 1199 p[10] = (s->qdev.conf.cyls >> 8) & 0xff; 1200 p[11] = s->qdev.conf.cyls & 0xff; 1201 /* Device step rate [100us], 100us */ 1202 p[12] = 0; 1203 p[13] = 1; 1204 /* Device step pulse width [us], 1us */ 1205 p[14] = 1; 1206 /* Device head settle delay [100us], 100us */ 1207 p[15] = 0; 1208 p[16] = 1; 1209 /* Motor on delay [0.1s], 0.1s */ 1210 p[17] = 1; 1211 /* Motor off delay [0.1s], 0.1s */ 1212 p[18] = 1; 1213 /* Medium rotation rate [rpm], 5400 rpm */ 1214 p[26] = (5400 >> 8) & 0xff; 1215 p[27] = 5400 & 0xff; 1216 break; 1217 1218 case MODE_PAGE_CACHING: 1219 length = 0x12; 1220 if (page_control == 1 || /* Changeable Values */ 1221 blk_enable_write_cache(s->qdev.conf.blk)) { 1222 p[0] = 4; /* WCE */ 1223 } 1224 break; 1225 1226 case MODE_PAGE_R_W_ERROR: 1227 length = 10; 1228 if (page_control == 1) { /* Changeable Values */ 1229 break; 1230 } 1231 p[0] = 0x80; /* Automatic Write Reallocation Enabled */ 1232 if (s->qdev.type == TYPE_ROM) { 1233 p[1] = 0x20; /* Read Retry Count */ 1234 } 1235 break; 1236 1237 case MODE_PAGE_AUDIO_CTL: 1238 length = 14; 1239 break; 1240 1241 case MODE_PAGE_CAPABILITIES: 1242 length = 0x14; 1243 if (page_control == 1) { /* Changeable Values */ 1244 break; 1245 } 1246 1247 p[0] = 0x3b; /* CD-R & CD-RW read */ 1248 p[1] = 0; /* Writing not supported */ 1249 p[2] = 0x7f; /* Audio, composite, digital out, 1250 mode 2 form 1&2, multi session */ 1251 p[3] = 0xff; /* CD DA, DA accurate, RW supported, 1252 RW corrected, C2 errors, ISRC, 1253 UPC, Bar code */ 1254 p[4] = 0x2d | (s->tray_locked ? 2 : 0); 1255 /* Locking supported, jumper present, eject, tray */ 1256 p[5] = 0; /* no volume & mute control, no 1257 changer */ 1258 p[6] = (50 * 176) >> 8; /* 50x read speed */ 1259 p[7] = (50 * 176) & 0xff; 1260 p[8] = 2 >> 8; /* Two volume levels */ 1261 p[9] = 2 & 0xff; 1262 p[10] = 2048 >> 8; /* 2M buffer */ 1263 p[11] = 2048 & 0xff; 1264 p[12] = (16 * 176) >> 8; /* 16x read speed current */ 1265 p[13] = (16 * 176) & 0xff; 1266 p[16] = (16 * 176) >> 8; /* 16x write speed */ 1267 p[17] = (16 * 176) & 0xff; 1268 p[18] = (16 * 176) >> 8; /* 16x write speed current */ 1269 p[19] = (16 * 176) & 0xff; 1270 break; 1271 1272 default: 1273 return -1; 1274 } 1275 1276 assert(length < 256); 1277 (*p_outbuf)[0] = page; 1278 (*p_outbuf)[1] = length; 1279 *p_outbuf += length + 2; 1280 return length + 2; 1281 } 1282 1283 static int scsi_disk_emulate_mode_sense(SCSIDiskReq *r, uint8_t *outbuf) 1284 { 1285 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1286 uint64_t nb_sectors; 1287 bool dbd; 1288 int page, buflen, ret, page_control; 1289 uint8_t *p; 1290 uint8_t dev_specific_param; 1291 1292 dbd = (r->req.cmd.buf[1] & 0x8) != 0; 1293 page = r->req.cmd.buf[2] & 0x3f; 1294 page_control = (r->req.cmd.buf[2] & 0xc0) >> 6; 1295 DPRINTF("Mode Sense(%d) (page %d, xfer %zd, page_control %d)\n", 1296 (r->req.cmd.buf[0] == MODE_SENSE) ? 6 : 10, page, r->req.cmd.xfer, page_control); 1297 memset(outbuf, 0, r->req.cmd.xfer); 1298 p = outbuf; 1299 1300 if (s->qdev.type == TYPE_DISK) { 1301 dev_specific_param = s->features & (1 << SCSI_DISK_F_DPOFUA) ? 0x10 : 0; 1302 if (blk_is_read_only(s->qdev.conf.blk)) { 1303 dev_specific_param |= 0x80; /* Readonly. */ 1304 } 1305 } else { 1306 /* MMC prescribes that CD/DVD drives have no block descriptors, 1307 * and defines no device-specific parameter. */ 1308 dev_specific_param = 0x00; 1309 dbd = true; 1310 } 1311 1312 if (r->req.cmd.buf[0] == MODE_SENSE) { 1313 p[1] = 0; /* Default media type. */ 1314 p[2] = dev_specific_param; 1315 p[3] = 0; /* Block descriptor length. */ 1316 p += 4; 1317 } else { /* MODE_SENSE_10 */ 1318 p[2] = 0; /* Default media type. */ 1319 p[3] = dev_specific_param; 1320 p[6] = p[7] = 0; /* Block descriptor length. */ 1321 p += 8; 1322 } 1323 1324 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 1325 if (!dbd && nb_sectors) { 1326 if (r->req.cmd.buf[0] == MODE_SENSE) { 1327 outbuf[3] = 8; /* Block descriptor length */ 1328 } else { /* MODE_SENSE_10 */ 1329 outbuf[7] = 8; /* Block descriptor length */ 1330 } 1331 nb_sectors /= (s->qdev.blocksize / 512); 1332 if (nb_sectors > 0xffffff) { 1333 nb_sectors = 0; 1334 } 1335 p[0] = 0; /* media density code */ 1336 p[1] = (nb_sectors >> 16) & 0xff; 1337 p[2] = (nb_sectors >> 8) & 0xff; 1338 p[3] = nb_sectors & 0xff; 1339 p[4] = 0; /* reserved */ 1340 p[5] = 0; /* bytes 5-7 are the sector size in bytes */ 1341 p[6] = s->qdev.blocksize >> 8; 1342 p[7] = 0; 1343 p += 8; 1344 } 1345 1346 if (page_control == 3) { 1347 /* Saved Values */ 1348 scsi_check_condition(r, SENSE_CODE(SAVING_PARAMS_NOT_SUPPORTED)); 1349 return -1; 1350 } 1351 1352 if (page == 0x3f) { 1353 for (page = 0; page <= 0x3e; page++) { 1354 mode_sense_page(s, page, &p, page_control); 1355 } 1356 } else { 1357 ret = mode_sense_page(s, page, &p, page_control); 1358 if (ret == -1) { 1359 return -1; 1360 } 1361 } 1362 1363 buflen = p - outbuf; 1364 /* 1365 * The mode data length field specifies the length in bytes of the 1366 * following data that is available to be transferred. The mode data 1367 * length does not include itself. 1368 */ 1369 if (r->req.cmd.buf[0] == MODE_SENSE) { 1370 outbuf[0] = buflen - 1; 1371 } else { /* MODE_SENSE_10 */ 1372 outbuf[0] = ((buflen - 2) >> 8) & 0xff; 1373 outbuf[1] = (buflen - 2) & 0xff; 1374 } 1375 return buflen; 1376 } 1377 1378 static int scsi_disk_emulate_read_toc(SCSIRequest *req, uint8_t *outbuf) 1379 { 1380 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 1381 int start_track, format, msf, toclen; 1382 uint64_t nb_sectors; 1383 1384 msf = req->cmd.buf[1] & 2; 1385 format = req->cmd.buf[2] & 0xf; 1386 start_track = req->cmd.buf[6]; 1387 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 1388 DPRINTF("Read TOC (track %d format %d msf %d)\n", start_track, format, msf >> 1); 1389 nb_sectors /= s->qdev.blocksize / 512; 1390 switch (format) { 1391 case 0: 1392 toclen = cdrom_read_toc(nb_sectors, outbuf, msf, start_track); 1393 break; 1394 case 1: 1395 /* multi session : only a single session defined */ 1396 toclen = 12; 1397 memset(outbuf, 0, 12); 1398 outbuf[1] = 0x0a; 1399 outbuf[2] = 0x01; 1400 outbuf[3] = 0x01; 1401 break; 1402 case 2: 1403 toclen = cdrom_read_toc_raw(nb_sectors, outbuf, msf, start_track); 1404 break; 1405 default: 1406 return -1; 1407 } 1408 return toclen; 1409 } 1410 1411 static int scsi_disk_emulate_start_stop(SCSIDiskReq *r) 1412 { 1413 SCSIRequest *req = &r->req; 1414 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 1415 bool start = req->cmd.buf[4] & 1; 1416 bool loej = req->cmd.buf[4] & 2; /* load on start, eject on !start */ 1417 int pwrcnd = req->cmd.buf[4] & 0xf0; 1418 1419 if (pwrcnd) { 1420 /* eject/load only happens for power condition == 0 */ 1421 return 0; 1422 } 1423 1424 if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) && loej) { 1425 if (!start && !s->tray_open && s->tray_locked) { 1426 scsi_check_condition(r, 1427 blk_is_inserted(s->qdev.conf.blk) 1428 ? SENSE_CODE(ILLEGAL_REQ_REMOVAL_PREVENTED) 1429 : SENSE_CODE(NOT_READY_REMOVAL_PREVENTED)); 1430 return -1; 1431 } 1432 1433 if (s->tray_open != !start) { 1434 blk_eject(s->qdev.conf.blk, !start); 1435 s->tray_open = !start; 1436 } 1437 } 1438 return 0; 1439 } 1440 1441 static void scsi_disk_emulate_read_data(SCSIRequest *req) 1442 { 1443 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 1444 int buflen = r->iov.iov_len; 1445 1446 if (buflen) { 1447 DPRINTF("Read buf_len=%d\n", buflen); 1448 r->iov.iov_len = 0; 1449 r->started = true; 1450 scsi_req_data(&r->req, buflen); 1451 return; 1452 } 1453 1454 /* This also clears the sense buffer for REQUEST SENSE. */ 1455 scsi_req_complete(&r->req, GOOD); 1456 } 1457 1458 static int scsi_disk_check_mode_select(SCSIDiskState *s, int page, 1459 uint8_t *inbuf, int inlen) 1460 { 1461 uint8_t mode_current[SCSI_MAX_MODE_LEN]; 1462 uint8_t mode_changeable[SCSI_MAX_MODE_LEN]; 1463 uint8_t *p; 1464 int len, expected_len, changeable_len, i; 1465 1466 /* The input buffer does not include the page header, so it is 1467 * off by 2 bytes. 1468 */ 1469 expected_len = inlen + 2; 1470 if (expected_len > SCSI_MAX_MODE_LEN) { 1471 return -1; 1472 } 1473 1474 p = mode_current; 1475 memset(mode_current, 0, inlen + 2); 1476 len = mode_sense_page(s, page, &p, 0); 1477 if (len < 0 || len != expected_len) { 1478 return -1; 1479 } 1480 1481 p = mode_changeable; 1482 memset(mode_changeable, 0, inlen + 2); 1483 changeable_len = mode_sense_page(s, page, &p, 1); 1484 assert(changeable_len == len); 1485 1486 /* Check that unchangeable bits are the same as what MODE SENSE 1487 * would return. 1488 */ 1489 for (i = 2; i < len; i++) { 1490 if (((mode_current[i] ^ inbuf[i - 2]) & ~mode_changeable[i]) != 0) { 1491 return -1; 1492 } 1493 } 1494 return 0; 1495 } 1496 1497 static void scsi_disk_apply_mode_select(SCSIDiskState *s, int page, uint8_t *p) 1498 { 1499 switch (page) { 1500 case MODE_PAGE_CACHING: 1501 blk_set_enable_write_cache(s->qdev.conf.blk, (p[0] & 4) != 0); 1502 break; 1503 1504 default: 1505 break; 1506 } 1507 } 1508 1509 static int mode_select_pages(SCSIDiskReq *r, uint8_t *p, int len, bool change) 1510 { 1511 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1512 1513 while (len > 0) { 1514 int page, subpage, page_len; 1515 1516 /* Parse both possible formats for the mode page headers. */ 1517 page = p[0] & 0x3f; 1518 if (p[0] & 0x40) { 1519 if (len < 4) { 1520 goto invalid_param_len; 1521 } 1522 subpage = p[1]; 1523 page_len = lduw_be_p(&p[2]); 1524 p += 4; 1525 len -= 4; 1526 } else { 1527 if (len < 2) { 1528 goto invalid_param_len; 1529 } 1530 subpage = 0; 1531 page_len = p[1]; 1532 p += 2; 1533 len -= 2; 1534 } 1535 1536 if (subpage) { 1537 goto invalid_param; 1538 } 1539 if (page_len > len) { 1540 goto invalid_param_len; 1541 } 1542 1543 if (!change) { 1544 if (scsi_disk_check_mode_select(s, page, p, page_len) < 0) { 1545 goto invalid_param; 1546 } 1547 } else { 1548 scsi_disk_apply_mode_select(s, page, p); 1549 } 1550 1551 p += page_len; 1552 len -= page_len; 1553 } 1554 return 0; 1555 1556 invalid_param: 1557 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM)); 1558 return -1; 1559 1560 invalid_param_len: 1561 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN)); 1562 return -1; 1563 } 1564 1565 static void scsi_disk_emulate_mode_select(SCSIDiskReq *r, uint8_t *inbuf) 1566 { 1567 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1568 uint8_t *p = inbuf; 1569 int cmd = r->req.cmd.buf[0]; 1570 int len = r->req.cmd.xfer; 1571 int hdr_len = (cmd == MODE_SELECT ? 4 : 8); 1572 int bd_len; 1573 int pass; 1574 1575 /* We only support PF=1, SP=0. */ 1576 if ((r->req.cmd.buf[1] & 0x11) != 0x10) { 1577 goto invalid_field; 1578 } 1579 1580 if (len < hdr_len) { 1581 goto invalid_param_len; 1582 } 1583 1584 bd_len = (cmd == MODE_SELECT ? p[3] : lduw_be_p(&p[6])); 1585 len -= hdr_len; 1586 p += hdr_len; 1587 if (len < bd_len) { 1588 goto invalid_param_len; 1589 } 1590 if (bd_len != 0 && bd_len != 8) { 1591 goto invalid_param; 1592 } 1593 1594 len -= bd_len; 1595 p += bd_len; 1596 1597 /* Ensure no change is made if there is an error! */ 1598 for (pass = 0; pass < 2; pass++) { 1599 if (mode_select_pages(r, p, len, pass == 1) < 0) { 1600 assert(pass == 0); 1601 return; 1602 } 1603 } 1604 if (!blk_enable_write_cache(s->qdev.conf.blk)) { 1605 /* The request is used as the AIO opaque value, so add a ref. */ 1606 scsi_req_ref(&r->req); 1607 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0, 1608 BLOCK_ACCT_FLUSH); 1609 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r); 1610 return; 1611 } 1612 1613 scsi_req_complete(&r->req, GOOD); 1614 return; 1615 1616 invalid_param: 1617 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM)); 1618 return; 1619 1620 invalid_param_len: 1621 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN)); 1622 return; 1623 1624 invalid_field: 1625 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 1626 } 1627 1628 static inline bool check_lba_range(SCSIDiskState *s, 1629 uint64_t sector_num, uint32_t nb_sectors) 1630 { 1631 /* 1632 * The first line tests that no overflow happens when computing the last 1633 * sector. The second line tests that the last accessed sector is in 1634 * range. 1635 * 1636 * Careful, the computations should not underflow for nb_sectors == 0, 1637 * and a 0-block read to the first LBA beyond the end of device is 1638 * valid. 1639 */ 1640 return (sector_num <= sector_num + nb_sectors && 1641 sector_num + nb_sectors <= s->qdev.max_lba + 1); 1642 } 1643 1644 typedef struct UnmapCBData { 1645 SCSIDiskReq *r; 1646 uint8_t *inbuf; 1647 int count; 1648 } UnmapCBData; 1649 1650 static void scsi_unmap_complete(void *opaque, int ret); 1651 1652 static void scsi_unmap_complete_noio(UnmapCBData *data, int ret) 1653 { 1654 SCSIDiskReq *r = data->r; 1655 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1656 uint64_t sector_num; 1657 uint32_t nb_sectors; 1658 1659 assert(r->req.aiocb == NULL); 1660 if (scsi_disk_req_check_error(r, ret, false)) { 1661 goto done; 1662 } 1663 1664 if (data->count > 0) { 1665 sector_num = ldq_be_p(&data->inbuf[0]); 1666 nb_sectors = ldl_be_p(&data->inbuf[8]) & 0xffffffffULL; 1667 if (!check_lba_range(s, sector_num, nb_sectors)) { 1668 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE)); 1669 goto done; 1670 } 1671 1672 r->req.aiocb = blk_aio_pdiscard(s->qdev.conf.blk, 1673 sector_num * s->qdev.blocksize, 1674 nb_sectors * s->qdev.blocksize, 1675 scsi_unmap_complete, data); 1676 data->count--; 1677 data->inbuf += 16; 1678 return; 1679 } 1680 1681 scsi_req_complete(&r->req, GOOD); 1682 1683 done: 1684 scsi_req_unref(&r->req); 1685 g_free(data); 1686 } 1687 1688 static void scsi_unmap_complete(void *opaque, int ret) 1689 { 1690 UnmapCBData *data = opaque; 1691 SCSIDiskReq *r = data->r; 1692 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1693 1694 assert(r->req.aiocb != NULL); 1695 r->req.aiocb = NULL; 1696 1697 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); 1698 scsi_unmap_complete_noio(data, ret); 1699 aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); 1700 } 1701 1702 static void scsi_disk_emulate_unmap(SCSIDiskReq *r, uint8_t *inbuf) 1703 { 1704 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1705 uint8_t *p = inbuf; 1706 int len = r->req.cmd.xfer; 1707 UnmapCBData *data; 1708 1709 /* Reject ANCHOR=1. */ 1710 if (r->req.cmd.buf[1] & 0x1) { 1711 goto invalid_field; 1712 } 1713 1714 if (len < 8) { 1715 goto invalid_param_len; 1716 } 1717 if (len < lduw_be_p(&p[0]) + 2) { 1718 goto invalid_param_len; 1719 } 1720 if (len < lduw_be_p(&p[2]) + 8) { 1721 goto invalid_param_len; 1722 } 1723 if (lduw_be_p(&p[2]) & 15) { 1724 goto invalid_param_len; 1725 } 1726 1727 if (blk_is_read_only(s->qdev.conf.blk)) { 1728 scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED)); 1729 return; 1730 } 1731 1732 data = g_new0(UnmapCBData, 1); 1733 data->r = r; 1734 data->inbuf = &p[8]; 1735 data->count = lduw_be_p(&p[2]) >> 4; 1736 1737 /* The matching unref is in scsi_unmap_complete, before data is freed. */ 1738 scsi_req_ref(&r->req); 1739 scsi_unmap_complete_noio(data, 0); 1740 return; 1741 1742 invalid_param_len: 1743 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN)); 1744 return; 1745 1746 invalid_field: 1747 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 1748 } 1749 1750 typedef struct WriteSameCBData { 1751 SCSIDiskReq *r; 1752 int64_t sector; 1753 int nb_sectors; 1754 QEMUIOVector qiov; 1755 struct iovec iov; 1756 } WriteSameCBData; 1757 1758 static void scsi_write_same_complete(void *opaque, int ret) 1759 { 1760 WriteSameCBData *data = opaque; 1761 SCSIDiskReq *r = data->r; 1762 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1763 1764 assert(r->req.aiocb != NULL); 1765 r->req.aiocb = NULL; 1766 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); 1767 if (scsi_disk_req_check_error(r, ret, true)) { 1768 goto done; 1769 } 1770 1771 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); 1772 1773 data->nb_sectors -= data->iov.iov_len / 512; 1774 data->sector += data->iov.iov_len / 512; 1775 data->iov.iov_len = MIN(data->nb_sectors * 512, data->iov.iov_len); 1776 if (data->iov.iov_len) { 1777 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 1778 data->iov.iov_len, BLOCK_ACCT_WRITE); 1779 /* Reinitialize qiov, to handle unaligned WRITE SAME request 1780 * where final qiov may need smaller size */ 1781 qemu_iovec_init_external(&data->qiov, &data->iov, 1); 1782 r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk, 1783 data->sector << BDRV_SECTOR_BITS, 1784 &data->qiov, 0, 1785 scsi_write_same_complete, data); 1786 aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); 1787 return; 1788 } 1789 1790 scsi_req_complete(&r->req, GOOD); 1791 1792 done: 1793 scsi_req_unref(&r->req); 1794 qemu_vfree(data->iov.iov_base); 1795 g_free(data); 1796 aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); 1797 } 1798 1799 static void scsi_disk_emulate_write_same(SCSIDiskReq *r, uint8_t *inbuf) 1800 { 1801 SCSIRequest *req = &r->req; 1802 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 1803 uint32_t nb_sectors = scsi_data_cdb_xfer(r->req.cmd.buf); 1804 WriteSameCBData *data; 1805 uint8_t *buf; 1806 int i; 1807 1808 /* Fail if PBDATA=1 or LBDATA=1 or ANCHOR=1. */ 1809 if (nb_sectors == 0 || (req->cmd.buf[1] & 0x16)) { 1810 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 1811 return; 1812 } 1813 1814 if (blk_is_read_only(s->qdev.conf.blk)) { 1815 scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED)); 1816 return; 1817 } 1818 if (!check_lba_range(s, r->req.cmd.lba, nb_sectors)) { 1819 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE)); 1820 return; 1821 } 1822 1823 if ((req->cmd.buf[1] & 0x1) || buffer_is_zero(inbuf, s->qdev.blocksize)) { 1824 int flags = (req->cmd.buf[1] & 0x8) ? BDRV_REQ_MAY_UNMAP : 0; 1825 1826 /* The request is used as the AIO opaque value, so add a ref. */ 1827 scsi_req_ref(&r->req); 1828 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 1829 nb_sectors * s->qdev.blocksize, 1830 BLOCK_ACCT_WRITE); 1831 r->req.aiocb = blk_aio_pwrite_zeroes(s->qdev.conf.blk, 1832 r->req.cmd.lba * s->qdev.blocksize, 1833 nb_sectors * s->qdev.blocksize, 1834 flags, scsi_aio_complete, r); 1835 return; 1836 } 1837 1838 data = g_new0(WriteSameCBData, 1); 1839 data->r = r; 1840 data->sector = r->req.cmd.lba * (s->qdev.blocksize / 512); 1841 data->nb_sectors = nb_sectors * (s->qdev.blocksize / 512); 1842 data->iov.iov_len = MIN(data->nb_sectors * 512, SCSI_WRITE_SAME_MAX); 1843 data->iov.iov_base = buf = blk_blockalign(s->qdev.conf.blk, 1844 data->iov.iov_len); 1845 qemu_iovec_init_external(&data->qiov, &data->iov, 1); 1846 1847 for (i = 0; i < data->iov.iov_len; i += s->qdev.blocksize) { 1848 memcpy(&buf[i], inbuf, s->qdev.blocksize); 1849 } 1850 1851 scsi_req_ref(&r->req); 1852 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 1853 data->iov.iov_len, BLOCK_ACCT_WRITE); 1854 r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk, 1855 data->sector << BDRV_SECTOR_BITS, 1856 &data->qiov, 0, 1857 scsi_write_same_complete, data); 1858 } 1859 1860 static void scsi_disk_emulate_write_data(SCSIRequest *req) 1861 { 1862 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 1863 1864 if (r->iov.iov_len) { 1865 int buflen = r->iov.iov_len; 1866 DPRINTF("Write buf_len=%d\n", buflen); 1867 r->iov.iov_len = 0; 1868 scsi_req_data(&r->req, buflen); 1869 return; 1870 } 1871 1872 switch (req->cmd.buf[0]) { 1873 case MODE_SELECT: 1874 case MODE_SELECT_10: 1875 /* This also clears the sense buffer for REQUEST SENSE. */ 1876 scsi_disk_emulate_mode_select(r, r->iov.iov_base); 1877 break; 1878 1879 case UNMAP: 1880 scsi_disk_emulate_unmap(r, r->iov.iov_base); 1881 break; 1882 1883 case VERIFY_10: 1884 case VERIFY_12: 1885 case VERIFY_16: 1886 if (r->req.status == -1) { 1887 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 1888 } 1889 break; 1890 1891 case WRITE_SAME_10: 1892 case WRITE_SAME_16: 1893 scsi_disk_emulate_write_same(r, r->iov.iov_base); 1894 break; 1895 1896 default: 1897 abort(); 1898 } 1899 } 1900 1901 static int32_t scsi_disk_emulate_command(SCSIRequest *req, uint8_t *buf) 1902 { 1903 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 1904 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 1905 uint64_t nb_sectors; 1906 uint8_t *outbuf; 1907 int buflen; 1908 1909 switch (req->cmd.buf[0]) { 1910 case INQUIRY: 1911 case MODE_SENSE: 1912 case MODE_SENSE_10: 1913 case RESERVE: 1914 case RESERVE_10: 1915 case RELEASE: 1916 case RELEASE_10: 1917 case START_STOP: 1918 case ALLOW_MEDIUM_REMOVAL: 1919 case GET_CONFIGURATION: 1920 case GET_EVENT_STATUS_NOTIFICATION: 1921 case MECHANISM_STATUS: 1922 case REQUEST_SENSE: 1923 break; 1924 1925 default: 1926 if (!blk_is_available(s->qdev.conf.blk)) { 1927 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM)); 1928 return 0; 1929 } 1930 break; 1931 } 1932 1933 /* 1934 * FIXME: we shouldn't return anything bigger than 4k, but the code 1935 * requires the buffer to be as big as req->cmd.xfer in several 1936 * places. So, do not allow CDBs with a very large ALLOCATION 1937 * LENGTH. The real fix would be to modify scsi_read_data and 1938 * dma_buf_read, so that they return data beyond the buflen 1939 * as all zeros. 1940 */ 1941 if (req->cmd.xfer > 65536) { 1942 goto illegal_request; 1943 } 1944 r->buflen = MAX(4096, req->cmd.xfer); 1945 1946 if (!r->iov.iov_base) { 1947 r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen); 1948 } 1949 1950 buflen = req->cmd.xfer; 1951 outbuf = r->iov.iov_base; 1952 memset(outbuf, 0, r->buflen); 1953 switch (req->cmd.buf[0]) { 1954 case TEST_UNIT_READY: 1955 assert(blk_is_available(s->qdev.conf.blk)); 1956 break; 1957 case INQUIRY: 1958 buflen = scsi_disk_emulate_inquiry(req, outbuf); 1959 if (buflen < 0) { 1960 goto illegal_request; 1961 } 1962 break; 1963 case MODE_SENSE: 1964 case MODE_SENSE_10: 1965 buflen = scsi_disk_emulate_mode_sense(r, outbuf); 1966 if (buflen < 0) { 1967 goto illegal_request; 1968 } 1969 break; 1970 case READ_TOC: 1971 buflen = scsi_disk_emulate_read_toc(req, outbuf); 1972 if (buflen < 0) { 1973 goto illegal_request; 1974 } 1975 break; 1976 case RESERVE: 1977 if (req->cmd.buf[1] & 1) { 1978 goto illegal_request; 1979 } 1980 break; 1981 case RESERVE_10: 1982 if (req->cmd.buf[1] & 3) { 1983 goto illegal_request; 1984 } 1985 break; 1986 case RELEASE: 1987 if (req->cmd.buf[1] & 1) { 1988 goto illegal_request; 1989 } 1990 break; 1991 case RELEASE_10: 1992 if (req->cmd.buf[1] & 3) { 1993 goto illegal_request; 1994 } 1995 break; 1996 case START_STOP: 1997 if (scsi_disk_emulate_start_stop(r) < 0) { 1998 return 0; 1999 } 2000 break; 2001 case ALLOW_MEDIUM_REMOVAL: 2002 s->tray_locked = req->cmd.buf[4] & 1; 2003 blk_lock_medium(s->qdev.conf.blk, req->cmd.buf[4] & 1); 2004 break; 2005 case READ_CAPACITY_10: 2006 /* The normal LEN field for this command is zero. */ 2007 memset(outbuf, 0, 8); 2008 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 2009 if (!nb_sectors) { 2010 scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY)); 2011 return 0; 2012 } 2013 if ((req->cmd.buf[8] & 1) == 0 && req->cmd.lba) { 2014 goto illegal_request; 2015 } 2016 nb_sectors /= s->qdev.blocksize / 512; 2017 /* Returned value is the address of the last sector. */ 2018 nb_sectors--; 2019 /* Remember the new size for read/write sanity checking. */ 2020 s->qdev.max_lba = nb_sectors; 2021 /* Clip to 2TB, instead of returning capacity modulo 2TB. */ 2022 if (nb_sectors > UINT32_MAX) { 2023 nb_sectors = UINT32_MAX; 2024 } 2025 outbuf[0] = (nb_sectors >> 24) & 0xff; 2026 outbuf[1] = (nb_sectors >> 16) & 0xff; 2027 outbuf[2] = (nb_sectors >> 8) & 0xff; 2028 outbuf[3] = nb_sectors & 0xff; 2029 outbuf[4] = 0; 2030 outbuf[5] = 0; 2031 outbuf[6] = s->qdev.blocksize >> 8; 2032 outbuf[7] = 0; 2033 break; 2034 case REQUEST_SENSE: 2035 /* Just return "NO SENSE". */ 2036 buflen = scsi_convert_sense(NULL, 0, outbuf, r->buflen, 2037 (req->cmd.buf[1] & 1) == 0); 2038 if (buflen < 0) { 2039 goto illegal_request; 2040 } 2041 break; 2042 case MECHANISM_STATUS: 2043 buflen = scsi_emulate_mechanism_status(s, outbuf); 2044 if (buflen < 0) { 2045 goto illegal_request; 2046 } 2047 break; 2048 case GET_CONFIGURATION: 2049 buflen = scsi_get_configuration(s, outbuf); 2050 if (buflen < 0) { 2051 goto illegal_request; 2052 } 2053 break; 2054 case GET_EVENT_STATUS_NOTIFICATION: 2055 buflen = scsi_get_event_status_notification(s, r, outbuf); 2056 if (buflen < 0) { 2057 goto illegal_request; 2058 } 2059 break; 2060 case READ_DISC_INFORMATION: 2061 buflen = scsi_read_disc_information(s, r, outbuf); 2062 if (buflen < 0) { 2063 goto illegal_request; 2064 } 2065 break; 2066 case READ_DVD_STRUCTURE: 2067 buflen = scsi_read_dvd_structure(s, r, outbuf); 2068 if (buflen < 0) { 2069 goto illegal_request; 2070 } 2071 break; 2072 case SERVICE_ACTION_IN_16: 2073 /* Service Action In subcommands. */ 2074 if ((req->cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) { 2075 DPRINTF("SAI READ CAPACITY(16)\n"); 2076 memset(outbuf, 0, req->cmd.xfer); 2077 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 2078 if (!nb_sectors) { 2079 scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY)); 2080 return 0; 2081 } 2082 if ((req->cmd.buf[14] & 1) == 0 && req->cmd.lba) { 2083 goto illegal_request; 2084 } 2085 nb_sectors /= s->qdev.blocksize / 512; 2086 /* Returned value is the address of the last sector. */ 2087 nb_sectors--; 2088 /* Remember the new size for read/write sanity checking. */ 2089 s->qdev.max_lba = nb_sectors; 2090 outbuf[0] = (nb_sectors >> 56) & 0xff; 2091 outbuf[1] = (nb_sectors >> 48) & 0xff; 2092 outbuf[2] = (nb_sectors >> 40) & 0xff; 2093 outbuf[3] = (nb_sectors >> 32) & 0xff; 2094 outbuf[4] = (nb_sectors >> 24) & 0xff; 2095 outbuf[5] = (nb_sectors >> 16) & 0xff; 2096 outbuf[6] = (nb_sectors >> 8) & 0xff; 2097 outbuf[7] = nb_sectors & 0xff; 2098 outbuf[8] = 0; 2099 outbuf[9] = 0; 2100 outbuf[10] = s->qdev.blocksize >> 8; 2101 outbuf[11] = 0; 2102 outbuf[12] = 0; 2103 outbuf[13] = get_physical_block_exp(&s->qdev.conf); 2104 2105 /* set TPE bit if the format supports discard */ 2106 if (s->qdev.conf.discard_granularity) { 2107 outbuf[14] = 0x80; 2108 } 2109 2110 /* Protection, exponent and lowest lba field left blank. */ 2111 break; 2112 } 2113 DPRINTF("Unsupported Service Action In\n"); 2114 goto illegal_request; 2115 case SYNCHRONIZE_CACHE: 2116 /* The request is used as the AIO opaque value, so add a ref. */ 2117 scsi_req_ref(&r->req); 2118 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0, 2119 BLOCK_ACCT_FLUSH); 2120 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r); 2121 return 0; 2122 case SEEK_10: 2123 DPRINTF("Seek(10) (sector %" PRId64 ")\n", r->req.cmd.lba); 2124 if (r->req.cmd.lba > s->qdev.max_lba) { 2125 goto illegal_lba; 2126 } 2127 break; 2128 case MODE_SELECT: 2129 DPRINTF("Mode Select(6) (len %lu)\n", (unsigned long)r->req.cmd.xfer); 2130 break; 2131 case MODE_SELECT_10: 2132 DPRINTF("Mode Select(10) (len %lu)\n", (unsigned long)r->req.cmd.xfer); 2133 break; 2134 case UNMAP: 2135 DPRINTF("Unmap (len %lu)\n", (unsigned long)r->req.cmd.xfer); 2136 break; 2137 case VERIFY_10: 2138 case VERIFY_12: 2139 case VERIFY_16: 2140 DPRINTF("Verify (bytchk %d)\n", (req->cmd.buf[1] >> 1) & 3); 2141 if (req->cmd.buf[1] & 6) { 2142 goto illegal_request; 2143 } 2144 break; 2145 case WRITE_SAME_10: 2146 case WRITE_SAME_16: 2147 DPRINTF("WRITE SAME %d (len %lu)\n", 2148 req->cmd.buf[0] == WRITE_SAME_10 ? 10 : 16, 2149 (unsigned long)r->req.cmd.xfer); 2150 break; 2151 default: 2152 DPRINTF("Unknown SCSI command (%2.2x=%s)\n", buf[0], 2153 scsi_command_name(buf[0])); 2154 scsi_check_condition(r, SENSE_CODE(INVALID_OPCODE)); 2155 return 0; 2156 } 2157 assert(!r->req.aiocb); 2158 r->iov.iov_len = MIN(r->buflen, req->cmd.xfer); 2159 if (r->iov.iov_len == 0) { 2160 scsi_req_complete(&r->req, GOOD); 2161 } 2162 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { 2163 assert(r->iov.iov_len == req->cmd.xfer); 2164 return -r->iov.iov_len; 2165 } else { 2166 return r->iov.iov_len; 2167 } 2168 2169 illegal_request: 2170 if (r->req.status == -1) { 2171 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 2172 } 2173 return 0; 2174 2175 illegal_lba: 2176 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE)); 2177 return 0; 2178 } 2179 2180 /* Execute a scsi command. Returns the length of the data expected by the 2181 command. This will be Positive for data transfers from the device 2182 (eg. disk reads), negative for transfers to the device (eg. disk writes), 2183 and zero if the command does not transfer any data. */ 2184 2185 static int32_t scsi_disk_dma_command(SCSIRequest *req, uint8_t *buf) 2186 { 2187 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 2188 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 2189 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s)); 2190 uint32_t len; 2191 uint8_t command; 2192 2193 command = buf[0]; 2194 2195 if (!blk_is_available(s->qdev.conf.blk)) { 2196 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM)); 2197 return 0; 2198 } 2199 2200 len = scsi_data_cdb_xfer(r->req.cmd.buf); 2201 switch (command) { 2202 case READ_6: 2203 case READ_10: 2204 case READ_12: 2205 case READ_16: 2206 DPRINTF("Read (sector %" PRId64 ", count %u)\n", r->req.cmd.lba, len); 2207 /* Protection information is not supported. For SCSI versions 2 and 2208 * older (as determined by snooping the guest's INQUIRY commands), 2209 * there is no RD/WR/VRPROTECT, so skip this check in these versions. 2210 */ 2211 if (s->qdev.scsi_version > 2 && (r->req.cmd.buf[1] & 0xe0)) { 2212 goto illegal_request; 2213 } 2214 if (!check_lba_range(s, r->req.cmd.lba, len)) { 2215 goto illegal_lba; 2216 } 2217 r->sector = r->req.cmd.lba * (s->qdev.blocksize / 512); 2218 r->sector_count = len * (s->qdev.blocksize / 512); 2219 break; 2220 case WRITE_6: 2221 case WRITE_10: 2222 case WRITE_12: 2223 case WRITE_16: 2224 case WRITE_VERIFY_10: 2225 case WRITE_VERIFY_12: 2226 case WRITE_VERIFY_16: 2227 if (blk_is_read_only(s->qdev.conf.blk)) { 2228 scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED)); 2229 return 0; 2230 } 2231 DPRINTF("Write %s(sector %" PRId64 ", count %u)\n", 2232 (command & 0xe) == 0xe ? "And Verify " : "", 2233 r->req.cmd.lba, len); 2234 /* fall through */ 2235 case VERIFY_10: 2236 case VERIFY_12: 2237 case VERIFY_16: 2238 /* We get here only for BYTCHK == 0x01 and only for scsi-block. 2239 * As far as DMA is concerned, we can treat it the same as a write; 2240 * scsi_block_do_sgio will send VERIFY commands. 2241 */ 2242 if (s->qdev.scsi_version > 2 && (r->req.cmd.buf[1] & 0xe0)) { 2243 goto illegal_request; 2244 } 2245 if (!check_lba_range(s, r->req.cmd.lba, len)) { 2246 goto illegal_lba; 2247 } 2248 r->sector = r->req.cmd.lba * (s->qdev.blocksize / 512); 2249 r->sector_count = len * (s->qdev.blocksize / 512); 2250 break; 2251 default: 2252 abort(); 2253 illegal_request: 2254 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 2255 return 0; 2256 illegal_lba: 2257 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE)); 2258 return 0; 2259 } 2260 r->need_fua_emulation = sdc->need_fua_emulation(&r->req.cmd); 2261 if (r->sector_count == 0) { 2262 scsi_req_complete(&r->req, GOOD); 2263 } 2264 assert(r->iov.iov_len == 0); 2265 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { 2266 return -r->sector_count * 512; 2267 } else { 2268 return r->sector_count * 512; 2269 } 2270 } 2271 2272 static void scsi_disk_reset(DeviceState *dev) 2273 { 2274 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev.qdev, dev); 2275 uint64_t nb_sectors; 2276 2277 scsi_device_purge_requests(&s->qdev, SENSE_CODE(RESET)); 2278 2279 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 2280 nb_sectors /= s->qdev.blocksize / 512; 2281 if (nb_sectors) { 2282 nb_sectors--; 2283 } 2284 s->qdev.max_lba = nb_sectors; 2285 /* reset tray statuses */ 2286 s->tray_locked = 0; 2287 s->tray_open = 0; 2288 2289 s->qdev.scsi_version = s->qdev.default_scsi_version; 2290 } 2291 2292 static void scsi_disk_resize_cb(void *opaque) 2293 { 2294 SCSIDiskState *s = opaque; 2295 2296 /* SPC lists this sense code as available only for 2297 * direct-access devices. 2298 */ 2299 if (s->qdev.type == TYPE_DISK) { 2300 scsi_device_report_change(&s->qdev, SENSE_CODE(CAPACITY_CHANGED)); 2301 } 2302 } 2303 2304 static void scsi_cd_change_media_cb(void *opaque, bool load, Error **errp) 2305 { 2306 SCSIDiskState *s = opaque; 2307 2308 /* 2309 * When a CD gets changed, we have to report an ejected state and 2310 * then a loaded state to guests so that they detect tray 2311 * open/close and media change events. Guests that do not use 2312 * GET_EVENT_STATUS_NOTIFICATION to detect such tray open/close 2313 * states rely on this behavior. 2314 * 2315 * media_changed governs the state machine used for unit attention 2316 * report. media_event is used by GET EVENT STATUS NOTIFICATION. 2317 */ 2318 s->media_changed = load; 2319 s->tray_open = !load; 2320 scsi_device_set_ua(&s->qdev, SENSE_CODE(UNIT_ATTENTION_NO_MEDIUM)); 2321 s->media_event = true; 2322 s->eject_request = false; 2323 } 2324 2325 static void scsi_cd_eject_request_cb(void *opaque, bool force) 2326 { 2327 SCSIDiskState *s = opaque; 2328 2329 s->eject_request = true; 2330 if (force) { 2331 s->tray_locked = false; 2332 } 2333 } 2334 2335 static bool scsi_cd_is_tray_open(void *opaque) 2336 { 2337 return ((SCSIDiskState *)opaque)->tray_open; 2338 } 2339 2340 static bool scsi_cd_is_medium_locked(void *opaque) 2341 { 2342 return ((SCSIDiskState *)opaque)->tray_locked; 2343 } 2344 2345 static const BlockDevOps scsi_disk_removable_block_ops = { 2346 .change_media_cb = scsi_cd_change_media_cb, 2347 .eject_request_cb = scsi_cd_eject_request_cb, 2348 .is_tray_open = scsi_cd_is_tray_open, 2349 .is_medium_locked = scsi_cd_is_medium_locked, 2350 2351 .resize_cb = scsi_disk_resize_cb, 2352 }; 2353 2354 static const BlockDevOps scsi_disk_block_ops = { 2355 .resize_cb = scsi_disk_resize_cb, 2356 }; 2357 2358 static void scsi_disk_unit_attention_reported(SCSIDevice *dev) 2359 { 2360 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2361 if (s->media_changed) { 2362 s->media_changed = false; 2363 scsi_device_set_ua(&s->qdev, SENSE_CODE(MEDIUM_CHANGED)); 2364 } 2365 } 2366 2367 static void scsi_realize(SCSIDevice *dev, Error **errp) 2368 { 2369 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2370 2371 if (!s->qdev.conf.blk) { 2372 error_setg(errp, "drive property not set"); 2373 return; 2374 } 2375 2376 if (!(s->features & (1 << SCSI_DISK_F_REMOVABLE)) && 2377 !blk_is_inserted(s->qdev.conf.blk)) { 2378 error_setg(errp, "Device needs media, but drive is empty"); 2379 return; 2380 } 2381 2382 blkconf_blocksizes(&s->qdev.conf); 2383 2384 if (s->qdev.conf.logical_block_size > 2385 s->qdev.conf.physical_block_size) { 2386 error_setg(errp, 2387 "logical_block_size > physical_block_size not supported"); 2388 return; 2389 } 2390 2391 if (dev->type == TYPE_DISK) { 2392 if (!blkconf_geometry(&dev->conf, NULL, 65535, 255, 255, errp)) { 2393 return; 2394 } 2395 } 2396 if (!blkconf_apply_backend_options(&dev->conf, 2397 blk_is_read_only(s->qdev.conf.blk), 2398 dev->type == TYPE_DISK, errp)) { 2399 return; 2400 } 2401 2402 if (s->qdev.conf.discard_granularity == -1) { 2403 s->qdev.conf.discard_granularity = 2404 MAX(s->qdev.conf.logical_block_size, DEFAULT_DISCARD_GRANULARITY); 2405 } 2406 2407 if (!s->version) { 2408 s->version = g_strdup(qemu_hw_version()); 2409 } 2410 if (!s->vendor) { 2411 s->vendor = g_strdup("QEMU"); 2412 } 2413 2414 if (blk_is_sg(s->qdev.conf.blk)) { 2415 error_setg(errp, "unwanted /dev/sg*"); 2416 return; 2417 } 2418 2419 if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) && 2420 !(s->features & (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS))) { 2421 blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_removable_block_ops, s); 2422 } else { 2423 blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_block_ops, s); 2424 } 2425 blk_set_guest_block_size(s->qdev.conf.blk, s->qdev.blocksize); 2426 2427 blk_iostatus_enable(s->qdev.conf.blk); 2428 } 2429 2430 static void scsi_hd_realize(SCSIDevice *dev, Error **errp) 2431 { 2432 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2433 /* can happen for devices without drive. The error message for missing 2434 * backend will be issued in scsi_realize 2435 */ 2436 if (s->qdev.conf.blk) { 2437 blkconf_blocksizes(&s->qdev.conf); 2438 } 2439 s->qdev.blocksize = s->qdev.conf.logical_block_size; 2440 s->qdev.type = TYPE_DISK; 2441 if (!s->product) { 2442 s->product = g_strdup("QEMU HARDDISK"); 2443 } 2444 scsi_realize(&s->qdev, errp); 2445 } 2446 2447 static void scsi_cd_realize(SCSIDevice *dev, Error **errp) 2448 { 2449 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2450 int ret; 2451 2452 if (!dev->conf.blk) { 2453 /* Anonymous BlockBackend for an empty drive. As we put it into 2454 * dev->conf, qdev takes care of detaching on unplug. */ 2455 dev->conf.blk = blk_new(0, BLK_PERM_ALL); 2456 ret = blk_attach_dev(dev->conf.blk, &dev->qdev); 2457 assert(ret == 0); 2458 } 2459 2460 s->qdev.blocksize = 2048; 2461 s->qdev.type = TYPE_ROM; 2462 s->features |= 1 << SCSI_DISK_F_REMOVABLE; 2463 if (!s->product) { 2464 s->product = g_strdup("QEMU CD-ROM"); 2465 } 2466 scsi_realize(&s->qdev, errp); 2467 } 2468 2469 static void scsi_disk_realize(SCSIDevice *dev, Error **errp) 2470 { 2471 DriveInfo *dinfo; 2472 Error *local_err = NULL; 2473 2474 if (!dev->conf.blk) { 2475 scsi_realize(dev, &local_err); 2476 assert(local_err); 2477 error_propagate(errp, local_err); 2478 return; 2479 } 2480 2481 dinfo = blk_legacy_dinfo(dev->conf.blk); 2482 if (dinfo && dinfo->media_cd) { 2483 scsi_cd_realize(dev, errp); 2484 } else { 2485 scsi_hd_realize(dev, errp); 2486 } 2487 } 2488 2489 static const SCSIReqOps scsi_disk_emulate_reqops = { 2490 .size = sizeof(SCSIDiskReq), 2491 .free_req = scsi_free_request, 2492 .send_command = scsi_disk_emulate_command, 2493 .read_data = scsi_disk_emulate_read_data, 2494 .write_data = scsi_disk_emulate_write_data, 2495 .get_buf = scsi_get_buf, 2496 }; 2497 2498 static const SCSIReqOps scsi_disk_dma_reqops = { 2499 .size = sizeof(SCSIDiskReq), 2500 .free_req = scsi_free_request, 2501 .send_command = scsi_disk_dma_command, 2502 .read_data = scsi_read_data, 2503 .write_data = scsi_write_data, 2504 .get_buf = scsi_get_buf, 2505 .load_request = scsi_disk_load_request, 2506 .save_request = scsi_disk_save_request, 2507 }; 2508 2509 static const SCSIReqOps *const scsi_disk_reqops_dispatch[256] = { 2510 [TEST_UNIT_READY] = &scsi_disk_emulate_reqops, 2511 [INQUIRY] = &scsi_disk_emulate_reqops, 2512 [MODE_SENSE] = &scsi_disk_emulate_reqops, 2513 [MODE_SENSE_10] = &scsi_disk_emulate_reqops, 2514 [START_STOP] = &scsi_disk_emulate_reqops, 2515 [ALLOW_MEDIUM_REMOVAL] = &scsi_disk_emulate_reqops, 2516 [READ_CAPACITY_10] = &scsi_disk_emulate_reqops, 2517 [READ_TOC] = &scsi_disk_emulate_reqops, 2518 [READ_DVD_STRUCTURE] = &scsi_disk_emulate_reqops, 2519 [READ_DISC_INFORMATION] = &scsi_disk_emulate_reqops, 2520 [GET_CONFIGURATION] = &scsi_disk_emulate_reqops, 2521 [GET_EVENT_STATUS_NOTIFICATION] = &scsi_disk_emulate_reqops, 2522 [MECHANISM_STATUS] = &scsi_disk_emulate_reqops, 2523 [SERVICE_ACTION_IN_16] = &scsi_disk_emulate_reqops, 2524 [REQUEST_SENSE] = &scsi_disk_emulate_reqops, 2525 [SYNCHRONIZE_CACHE] = &scsi_disk_emulate_reqops, 2526 [SEEK_10] = &scsi_disk_emulate_reqops, 2527 [MODE_SELECT] = &scsi_disk_emulate_reqops, 2528 [MODE_SELECT_10] = &scsi_disk_emulate_reqops, 2529 [UNMAP] = &scsi_disk_emulate_reqops, 2530 [WRITE_SAME_10] = &scsi_disk_emulate_reqops, 2531 [WRITE_SAME_16] = &scsi_disk_emulate_reqops, 2532 [VERIFY_10] = &scsi_disk_emulate_reqops, 2533 [VERIFY_12] = &scsi_disk_emulate_reqops, 2534 [VERIFY_16] = &scsi_disk_emulate_reqops, 2535 2536 [READ_6] = &scsi_disk_dma_reqops, 2537 [READ_10] = &scsi_disk_dma_reqops, 2538 [READ_12] = &scsi_disk_dma_reqops, 2539 [READ_16] = &scsi_disk_dma_reqops, 2540 [WRITE_6] = &scsi_disk_dma_reqops, 2541 [WRITE_10] = &scsi_disk_dma_reqops, 2542 [WRITE_12] = &scsi_disk_dma_reqops, 2543 [WRITE_16] = &scsi_disk_dma_reqops, 2544 [WRITE_VERIFY_10] = &scsi_disk_dma_reqops, 2545 [WRITE_VERIFY_12] = &scsi_disk_dma_reqops, 2546 [WRITE_VERIFY_16] = &scsi_disk_dma_reqops, 2547 }; 2548 2549 static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun, 2550 uint8_t *buf, void *hba_private) 2551 { 2552 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d); 2553 SCSIRequest *req; 2554 const SCSIReqOps *ops; 2555 uint8_t command; 2556 2557 command = buf[0]; 2558 ops = scsi_disk_reqops_dispatch[command]; 2559 if (!ops) { 2560 ops = &scsi_disk_emulate_reqops; 2561 } 2562 req = scsi_req_alloc(ops, &s->qdev, tag, lun, hba_private); 2563 2564 #ifdef DEBUG_SCSI 2565 DPRINTF("Command: lun=%d tag=0x%x data=0x%02x", lun, tag, buf[0]); 2566 { 2567 int i; 2568 for (i = 1; i < scsi_cdb_length(buf); i++) { 2569 printf(" 0x%02x", buf[i]); 2570 } 2571 printf("\n"); 2572 } 2573 #endif 2574 2575 return req; 2576 } 2577 2578 #ifdef __linux__ 2579 static int get_device_type(SCSIDiskState *s) 2580 { 2581 uint8_t cmd[16]; 2582 uint8_t buf[36]; 2583 int ret; 2584 2585 memset(cmd, 0, sizeof(cmd)); 2586 memset(buf, 0, sizeof(buf)); 2587 cmd[0] = INQUIRY; 2588 cmd[4] = sizeof(buf); 2589 2590 ret = scsi_SG_IO_FROM_DEV(s->qdev.conf.blk, cmd, sizeof(cmd), 2591 buf, sizeof(buf)); 2592 if (ret < 0) { 2593 return -1; 2594 } 2595 s->qdev.type = buf[0]; 2596 if (buf[1] & 0x80) { 2597 s->features |= 1 << SCSI_DISK_F_REMOVABLE; 2598 } 2599 return 0; 2600 } 2601 2602 static void scsi_block_realize(SCSIDevice *dev, Error **errp) 2603 { 2604 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2605 int sg_version; 2606 int rc; 2607 2608 if (!s->qdev.conf.blk) { 2609 error_setg(errp, "drive property not set"); 2610 return; 2611 } 2612 2613 /* check we are using a driver managing SG_IO (version 3 and after) */ 2614 rc = blk_ioctl(s->qdev.conf.blk, SG_GET_VERSION_NUM, &sg_version); 2615 if (rc < 0) { 2616 error_setg_errno(errp, -rc, "cannot get SG_IO version number"); 2617 if (rc != -EPERM) { 2618 error_append_hint(errp, "Is this a SCSI device?\n"); 2619 } 2620 return; 2621 } 2622 if (sg_version < 30000) { 2623 error_setg(errp, "scsi generic interface too old"); 2624 return; 2625 } 2626 2627 /* get device type from INQUIRY data */ 2628 rc = get_device_type(s); 2629 if (rc < 0) { 2630 error_setg(errp, "INQUIRY failed"); 2631 return; 2632 } 2633 2634 /* Make a guess for the block size, we'll fix it when the guest sends. 2635 * READ CAPACITY. If they don't, they likely would assume these sizes 2636 * anyway. (TODO: check in /sys). 2637 */ 2638 if (s->qdev.type == TYPE_ROM || s->qdev.type == TYPE_WORM) { 2639 s->qdev.blocksize = 2048; 2640 } else { 2641 s->qdev.blocksize = 512; 2642 } 2643 2644 /* Makes the scsi-block device not removable by using HMP and QMP eject 2645 * command. 2646 */ 2647 s->features |= (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS); 2648 2649 scsi_realize(&s->qdev, errp); 2650 scsi_generic_read_device_inquiry(&s->qdev); 2651 } 2652 2653 typedef struct SCSIBlockReq { 2654 SCSIDiskReq req; 2655 sg_io_hdr_t io_header; 2656 2657 /* Selected bytes of the original CDB, copied into our own CDB. */ 2658 uint8_t cmd, cdb1, group_number; 2659 2660 /* CDB passed to SG_IO. */ 2661 uint8_t cdb[16]; 2662 } SCSIBlockReq; 2663 2664 static BlockAIOCB *scsi_block_do_sgio(SCSIBlockReq *req, 2665 int64_t offset, QEMUIOVector *iov, 2666 int direction, 2667 BlockCompletionFunc *cb, void *opaque) 2668 { 2669 sg_io_hdr_t *io_header = &req->io_header; 2670 SCSIDiskReq *r = &req->req; 2671 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 2672 int nb_logical_blocks; 2673 uint64_t lba; 2674 BlockAIOCB *aiocb; 2675 2676 /* This is not supported yet. It can only happen if the guest does 2677 * reads and writes that are not aligned to one logical sectors 2678 * _and_ cover multiple MemoryRegions. 2679 */ 2680 assert(offset % s->qdev.blocksize == 0); 2681 assert(iov->size % s->qdev.blocksize == 0); 2682 2683 io_header->interface_id = 'S'; 2684 2685 /* The data transfer comes from the QEMUIOVector. */ 2686 io_header->dxfer_direction = direction; 2687 io_header->dxfer_len = iov->size; 2688 io_header->dxferp = (void *)iov->iov; 2689 io_header->iovec_count = iov->niov; 2690 assert(io_header->iovec_count == iov->niov); /* no overflow! */ 2691 2692 /* Build a new CDB with the LBA and length patched in, in case 2693 * DMA helpers split the transfer in multiple segments. Do not 2694 * build a CDB smaller than what the guest wanted, and only build 2695 * a larger one if strictly necessary. 2696 */ 2697 io_header->cmdp = req->cdb; 2698 lba = offset / s->qdev.blocksize; 2699 nb_logical_blocks = io_header->dxfer_len / s->qdev.blocksize; 2700 2701 if ((req->cmd >> 5) == 0 && lba <= 0x1ffff) { 2702 /* 6-byte CDB */ 2703 stl_be_p(&req->cdb[0], lba | (req->cmd << 24)); 2704 req->cdb[4] = nb_logical_blocks; 2705 req->cdb[5] = 0; 2706 io_header->cmd_len = 6; 2707 } else if ((req->cmd >> 5) <= 1 && lba <= 0xffffffffULL) { 2708 /* 10-byte CDB */ 2709 req->cdb[0] = (req->cmd & 0x1f) | 0x20; 2710 req->cdb[1] = req->cdb1; 2711 stl_be_p(&req->cdb[2], lba); 2712 req->cdb[6] = req->group_number; 2713 stw_be_p(&req->cdb[7], nb_logical_blocks); 2714 req->cdb[9] = 0; 2715 io_header->cmd_len = 10; 2716 } else if ((req->cmd >> 5) != 4 && lba <= 0xffffffffULL) { 2717 /* 12-byte CDB */ 2718 req->cdb[0] = (req->cmd & 0x1f) | 0xA0; 2719 req->cdb[1] = req->cdb1; 2720 stl_be_p(&req->cdb[2], lba); 2721 stl_be_p(&req->cdb[6], nb_logical_blocks); 2722 req->cdb[10] = req->group_number; 2723 req->cdb[11] = 0; 2724 io_header->cmd_len = 12; 2725 } else { 2726 /* 16-byte CDB */ 2727 req->cdb[0] = (req->cmd & 0x1f) | 0x80; 2728 req->cdb[1] = req->cdb1; 2729 stq_be_p(&req->cdb[2], lba); 2730 stl_be_p(&req->cdb[10], nb_logical_blocks); 2731 req->cdb[14] = req->group_number; 2732 req->cdb[15] = 0; 2733 io_header->cmd_len = 16; 2734 } 2735 2736 /* The rest is as in scsi-generic.c. */ 2737 io_header->mx_sb_len = sizeof(r->req.sense); 2738 io_header->sbp = r->req.sense; 2739 io_header->timeout = UINT_MAX; 2740 io_header->usr_ptr = r; 2741 io_header->flags |= SG_FLAG_DIRECT_IO; 2742 2743 aiocb = blk_aio_ioctl(s->qdev.conf.blk, SG_IO, io_header, cb, opaque); 2744 assert(aiocb != NULL); 2745 return aiocb; 2746 } 2747 2748 static bool scsi_block_no_fua(SCSICommand *cmd) 2749 { 2750 return false; 2751 } 2752 2753 static BlockAIOCB *scsi_block_dma_readv(int64_t offset, 2754 QEMUIOVector *iov, 2755 BlockCompletionFunc *cb, void *cb_opaque, 2756 void *opaque) 2757 { 2758 SCSIBlockReq *r = opaque; 2759 return scsi_block_do_sgio(r, offset, iov, 2760 SG_DXFER_FROM_DEV, cb, cb_opaque); 2761 } 2762 2763 static BlockAIOCB *scsi_block_dma_writev(int64_t offset, 2764 QEMUIOVector *iov, 2765 BlockCompletionFunc *cb, void *cb_opaque, 2766 void *opaque) 2767 { 2768 SCSIBlockReq *r = opaque; 2769 return scsi_block_do_sgio(r, offset, iov, 2770 SG_DXFER_TO_DEV, cb, cb_opaque); 2771 } 2772 2773 static bool scsi_block_is_passthrough(SCSIDiskState *s, uint8_t *buf) 2774 { 2775 switch (buf[0]) { 2776 case VERIFY_10: 2777 case VERIFY_12: 2778 case VERIFY_16: 2779 /* Check if BYTCHK == 0x01 (data-out buffer contains data 2780 * for the number of logical blocks specified in the length 2781 * field). For other modes, do not use scatter/gather operation. 2782 */ 2783 if ((buf[1] & 6) == 2) { 2784 return false; 2785 } 2786 break; 2787 2788 case READ_6: 2789 case READ_10: 2790 case READ_12: 2791 case READ_16: 2792 case WRITE_6: 2793 case WRITE_10: 2794 case WRITE_12: 2795 case WRITE_16: 2796 case WRITE_VERIFY_10: 2797 case WRITE_VERIFY_12: 2798 case WRITE_VERIFY_16: 2799 /* MMC writing cannot be done via DMA helpers, because it sometimes 2800 * involves writing beyond the maximum LBA or to negative LBA (lead-in). 2801 * We might use scsi_block_dma_reqops as long as no writing commands are 2802 * seen, but performance usually isn't paramount on optical media. So, 2803 * just make scsi-block operate the same as scsi-generic for them. 2804 */ 2805 if (s->qdev.type != TYPE_ROM) { 2806 return false; 2807 } 2808 break; 2809 2810 default: 2811 break; 2812 } 2813 2814 return true; 2815 } 2816 2817 2818 static int32_t scsi_block_dma_command(SCSIRequest *req, uint8_t *buf) 2819 { 2820 SCSIBlockReq *r = (SCSIBlockReq *)req; 2821 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 2822 2823 r->cmd = req->cmd.buf[0]; 2824 switch (r->cmd >> 5) { 2825 case 0: 2826 /* 6-byte CDB. */ 2827 r->cdb1 = r->group_number = 0; 2828 break; 2829 case 1: 2830 /* 10-byte CDB. */ 2831 r->cdb1 = req->cmd.buf[1]; 2832 r->group_number = req->cmd.buf[6]; 2833 break; 2834 case 4: 2835 /* 12-byte CDB. */ 2836 r->cdb1 = req->cmd.buf[1]; 2837 r->group_number = req->cmd.buf[10]; 2838 break; 2839 case 5: 2840 /* 16-byte CDB. */ 2841 r->cdb1 = req->cmd.buf[1]; 2842 r->group_number = req->cmd.buf[14]; 2843 break; 2844 default: 2845 abort(); 2846 } 2847 2848 /* Protection information is not supported. For SCSI versions 2 and 2849 * older (as determined by snooping the guest's INQUIRY commands), 2850 * there is no RD/WR/VRPROTECT, so skip this check in these versions. 2851 */ 2852 if (s->qdev.scsi_version > 2 && (req->cmd.buf[1] & 0xe0)) { 2853 scsi_check_condition(&r->req, SENSE_CODE(INVALID_FIELD)); 2854 return 0; 2855 } 2856 2857 r->req.status = &r->io_header.status; 2858 return scsi_disk_dma_command(req, buf); 2859 } 2860 2861 static const SCSIReqOps scsi_block_dma_reqops = { 2862 .size = sizeof(SCSIBlockReq), 2863 .free_req = scsi_free_request, 2864 .send_command = scsi_block_dma_command, 2865 .read_data = scsi_read_data, 2866 .write_data = scsi_write_data, 2867 .get_buf = scsi_get_buf, 2868 .load_request = scsi_disk_load_request, 2869 .save_request = scsi_disk_save_request, 2870 }; 2871 2872 static SCSIRequest *scsi_block_new_request(SCSIDevice *d, uint32_t tag, 2873 uint32_t lun, uint8_t *buf, 2874 void *hba_private) 2875 { 2876 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d); 2877 2878 if (scsi_block_is_passthrough(s, buf)) { 2879 return scsi_req_alloc(&scsi_generic_req_ops, &s->qdev, tag, lun, 2880 hba_private); 2881 } else { 2882 return scsi_req_alloc(&scsi_block_dma_reqops, &s->qdev, tag, lun, 2883 hba_private); 2884 } 2885 } 2886 2887 static int scsi_block_parse_cdb(SCSIDevice *d, SCSICommand *cmd, 2888 uint8_t *buf, void *hba_private) 2889 { 2890 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d); 2891 2892 if (scsi_block_is_passthrough(s, buf)) { 2893 return scsi_bus_parse_cdb(&s->qdev, cmd, buf, hba_private); 2894 } else { 2895 return scsi_req_parse_cdb(&s->qdev, cmd, buf); 2896 } 2897 } 2898 2899 #endif 2900 2901 static 2902 BlockAIOCB *scsi_dma_readv(int64_t offset, QEMUIOVector *iov, 2903 BlockCompletionFunc *cb, void *cb_opaque, 2904 void *opaque) 2905 { 2906 SCSIDiskReq *r = opaque; 2907 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 2908 return blk_aio_preadv(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque); 2909 } 2910 2911 static 2912 BlockAIOCB *scsi_dma_writev(int64_t offset, QEMUIOVector *iov, 2913 BlockCompletionFunc *cb, void *cb_opaque, 2914 void *opaque) 2915 { 2916 SCSIDiskReq *r = opaque; 2917 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 2918 return blk_aio_pwritev(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque); 2919 } 2920 2921 static void scsi_disk_base_class_initfn(ObjectClass *klass, void *data) 2922 { 2923 DeviceClass *dc = DEVICE_CLASS(klass); 2924 SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass); 2925 2926 dc->fw_name = "disk"; 2927 dc->reset = scsi_disk_reset; 2928 sdc->dma_readv = scsi_dma_readv; 2929 sdc->dma_writev = scsi_dma_writev; 2930 sdc->need_fua_emulation = scsi_is_cmd_fua; 2931 } 2932 2933 static const TypeInfo scsi_disk_base_info = { 2934 .name = TYPE_SCSI_DISK_BASE, 2935 .parent = TYPE_SCSI_DEVICE, 2936 .class_init = scsi_disk_base_class_initfn, 2937 .instance_size = sizeof(SCSIDiskState), 2938 .class_size = sizeof(SCSIDiskClass), 2939 .abstract = true, 2940 }; 2941 2942 #define DEFINE_SCSI_DISK_PROPERTIES() \ 2943 DEFINE_BLOCK_PROPERTIES(SCSIDiskState, qdev.conf), \ 2944 DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf), \ 2945 DEFINE_PROP_STRING("ver", SCSIDiskState, version), \ 2946 DEFINE_PROP_STRING("serial", SCSIDiskState, serial), \ 2947 DEFINE_PROP_STRING("vendor", SCSIDiskState, vendor), \ 2948 DEFINE_PROP_STRING("product", SCSIDiskState, product) 2949 2950 static Property scsi_hd_properties[] = { 2951 DEFINE_SCSI_DISK_PROPERTIES(), 2952 DEFINE_PROP_BIT("removable", SCSIDiskState, features, 2953 SCSI_DISK_F_REMOVABLE, false), 2954 DEFINE_PROP_BIT("dpofua", SCSIDiskState, features, 2955 SCSI_DISK_F_DPOFUA, false), 2956 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0), 2957 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0), 2958 DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0), 2959 DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size, 2960 DEFAULT_MAX_UNMAP_SIZE), 2961 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size, 2962 DEFAULT_MAX_IO_SIZE), 2963 DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0), 2964 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version, 2965 5), 2966 DEFINE_BLOCK_CHS_PROPERTIES(SCSIDiskState, qdev.conf), 2967 DEFINE_PROP_END_OF_LIST(), 2968 }; 2969 2970 static const VMStateDescription vmstate_scsi_disk_state = { 2971 .name = "scsi-disk", 2972 .version_id = 1, 2973 .minimum_version_id = 1, 2974 .fields = (VMStateField[]) { 2975 VMSTATE_SCSI_DEVICE(qdev, SCSIDiskState), 2976 VMSTATE_BOOL(media_changed, SCSIDiskState), 2977 VMSTATE_BOOL(media_event, SCSIDiskState), 2978 VMSTATE_BOOL(eject_request, SCSIDiskState), 2979 VMSTATE_BOOL(tray_open, SCSIDiskState), 2980 VMSTATE_BOOL(tray_locked, SCSIDiskState), 2981 VMSTATE_END_OF_LIST() 2982 } 2983 }; 2984 2985 static void scsi_hd_class_initfn(ObjectClass *klass, void *data) 2986 { 2987 DeviceClass *dc = DEVICE_CLASS(klass); 2988 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); 2989 2990 sc->realize = scsi_hd_realize; 2991 sc->alloc_req = scsi_new_request; 2992 sc->unit_attention_reported = scsi_disk_unit_attention_reported; 2993 dc->desc = "virtual SCSI disk"; 2994 dc->props = scsi_hd_properties; 2995 dc->vmsd = &vmstate_scsi_disk_state; 2996 } 2997 2998 static const TypeInfo scsi_hd_info = { 2999 .name = "scsi-hd", 3000 .parent = TYPE_SCSI_DISK_BASE, 3001 .class_init = scsi_hd_class_initfn, 3002 }; 3003 3004 static Property scsi_cd_properties[] = { 3005 DEFINE_SCSI_DISK_PROPERTIES(), 3006 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0), 3007 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0), 3008 DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0), 3009 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size, 3010 DEFAULT_MAX_IO_SIZE), 3011 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version, 3012 5), 3013 DEFINE_PROP_END_OF_LIST(), 3014 }; 3015 3016 static void scsi_cd_class_initfn(ObjectClass *klass, void *data) 3017 { 3018 DeviceClass *dc = DEVICE_CLASS(klass); 3019 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); 3020 3021 sc->realize = scsi_cd_realize; 3022 sc->alloc_req = scsi_new_request; 3023 sc->unit_attention_reported = scsi_disk_unit_attention_reported; 3024 dc->desc = "virtual SCSI CD-ROM"; 3025 dc->props = scsi_cd_properties; 3026 dc->vmsd = &vmstate_scsi_disk_state; 3027 } 3028 3029 static const TypeInfo scsi_cd_info = { 3030 .name = "scsi-cd", 3031 .parent = TYPE_SCSI_DISK_BASE, 3032 .class_init = scsi_cd_class_initfn, 3033 }; 3034 3035 #ifdef __linux__ 3036 static Property scsi_block_properties[] = { 3037 DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf), \ 3038 DEFINE_PROP_DRIVE("drive", SCSIDiskState, qdev.conf.blk), 3039 DEFINE_PROP_BOOL("share-rw", SCSIDiskState, qdev.conf.share_rw, false), 3040 DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0), 3041 DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size, 3042 DEFAULT_MAX_UNMAP_SIZE), 3043 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size, 3044 DEFAULT_MAX_IO_SIZE), 3045 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version, 3046 -1), 3047 DEFINE_PROP_END_OF_LIST(), 3048 }; 3049 3050 static void scsi_block_class_initfn(ObjectClass *klass, void *data) 3051 { 3052 DeviceClass *dc = DEVICE_CLASS(klass); 3053 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); 3054 SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass); 3055 3056 sc->realize = scsi_block_realize; 3057 sc->alloc_req = scsi_block_new_request; 3058 sc->parse_cdb = scsi_block_parse_cdb; 3059 sdc->dma_readv = scsi_block_dma_readv; 3060 sdc->dma_writev = scsi_block_dma_writev; 3061 sdc->need_fua_emulation = scsi_block_no_fua; 3062 dc->desc = "SCSI block device passthrough"; 3063 dc->props = scsi_block_properties; 3064 dc->vmsd = &vmstate_scsi_disk_state; 3065 } 3066 3067 static const TypeInfo scsi_block_info = { 3068 .name = "scsi-block", 3069 .parent = TYPE_SCSI_DISK_BASE, 3070 .class_init = scsi_block_class_initfn, 3071 }; 3072 #endif 3073 3074 static Property scsi_disk_properties[] = { 3075 DEFINE_SCSI_DISK_PROPERTIES(), 3076 DEFINE_PROP_BIT("removable", SCSIDiskState, features, 3077 SCSI_DISK_F_REMOVABLE, false), 3078 DEFINE_PROP_BIT("dpofua", SCSIDiskState, features, 3079 SCSI_DISK_F_DPOFUA, false), 3080 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0), 3081 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0), 3082 DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0), 3083 DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size, 3084 DEFAULT_MAX_UNMAP_SIZE), 3085 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size, 3086 DEFAULT_MAX_IO_SIZE), 3087 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version, 3088 5), 3089 DEFINE_PROP_END_OF_LIST(), 3090 }; 3091 3092 static void scsi_disk_class_initfn(ObjectClass *klass, void *data) 3093 { 3094 DeviceClass *dc = DEVICE_CLASS(klass); 3095 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); 3096 3097 sc->realize = scsi_disk_realize; 3098 sc->alloc_req = scsi_new_request; 3099 sc->unit_attention_reported = scsi_disk_unit_attention_reported; 3100 dc->fw_name = "disk"; 3101 dc->desc = "virtual SCSI disk or CD-ROM (legacy)"; 3102 dc->reset = scsi_disk_reset; 3103 dc->props = scsi_disk_properties; 3104 dc->vmsd = &vmstate_scsi_disk_state; 3105 } 3106 3107 static const TypeInfo scsi_disk_info = { 3108 .name = "scsi-disk", 3109 .parent = TYPE_SCSI_DISK_BASE, 3110 .class_init = scsi_disk_class_initfn, 3111 }; 3112 3113 static void scsi_disk_register_types(void) 3114 { 3115 type_register_static(&scsi_disk_base_info); 3116 type_register_static(&scsi_hd_info); 3117 type_register_static(&scsi_cd_info); 3118 #ifdef __linux__ 3119 type_register_static(&scsi_block_info); 3120 #endif 3121 type_register_static(&scsi_disk_info); 3122 } 3123 3124 type_init(scsi_disk_register_types) 3125