1 /* 2 * SCSI Device emulation 3 * 4 * Copyright (c) 2006 CodeSourcery. 5 * Based on code by Fabrice Bellard 6 * 7 * Written by Paul Brook 8 * Modifications: 9 * 2009-Dec-12 Artyom Tarasenko : implemented stamdard inquiry for the case 10 * when the allocation length of CDB is smaller 11 * than 36. 12 * 2009-Oct-13 Artyom Tarasenko : implemented the block descriptor in the 13 * MODE SENSE response. 14 * 15 * This code is licensed under the LGPL. 16 * 17 * Note that this file only handles the SCSI architecture model and device 18 * commands. Emulation of interface/link layer protocols is handled by 19 * the host adapter emulator. 20 */ 21 22 //#define DEBUG_SCSI 23 24 #ifdef DEBUG_SCSI 25 #define DPRINTF(fmt, ...) \ 26 do { printf("scsi-disk: " fmt , ## __VA_ARGS__); } while (0) 27 #else 28 #define DPRINTF(fmt, ...) do {} while(0) 29 #endif 30 31 #include "qemu/osdep.h" 32 #include "qapi/error.h" 33 #include "qemu/error-report.h" 34 #include "hw/scsi/scsi.h" 35 #include "scsi/constants.h" 36 #include "sysemu/sysemu.h" 37 #include "sysemu/block-backend.h" 38 #include "sysemu/blockdev.h" 39 #include "hw/block/block.h" 40 #include "sysemu/dma.h" 41 #include "qemu/cutils.h" 42 43 #ifdef __linux 44 #include <scsi/sg.h> 45 #endif 46 47 #define SCSI_WRITE_SAME_MAX 524288 48 #define SCSI_DMA_BUF_SIZE 131072 49 #define SCSI_MAX_INQUIRY_LEN 256 50 #define SCSI_MAX_MODE_LEN 256 51 52 #define DEFAULT_DISCARD_GRANULARITY 4096 53 #define DEFAULT_MAX_UNMAP_SIZE (1 << 30) /* 1 GB */ 54 #define DEFAULT_MAX_IO_SIZE INT_MAX /* 2 GB - 1 block */ 55 56 #define TYPE_SCSI_DISK_BASE "scsi-disk-base" 57 58 #define SCSI_DISK_BASE(obj) \ 59 OBJECT_CHECK(SCSIDiskState, (obj), TYPE_SCSI_DISK_BASE) 60 #define SCSI_DISK_BASE_CLASS(klass) \ 61 OBJECT_CLASS_CHECK(SCSIDiskClass, (klass), TYPE_SCSI_DISK_BASE) 62 #define SCSI_DISK_BASE_GET_CLASS(obj) \ 63 OBJECT_GET_CLASS(SCSIDiskClass, (obj), TYPE_SCSI_DISK_BASE) 64 65 typedef struct SCSIDiskClass { 66 SCSIDeviceClass parent_class; 67 DMAIOFunc *dma_readv; 68 DMAIOFunc *dma_writev; 69 bool (*need_fua_emulation)(SCSICommand *cmd); 70 } SCSIDiskClass; 71 72 typedef struct SCSIDiskReq { 73 SCSIRequest req; 74 /* Both sector and sector_count are in terms of qemu 512 byte blocks. */ 75 uint64_t sector; 76 uint32_t sector_count; 77 uint32_t buflen; 78 bool started; 79 bool need_fua_emulation; 80 struct iovec iov; 81 QEMUIOVector qiov; 82 BlockAcctCookie acct; 83 unsigned char *status; 84 } SCSIDiskReq; 85 86 #define SCSI_DISK_F_REMOVABLE 0 87 #define SCSI_DISK_F_DPOFUA 1 88 #define SCSI_DISK_F_NO_REMOVABLE_DEVOPS 2 89 90 typedef struct SCSIDiskState 91 { 92 SCSIDevice qdev; 93 uint32_t features; 94 bool media_changed; 95 bool media_event; 96 bool eject_request; 97 uint16_t port_index; 98 uint64_t max_unmap_size; 99 uint64_t max_io_size; 100 QEMUBH *bh; 101 char *version; 102 char *serial; 103 char *vendor; 104 char *product; 105 bool tray_open; 106 bool tray_locked; 107 /* 108 * 0x0000 - rotation rate not reported 109 * 0x0001 - non-rotating medium (SSD) 110 * 0x0002-0x0400 - reserved 111 * 0x0401-0xffe - rotations per minute 112 * 0xffff - reserved 113 */ 114 uint16_t rotation_rate; 115 } SCSIDiskState; 116 117 static bool scsi_handle_rw_error(SCSIDiskReq *r, int error, bool acct_failed); 118 119 static void scsi_free_request(SCSIRequest *req) 120 { 121 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 122 123 qemu_vfree(r->iov.iov_base); 124 } 125 126 /* Helper function for command completion with sense. 
*/ 127 static void scsi_check_condition(SCSIDiskReq *r, SCSISense sense) 128 { 129 DPRINTF("Command complete tag=0x%x sense=%d/%d/%d\n", 130 r->req.tag, sense.key, sense.asc, sense.ascq); 131 scsi_req_build_sense(&r->req, sense); 132 scsi_req_complete(&r->req, CHECK_CONDITION); 133 } 134 135 static void scsi_init_iovec(SCSIDiskReq *r, size_t size) 136 { 137 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 138 139 if (!r->iov.iov_base) { 140 r->buflen = size; 141 r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen); 142 } 143 r->iov.iov_len = MIN(r->sector_count * 512, r->buflen); 144 qemu_iovec_init_external(&r->qiov, &r->iov, 1); 145 } 146 147 static void scsi_disk_save_request(QEMUFile *f, SCSIRequest *req) 148 { 149 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 150 151 qemu_put_be64s(f, &r->sector); 152 qemu_put_be32s(f, &r->sector_count); 153 qemu_put_be32s(f, &r->buflen); 154 if (r->buflen) { 155 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { 156 qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len); 157 } else if (!req->retry) { 158 uint32_t len = r->iov.iov_len; 159 qemu_put_be32s(f, &len); 160 qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len); 161 } 162 } 163 } 164 165 static void scsi_disk_load_request(QEMUFile *f, SCSIRequest *req) 166 { 167 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 168 169 qemu_get_be64s(f, &r->sector); 170 qemu_get_be32s(f, &r->sector_count); 171 qemu_get_be32s(f, &r->buflen); 172 if (r->buflen) { 173 scsi_init_iovec(r, r->buflen); 174 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { 175 qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len); 176 } else if (!r->req.retry) { 177 uint32_t len; 178 qemu_get_be32s(f, &len); 179 r->iov.iov_len = len; 180 assert(r->iov.iov_len <= r->buflen); 181 qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len); 182 } 183 } 184 185 qemu_iovec_init_external(&r->qiov, &r->iov, 1); 186 } 187 188 static bool scsi_disk_req_check_error(SCSIDiskReq *r, int ret, bool acct_failed) 189 { 190 if (r->req.io_canceled) { 191 scsi_req_cancel_complete(&r->req); 192 return true; 193 } 194 195 if (ret < 0 || (r->status && *r->status)) { 196 return scsi_handle_rw_error(r, -ret, acct_failed); 197 } 198 199 return false; 200 } 201 202 static void scsi_aio_complete(void *opaque, int ret) 203 { 204 SCSIDiskReq *r = (SCSIDiskReq *)opaque; 205 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 206 207 assert(r->req.aiocb != NULL); 208 r->req.aiocb = NULL; 209 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); 210 if (scsi_disk_req_check_error(r, ret, true)) { 211 goto done; 212 } 213 214 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); 215 scsi_req_complete(&r->req, GOOD); 216 217 done: 218 aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); 219 scsi_req_unref(&r->req); 220 } 221 222 static bool scsi_is_cmd_fua(SCSICommand *cmd) 223 { 224 switch (cmd->buf[0]) { 225 case READ_10: 226 case READ_12: 227 case READ_16: 228 case WRITE_10: 229 case WRITE_12: 230 case WRITE_16: 231 return (cmd->buf[1] & 8) != 0; 232 233 case VERIFY_10: 234 case VERIFY_12: 235 case VERIFY_16: 236 case WRITE_VERIFY_10: 237 case WRITE_VERIFY_12: 238 case WRITE_VERIFY_16: 239 return true; 240 241 case READ_6: 242 case WRITE_6: 243 default: 244 return false; 245 } 246 } 247 248 static void scsi_write_do_fua(SCSIDiskReq *r) 249 { 250 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 251 252 assert(r->req.aiocb == NULL); 253 assert(!r->req.io_canceled); 254 255 if (r->need_fua_emulation) { 256 
block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0, 257 BLOCK_ACCT_FLUSH); 258 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r); 259 return; 260 } 261 262 scsi_req_complete(&r->req, GOOD); 263 scsi_req_unref(&r->req); 264 } 265 266 static void scsi_dma_complete_noio(SCSIDiskReq *r, int ret) 267 { 268 assert(r->req.aiocb == NULL); 269 if (scsi_disk_req_check_error(r, ret, false)) { 270 goto done; 271 } 272 273 r->sector += r->sector_count; 274 r->sector_count = 0; 275 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { 276 scsi_write_do_fua(r); 277 return; 278 } else { 279 scsi_req_complete(&r->req, GOOD); 280 } 281 282 done: 283 scsi_req_unref(&r->req); 284 } 285 286 static void scsi_dma_complete(void *opaque, int ret) 287 { 288 SCSIDiskReq *r = (SCSIDiskReq *)opaque; 289 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 290 291 assert(r->req.aiocb != NULL); 292 r->req.aiocb = NULL; 293 294 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); 295 if (ret < 0) { 296 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct); 297 } else { 298 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); 299 } 300 scsi_dma_complete_noio(r, ret); 301 aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); 302 } 303 304 static void scsi_read_complete(void * opaque, int ret) 305 { 306 SCSIDiskReq *r = (SCSIDiskReq *)opaque; 307 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 308 int n; 309 310 assert(r->req.aiocb != NULL); 311 r->req.aiocb = NULL; 312 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); 313 if (scsi_disk_req_check_error(r, ret, true)) { 314 goto done; 315 } 316 317 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); 318 DPRINTF("Data ready tag=0x%x len=%zd\n", r->req.tag, r->qiov.size); 319 320 n = r->qiov.size / 512; 321 r->sector += n; 322 r->sector_count -= n; 323 scsi_req_data(&r->req, r->qiov.size); 324 325 done: 326 scsi_req_unref(&r->req); 327 aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); 328 } 329 330 /* Actually issue a read to the block device. */ 331 static void scsi_do_read(SCSIDiskReq *r, int ret) 332 { 333 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 334 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s)); 335 336 assert (r->req.aiocb == NULL); 337 if (scsi_disk_req_check_error(r, ret, false)) { 338 goto done; 339 } 340 341 /* The request is used as the AIO opaque value, so add a ref. 
*/ 342 scsi_req_ref(&r->req); 343 344 if (r->req.sg) { 345 dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_READ); 346 r->req.resid -= r->req.sg->size; 347 r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk), 348 r->req.sg, r->sector << BDRV_SECTOR_BITS, 349 BDRV_SECTOR_SIZE, 350 sdc->dma_readv, r, scsi_dma_complete, r, 351 DMA_DIRECTION_FROM_DEVICE); 352 } else { 353 scsi_init_iovec(r, SCSI_DMA_BUF_SIZE); 354 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 355 r->qiov.size, BLOCK_ACCT_READ); 356 r->req.aiocb = sdc->dma_readv(r->sector << BDRV_SECTOR_BITS, &r->qiov, 357 scsi_read_complete, r, r); 358 } 359 360 done: 361 scsi_req_unref(&r->req); 362 } 363 364 static void scsi_do_read_cb(void *opaque, int ret) 365 { 366 SCSIDiskReq *r = (SCSIDiskReq *)opaque; 367 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 368 369 assert (r->req.aiocb != NULL); 370 r->req.aiocb = NULL; 371 372 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); 373 if (ret < 0) { 374 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct); 375 } else { 376 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); 377 } 378 scsi_do_read(opaque, ret); 379 aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); 380 } 381 382 /* Read more data from scsi device into buffer. */ 383 static void scsi_read_data(SCSIRequest *req) 384 { 385 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 386 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 387 bool first; 388 389 DPRINTF("Read sector_count=%d\n", r->sector_count); 390 if (r->sector_count == 0) { 391 /* This also clears the sense buffer for REQUEST SENSE. */ 392 scsi_req_complete(&r->req, GOOD); 393 return; 394 } 395 396 /* No data transfer may already be in progress */ 397 assert(r->req.aiocb == NULL); 398 399 /* The request is used as the AIO opaque value, so add a ref. */ 400 scsi_req_ref(&r->req); 401 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { 402 DPRINTF("Data transfer direction invalid\n"); 403 scsi_read_complete(r, -EINVAL); 404 return; 405 } 406 407 if (!blk_is_available(req->dev->conf.blk)) { 408 scsi_read_complete(r, -ENOMEDIUM); 409 return; 410 } 411 412 first = !r->started; 413 r->started = true; 414 if (first && r->need_fua_emulation) { 415 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0, 416 BLOCK_ACCT_FLUSH); 417 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_do_read_cb, r); 418 } else { 419 scsi_do_read(r, 0); 420 } 421 } 422 423 /* 424 * scsi_handle_rw_error has two return values. False means that the error 425 * must be ignored, true means that the error has been processed and the 426 * caller should not do anything else for this request. Note that 427 * scsi_handle_rw_error always manages its reference counts, independent 428 * of the return value. 429 */ 430 static bool scsi_handle_rw_error(SCSIDiskReq *r, int error, bool acct_failed) 431 { 432 bool is_read = (r->req.cmd.mode == SCSI_XFER_FROM_DEV); 433 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 434 BlockErrorAction action = blk_get_error_action(s->qdev.conf.blk, 435 is_read, error); 436 437 if (action == BLOCK_ERROR_ACTION_REPORT) { 438 if (acct_failed) { 439 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct); 440 } 441 switch (error) { 442 case 0: 443 /* The command has run, no need to fake sense. 
*/ 444 assert(r->status && *r->status); 445 scsi_req_complete(&r->req, *r->status); 446 break; 447 case ENOMEDIUM: 448 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM)); 449 break; 450 case ENOMEM: 451 scsi_check_condition(r, SENSE_CODE(TARGET_FAILURE)); 452 break; 453 case EINVAL: 454 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 455 break; 456 case ENOSPC: 457 scsi_check_condition(r, SENSE_CODE(SPACE_ALLOC_FAILED)); 458 break; 459 default: 460 scsi_check_condition(r, SENSE_CODE(IO_ERROR)); 461 break; 462 } 463 } 464 if (!error) { 465 assert(r->status && *r->status); 466 error = scsi_sense_buf_to_errno(r->req.sense, sizeof(r->req.sense)); 467 468 if (error == ECANCELED || error == EAGAIN || error == ENOTCONN || 469 error == 0) { 470 /* These errors are handled by guest. */ 471 scsi_req_complete(&r->req, *r->status); 472 return true; 473 } 474 } 475 476 blk_error_action(s->qdev.conf.blk, action, is_read, error); 477 if (action == BLOCK_ERROR_ACTION_STOP) { 478 scsi_req_retry(&r->req); 479 } 480 return action != BLOCK_ERROR_ACTION_IGNORE; 481 } 482 483 static void scsi_write_complete_noio(SCSIDiskReq *r, int ret) 484 { 485 uint32_t n; 486 487 assert (r->req.aiocb == NULL); 488 if (scsi_disk_req_check_error(r, ret, false)) { 489 goto done; 490 } 491 492 n = r->qiov.size / 512; 493 r->sector += n; 494 r->sector_count -= n; 495 if (r->sector_count == 0) { 496 scsi_write_do_fua(r); 497 return; 498 } else { 499 scsi_init_iovec(r, SCSI_DMA_BUF_SIZE); 500 DPRINTF("Write complete tag=0x%x more=%zd\n", r->req.tag, r->qiov.size); 501 scsi_req_data(&r->req, r->qiov.size); 502 } 503 504 done: 505 scsi_req_unref(&r->req); 506 } 507 508 static void scsi_write_complete(void * opaque, int ret) 509 { 510 SCSIDiskReq *r = (SCSIDiskReq *)opaque; 511 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 512 513 assert (r->req.aiocb != NULL); 514 r->req.aiocb = NULL; 515 516 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); 517 if (ret < 0) { 518 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct); 519 } else { 520 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); 521 } 522 scsi_write_complete_noio(r, ret); 523 aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); 524 } 525 526 static void scsi_write_data(SCSIRequest *req) 527 { 528 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 529 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 530 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s)); 531 532 /* No data transfer may already be in progress */ 533 assert(r->req.aiocb == NULL); 534 535 /* The request is used as the AIO opaque value, so add a ref. */ 536 scsi_req_ref(&r->req); 537 if (r->req.cmd.mode != SCSI_XFER_TO_DEV) { 538 DPRINTF("Data transfer direction invalid\n"); 539 scsi_write_complete_noio(r, -EINVAL); 540 return; 541 } 542 543 if (!r->req.sg && !r->qiov.size) { 544 /* Called for the first time. Ask the driver to send us more data. 
*/ 545 r->started = true; 546 scsi_write_complete_noio(r, 0); 547 return; 548 } 549 if (!blk_is_available(req->dev->conf.blk)) { 550 scsi_write_complete_noio(r, -ENOMEDIUM); 551 return; 552 } 553 554 if (r->req.cmd.buf[0] == VERIFY_10 || r->req.cmd.buf[0] == VERIFY_12 || 555 r->req.cmd.buf[0] == VERIFY_16) { 556 if (r->req.sg) { 557 scsi_dma_complete_noio(r, 0); 558 } else { 559 scsi_write_complete_noio(r, 0); 560 } 561 return; 562 } 563 564 if (r->req.sg) { 565 dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_WRITE); 566 r->req.resid -= r->req.sg->size; 567 r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk), 568 r->req.sg, r->sector << BDRV_SECTOR_BITS, 569 BDRV_SECTOR_SIZE, 570 sdc->dma_writev, r, scsi_dma_complete, r, 571 DMA_DIRECTION_TO_DEVICE); 572 } else { 573 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 574 r->qiov.size, BLOCK_ACCT_WRITE); 575 r->req.aiocb = sdc->dma_writev(r->sector << BDRV_SECTOR_BITS, &r->qiov, 576 scsi_write_complete, r, r); 577 } 578 } 579 580 /* Return a pointer to the data buffer. */ 581 static uint8_t *scsi_get_buf(SCSIRequest *req) 582 { 583 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 584 585 return (uint8_t *)r->iov.iov_base; 586 } 587 588 static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf) 589 { 590 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 591 int buflen = 0; 592 int start; 593 594 if (req->cmd.buf[1] & 0x1) { 595 /* Vital product data */ 596 uint8_t page_code = req->cmd.buf[2]; 597 598 outbuf[buflen++] = s->qdev.type & 0x1f; 599 outbuf[buflen++] = page_code ; // this page 600 outbuf[buflen++] = 0x00; 601 outbuf[buflen++] = 0x00; 602 start = buflen; 603 604 switch (page_code) { 605 case 0x00: /* Supported page codes, mandatory */ 606 { 607 DPRINTF("Inquiry EVPD[Supported pages] " 608 "buffer size %zd\n", req->cmd.xfer); 609 outbuf[buflen++] = 0x00; // list of supported pages (this page) 610 if (s->serial) { 611 outbuf[buflen++] = 0x80; // unit serial number 612 } 613 outbuf[buflen++] = 0x83; // device identification 614 if (s->qdev.type == TYPE_DISK) { 615 outbuf[buflen++] = 0xb0; // block limits 616 outbuf[buflen++] = 0xb1; /* block device characteristics */ 617 outbuf[buflen++] = 0xb2; // thin provisioning 618 } 619 break; 620 } 621 case 0x80: /* Device serial number, optional */ 622 { 623 int l; 624 625 if (!s->serial) { 626 DPRINTF("Inquiry (EVPD[Serial number] not supported\n"); 627 return -1; 628 } 629 630 l = strlen(s->serial); 631 if (l > 36) { 632 l = 36; 633 } 634 635 DPRINTF("Inquiry EVPD[Serial number] " 636 "buffer size %zd\n", req->cmd.xfer); 637 memcpy(outbuf+buflen, s->serial, l); 638 buflen += l; 639 break; 640 } 641 642 case 0x83: /* Device identification page, mandatory */ 643 { 644 const char *str = s->serial ?: blk_name(s->qdev.conf.blk); 645 int max_len = s->serial ? 
20 : 255 - 8; 646 int id_len = strlen(str); 647 648 if (id_len > max_len) { 649 id_len = max_len; 650 } 651 DPRINTF("Inquiry EVPD[Device identification] " 652 "buffer size %zd\n", req->cmd.xfer); 653 654 outbuf[buflen++] = 0x2; // ASCII 655 outbuf[buflen++] = 0; // not officially assigned 656 outbuf[buflen++] = 0; // reserved 657 outbuf[buflen++] = id_len; // length of data following 658 memcpy(outbuf+buflen, str, id_len); 659 buflen += id_len; 660 661 if (s->qdev.wwn) { 662 outbuf[buflen++] = 0x1; // Binary 663 outbuf[buflen++] = 0x3; // NAA 664 outbuf[buflen++] = 0; // reserved 665 outbuf[buflen++] = 8; 666 stq_be_p(&outbuf[buflen], s->qdev.wwn); 667 buflen += 8; 668 } 669 670 if (s->qdev.port_wwn) { 671 outbuf[buflen++] = 0x61; // SAS / Binary 672 outbuf[buflen++] = 0x93; // PIV / Target port / NAA 673 outbuf[buflen++] = 0; // reserved 674 outbuf[buflen++] = 8; 675 stq_be_p(&outbuf[buflen], s->qdev.port_wwn); 676 buflen += 8; 677 } 678 679 if (s->port_index) { 680 outbuf[buflen++] = 0x61; // SAS / Binary 681 outbuf[buflen++] = 0x94; // PIV / Target port / relative target port 682 outbuf[buflen++] = 0; // reserved 683 outbuf[buflen++] = 4; 684 stw_be_p(&outbuf[buflen + 2], s->port_index); 685 buflen += 4; 686 } 687 break; 688 } 689 case 0xb0: /* block limits */ 690 { 691 unsigned int unmap_sectors = 692 s->qdev.conf.discard_granularity / s->qdev.blocksize; 693 unsigned int min_io_size = 694 s->qdev.conf.min_io_size / s->qdev.blocksize; 695 unsigned int opt_io_size = 696 s->qdev.conf.opt_io_size / s->qdev.blocksize; 697 unsigned int max_unmap_sectors = 698 s->max_unmap_size / s->qdev.blocksize; 699 unsigned int max_io_sectors = 700 s->max_io_size / s->qdev.blocksize; 701 702 if (s->qdev.type == TYPE_ROM) { 703 DPRINTF("Inquiry (EVPD[%02X] not supported for CDROM\n", 704 page_code); 705 return -1; 706 } 707 if (s->qdev.type == TYPE_DISK) { 708 int max_transfer_blk = blk_get_max_transfer(s->qdev.conf.blk); 709 int max_io_sectors_blk = 710 max_transfer_blk / s->qdev.blocksize; 711 712 max_io_sectors = 713 MIN_NON_ZERO(max_io_sectors_blk, max_io_sectors); 714 715 /* min_io_size and opt_io_size can't be greater than 716 * max_io_sectors */ 717 min_io_size = 718 MIN_NON_ZERO(min_io_size, max_io_sectors); 719 opt_io_size = 720 MIN_NON_ZERO(opt_io_size, max_io_sectors); 721 } 722 /* required VPD size with unmap support */ 723 buflen = 0x40; 724 memset(outbuf + 4, 0, buflen - 4); 725 726 outbuf[4] = 0x1; /* wsnz */ 727 728 /* optimal transfer length granularity */ 729 outbuf[6] = (min_io_size >> 8) & 0xff; 730 outbuf[7] = min_io_size & 0xff; 731 732 /* maximum transfer length */ 733 outbuf[8] = (max_io_sectors >> 24) & 0xff; 734 outbuf[9] = (max_io_sectors >> 16) & 0xff; 735 outbuf[10] = (max_io_sectors >> 8) & 0xff; 736 outbuf[11] = max_io_sectors & 0xff; 737 738 /* optimal transfer length */ 739 outbuf[12] = (opt_io_size >> 24) & 0xff; 740 outbuf[13] = (opt_io_size >> 16) & 0xff; 741 outbuf[14] = (opt_io_size >> 8) & 0xff; 742 outbuf[15] = opt_io_size & 0xff; 743 744 /* max unmap LBA count, default is 1GB */ 745 outbuf[20] = (max_unmap_sectors >> 24) & 0xff; 746 outbuf[21] = (max_unmap_sectors >> 16) & 0xff; 747 outbuf[22] = (max_unmap_sectors >> 8) & 0xff; 748 outbuf[23] = max_unmap_sectors & 0xff; 749 750 /* max unmap descriptors, 255 fit in 4 kb with an 8-byte header. 
*/ 751 outbuf[24] = 0; 752 outbuf[25] = 0; 753 outbuf[26] = 0; 754 outbuf[27] = 255; 755 756 /* optimal unmap granularity */ 757 outbuf[28] = (unmap_sectors >> 24) & 0xff; 758 outbuf[29] = (unmap_sectors >> 16) & 0xff; 759 outbuf[30] = (unmap_sectors >> 8) & 0xff; 760 outbuf[31] = unmap_sectors & 0xff; 761 762 /* max write same size */ 763 outbuf[36] = 0; 764 outbuf[37] = 0; 765 outbuf[38] = 0; 766 outbuf[39] = 0; 767 768 outbuf[40] = (max_io_sectors >> 24) & 0xff; 769 outbuf[41] = (max_io_sectors >> 16) & 0xff; 770 outbuf[42] = (max_io_sectors >> 8) & 0xff; 771 outbuf[43] = max_io_sectors & 0xff; 772 break; 773 } 774 case 0xb1: /* block device characteristics */ 775 { 776 buflen = 8; 777 outbuf[4] = (s->rotation_rate >> 8) & 0xff; 778 outbuf[5] = s->rotation_rate & 0xff; 779 outbuf[6] = 0; 780 outbuf[7] = 0; 781 break; 782 } 783 case 0xb2: /* thin provisioning */ 784 { 785 buflen = 8; 786 outbuf[4] = 0; 787 outbuf[5] = 0xe0; /* unmap & write_same 10/16 all supported */ 788 outbuf[6] = s->qdev.conf.discard_granularity ? 2 : 1; 789 outbuf[7] = 0; 790 break; 791 } 792 default: 793 return -1; 794 } 795 /* done with EVPD */ 796 assert(buflen - start <= 255); 797 outbuf[start - 1] = buflen - start; 798 return buflen; 799 } 800 801 /* Standard INQUIRY data */ 802 if (req->cmd.buf[2] != 0) { 803 return -1; 804 } 805 806 /* PAGE CODE == 0 */ 807 buflen = req->cmd.xfer; 808 if (buflen > SCSI_MAX_INQUIRY_LEN) { 809 buflen = SCSI_MAX_INQUIRY_LEN; 810 } 811 812 outbuf[0] = s->qdev.type & 0x1f; 813 outbuf[1] = (s->features & (1 << SCSI_DISK_F_REMOVABLE)) ? 0x80 : 0; 814 815 strpadcpy((char *) &outbuf[16], 16, s->product, ' '); 816 strpadcpy((char *) &outbuf[8], 8, s->vendor, ' '); 817 818 memset(&outbuf[32], 0, 4); 819 memcpy(&outbuf[32], s->version, MIN(4, strlen(s->version))); 820 /* 821 * We claim conformance to SPC-3, which is required for guests 822 * to ask for modern features like READ CAPACITY(16) or the 823 * block characteristics VPD page by default. Not all of SPC-3 824 * is actually implemented, but we're good enough. 825 */ 826 outbuf[2] = 5; 827 outbuf[3] = 2 | 0x10; /* Format 2, HiSup */ 828 829 if (buflen > 36) { 830 outbuf[4] = buflen - 5; /* Additional Length = (Len - 1) - 4 */ 831 } else { 832 /* If the allocation length of CDB is too small, 833 the additional length is not adjusted */ 834 outbuf[4] = 36 - 5; 835 } 836 837 /* Sync data transfer and TCQ. */ 838 outbuf[7] = 0x10 | (req->bus->info->tcq ? 0x02 : 0); 839 return buflen; 840 } 841 842 static inline bool media_is_dvd(SCSIDiskState *s) 843 { 844 uint64_t nb_sectors; 845 if (s->qdev.type != TYPE_ROM) { 846 return false; 847 } 848 if (!blk_is_available(s->qdev.conf.blk)) { 849 return false; 850 } 851 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 852 return nb_sectors > CD_MAX_SECTORS; 853 } 854 855 static inline bool media_is_cd(SCSIDiskState *s) 856 { 857 uint64_t nb_sectors; 858 if (s->qdev.type != TYPE_ROM) { 859 return false; 860 } 861 if (!blk_is_available(s->qdev.conf.blk)) { 862 return false; 863 } 864 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 865 return nb_sectors <= CD_MAX_SECTORS; 866 } 867 868 static int scsi_read_disc_information(SCSIDiskState *s, SCSIDiskReq *r, 869 uint8_t *outbuf) 870 { 871 uint8_t type = r->req.cmd.buf[1] & 7; 872 873 if (s->qdev.type != TYPE_ROM) { 874 return -1; 875 } 876 877 /* Types 1/2 are only defined for Blu-Ray. 
*/ 878 if (type != 0) { 879 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 880 return -1; 881 } 882 883 memset(outbuf, 0, 34); 884 outbuf[1] = 32; 885 outbuf[2] = 0xe; /* last session complete, disc finalized */ 886 outbuf[3] = 1; /* first track on disc */ 887 outbuf[4] = 1; /* # of sessions */ 888 outbuf[5] = 1; /* first track of last session */ 889 outbuf[6] = 1; /* last track of last session */ 890 outbuf[7] = 0x20; /* unrestricted use */ 891 outbuf[8] = 0x00; /* CD-ROM or DVD-ROM */ 892 /* 9-10-11: most significant byte corresponding bytes 4-5-6 */ 893 /* 12-23: not meaningful for CD-ROM or DVD-ROM */ 894 /* 24-31: disc bar code */ 895 /* 32: disc application code */ 896 /* 33: number of OPC tables */ 897 898 return 34; 899 } 900 901 static int scsi_read_dvd_structure(SCSIDiskState *s, SCSIDiskReq *r, 902 uint8_t *outbuf) 903 { 904 static const int rds_caps_size[5] = { 905 [0] = 2048 + 4, 906 [1] = 4 + 4, 907 [3] = 188 + 4, 908 [4] = 2048 + 4, 909 }; 910 911 uint8_t media = r->req.cmd.buf[1]; 912 uint8_t layer = r->req.cmd.buf[6]; 913 uint8_t format = r->req.cmd.buf[7]; 914 int size = -1; 915 916 if (s->qdev.type != TYPE_ROM) { 917 return -1; 918 } 919 if (media != 0) { 920 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 921 return -1; 922 } 923 924 if (format != 0xff) { 925 if (!blk_is_available(s->qdev.conf.blk)) { 926 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM)); 927 return -1; 928 } 929 if (media_is_cd(s)) { 930 scsi_check_condition(r, SENSE_CODE(INCOMPATIBLE_FORMAT)); 931 return -1; 932 } 933 if (format >= ARRAY_SIZE(rds_caps_size)) { 934 return -1; 935 } 936 size = rds_caps_size[format]; 937 memset(outbuf, 0, size); 938 } 939 940 switch (format) { 941 case 0x00: { 942 /* Physical format information */ 943 uint64_t nb_sectors; 944 if (layer != 0) { 945 goto fail; 946 } 947 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 948 949 outbuf[4] = 1; /* DVD-ROM, part version 1 */ 950 outbuf[5] = 0xf; /* 120mm disc, minimum rate unspecified */ 951 outbuf[6] = 1; /* one layer, read-only (per MMC-2 spec) */ 952 outbuf[7] = 0; /* default densities */ 953 954 stl_be_p(&outbuf[12], (nb_sectors >> 2) - 1); /* end sector */ 955 stl_be_p(&outbuf[16], (nb_sectors >> 2) - 1); /* l0 end sector */ 956 break; 957 } 958 959 case 0x01: /* DVD copyright information, all zeros */ 960 break; 961 962 case 0x03: /* BCA information - invalid field for no BCA info */ 963 return -1; 964 965 case 0x04: /* DVD disc manufacturing information, all zeros */ 966 break; 967 968 case 0xff: { /* List capabilities */ 969 int i; 970 size = 4; 971 for (i = 0; i < ARRAY_SIZE(rds_caps_size); i++) { 972 if (!rds_caps_size[i]) { 973 continue; 974 } 975 outbuf[size] = i; 976 outbuf[size + 1] = 0x40; /* Not writable, readable */ 977 stw_be_p(&outbuf[size + 2], rds_caps_size[i]); 978 size += 4; 979 } 980 break; 981 } 982 983 default: 984 return -1; 985 } 986 987 /* Size of buffer, not including 2 byte size field */ 988 stw_be_p(outbuf, size - 2); 989 return size; 990 991 fail: 992 return -1; 993 } 994 995 static int scsi_event_status_media(SCSIDiskState *s, uint8_t *outbuf) 996 { 997 uint8_t event_code, media_status; 998 999 media_status = 0; 1000 if (s->tray_open) { 1001 media_status = MS_TRAY_OPEN; 1002 } else if (blk_is_inserted(s->qdev.conf.blk)) { 1003 media_status = MS_MEDIA_PRESENT; 1004 } 1005 1006 /* Event notification descriptor */ 1007 event_code = MEC_NO_CHANGE; 1008 if (media_status != MS_TRAY_OPEN) { 1009 if (s->media_event) { 1010 event_code = MEC_NEW_MEDIA; 1011 s->media_event = false; 1012 } else if 
(s->eject_request) { 1013 event_code = MEC_EJECT_REQUESTED; 1014 s->eject_request = false; 1015 } 1016 } 1017 1018 outbuf[0] = event_code; 1019 outbuf[1] = media_status; 1020 1021 /* These fields are reserved, just clear them. */ 1022 outbuf[2] = 0; 1023 outbuf[3] = 0; 1024 return 4; 1025 } 1026 1027 static int scsi_get_event_status_notification(SCSIDiskState *s, SCSIDiskReq *r, 1028 uint8_t *outbuf) 1029 { 1030 int size; 1031 uint8_t *buf = r->req.cmd.buf; 1032 uint8_t notification_class_request = buf[4]; 1033 if (s->qdev.type != TYPE_ROM) { 1034 return -1; 1035 } 1036 if ((buf[1] & 1) == 0) { 1037 /* asynchronous */ 1038 return -1; 1039 } 1040 1041 size = 4; 1042 outbuf[0] = outbuf[1] = 0; 1043 outbuf[3] = 1 << GESN_MEDIA; /* supported events */ 1044 if (notification_class_request & (1 << GESN_MEDIA)) { 1045 outbuf[2] = GESN_MEDIA; 1046 size += scsi_event_status_media(s, &outbuf[size]); 1047 } else { 1048 outbuf[2] = 0x80; 1049 } 1050 stw_be_p(outbuf, size - 4); 1051 return size; 1052 } 1053 1054 static int scsi_get_configuration(SCSIDiskState *s, uint8_t *outbuf) 1055 { 1056 int current; 1057 1058 if (s->qdev.type != TYPE_ROM) { 1059 return -1; 1060 } 1061 1062 if (media_is_dvd(s)) { 1063 current = MMC_PROFILE_DVD_ROM; 1064 } else if (media_is_cd(s)) { 1065 current = MMC_PROFILE_CD_ROM; 1066 } else { 1067 current = MMC_PROFILE_NONE; 1068 } 1069 1070 memset(outbuf, 0, 40); 1071 stl_be_p(&outbuf[0], 36); /* Bytes after the data length field */ 1072 stw_be_p(&outbuf[6], current); 1073 /* outbuf[8] - outbuf[19]: Feature 0 - Profile list */ 1074 outbuf[10] = 0x03; /* persistent, current */ 1075 outbuf[11] = 8; /* two profiles */ 1076 stw_be_p(&outbuf[12], MMC_PROFILE_DVD_ROM); 1077 outbuf[14] = (current == MMC_PROFILE_DVD_ROM); 1078 stw_be_p(&outbuf[16], MMC_PROFILE_CD_ROM); 1079 outbuf[18] = (current == MMC_PROFILE_CD_ROM); 1080 /* outbuf[20] - outbuf[31]: Feature 1 - Core feature */ 1081 stw_be_p(&outbuf[20], 1); 1082 outbuf[22] = 0x08 | 0x03; /* version 2, persistent, current */ 1083 outbuf[23] = 8; 1084 stl_be_p(&outbuf[24], 1); /* SCSI */ 1085 outbuf[28] = 1; /* DBE = 1, mandatory */ 1086 /* outbuf[32] - outbuf[39]: Feature 3 - Removable media feature */ 1087 stw_be_p(&outbuf[32], 3); 1088 outbuf[34] = 0x08 | 0x03; /* version 2, persistent, current */ 1089 outbuf[35] = 4; 1090 outbuf[36] = 0x39; /* tray, load=1, eject=1, unlocked at powerup, lock=1 */ 1091 /* TODO: Random readable, CD read, DVD read, drive serial number, 1092 power management */ 1093 return 40; 1094 } 1095 1096 static int scsi_emulate_mechanism_status(SCSIDiskState *s, uint8_t *outbuf) 1097 { 1098 if (s->qdev.type != TYPE_ROM) { 1099 return -1; 1100 } 1101 memset(outbuf, 0, 8); 1102 outbuf[5] = 1; /* CD-ROM */ 1103 return 8; 1104 } 1105 1106 static int mode_sense_page(SCSIDiskState *s, int page, uint8_t **p_outbuf, 1107 int page_control) 1108 { 1109 static const int mode_sense_valid[0x3f] = { 1110 [MODE_PAGE_HD_GEOMETRY] = (1 << TYPE_DISK), 1111 [MODE_PAGE_FLEXIBLE_DISK_GEOMETRY] = (1 << TYPE_DISK), 1112 [MODE_PAGE_CACHING] = (1 << TYPE_DISK) | (1 << TYPE_ROM), 1113 [MODE_PAGE_R_W_ERROR] = (1 << TYPE_DISK) | (1 << TYPE_ROM), 1114 [MODE_PAGE_AUDIO_CTL] = (1 << TYPE_ROM), 1115 [MODE_PAGE_CAPABILITIES] = (1 << TYPE_ROM), 1116 }; 1117 1118 uint8_t *p = *p_outbuf + 2; 1119 int length; 1120 1121 if ((mode_sense_valid[page] & (1 << s->qdev.type)) == 0) { 1122 return -1; 1123 } 1124 1125 /* 1126 * If Changeable Values are requested, a mask denoting those mode parameters 1127 * that are changeable shall be returned. 
As we currently don't support 1128 * parameter changes via MODE_SELECT all bits are returned set to zero. 1129 * The buffer was already menset to zero by the caller of this function. 1130 * 1131 * The offsets here are off by two compared to the descriptions in the 1132 * SCSI specs, because those include a 2-byte header. This is unfortunate, 1133 * but it is done so that offsets are consistent within our implementation 1134 * of MODE SENSE and MODE SELECT. MODE SELECT has to deal with both 1135 * 2-byte and 4-byte headers. 1136 */ 1137 switch (page) { 1138 case MODE_PAGE_HD_GEOMETRY: 1139 length = 0x16; 1140 if (page_control == 1) { /* Changeable Values */ 1141 break; 1142 } 1143 /* if a geometry hint is available, use it */ 1144 p[0] = (s->qdev.conf.cyls >> 16) & 0xff; 1145 p[1] = (s->qdev.conf.cyls >> 8) & 0xff; 1146 p[2] = s->qdev.conf.cyls & 0xff; 1147 p[3] = s->qdev.conf.heads & 0xff; 1148 /* Write precomp start cylinder, disabled */ 1149 p[4] = (s->qdev.conf.cyls >> 16) & 0xff; 1150 p[5] = (s->qdev.conf.cyls >> 8) & 0xff; 1151 p[6] = s->qdev.conf.cyls & 0xff; 1152 /* Reduced current start cylinder, disabled */ 1153 p[7] = (s->qdev.conf.cyls >> 16) & 0xff; 1154 p[8] = (s->qdev.conf.cyls >> 8) & 0xff; 1155 p[9] = s->qdev.conf.cyls & 0xff; 1156 /* Device step rate [ns], 200ns */ 1157 p[10] = 0; 1158 p[11] = 200; 1159 /* Landing zone cylinder */ 1160 p[12] = 0xff; 1161 p[13] = 0xff; 1162 p[14] = 0xff; 1163 /* Medium rotation rate [rpm], 5400 rpm */ 1164 p[18] = (5400 >> 8) & 0xff; 1165 p[19] = 5400 & 0xff; 1166 break; 1167 1168 case MODE_PAGE_FLEXIBLE_DISK_GEOMETRY: 1169 length = 0x1e; 1170 if (page_control == 1) { /* Changeable Values */ 1171 break; 1172 } 1173 /* Transfer rate [kbit/s], 5Mbit/s */ 1174 p[0] = 5000 >> 8; 1175 p[1] = 5000 & 0xff; 1176 /* if a geometry hint is available, use it */ 1177 p[2] = s->qdev.conf.heads & 0xff; 1178 p[3] = s->qdev.conf.secs & 0xff; 1179 p[4] = s->qdev.blocksize >> 8; 1180 p[6] = (s->qdev.conf.cyls >> 8) & 0xff; 1181 p[7] = s->qdev.conf.cyls & 0xff; 1182 /* Write precomp start cylinder, disabled */ 1183 p[8] = (s->qdev.conf.cyls >> 8) & 0xff; 1184 p[9] = s->qdev.conf.cyls & 0xff; 1185 /* Reduced current start cylinder, disabled */ 1186 p[10] = (s->qdev.conf.cyls >> 8) & 0xff; 1187 p[11] = s->qdev.conf.cyls & 0xff; 1188 /* Device step rate [100us], 100us */ 1189 p[12] = 0; 1190 p[13] = 1; 1191 /* Device step pulse width [us], 1us */ 1192 p[14] = 1; 1193 /* Device head settle delay [100us], 100us */ 1194 p[15] = 0; 1195 p[16] = 1; 1196 /* Motor on delay [0.1s], 0.1s */ 1197 p[17] = 1; 1198 /* Motor off delay [0.1s], 0.1s */ 1199 p[18] = 1; 1200 /* Medium rotation rate [rpm], 5400 rpm */ 1201 p[26] = (5400 >> 8) & 0xff; 1202 p[27] = 5400 & 0xff; 1203 break; 1204 1205 case MODE_PAGE_CACHING: 1206 length = 0x12; 1207 if (page_control == 1 || /* Changeable Values */ 1208 blk_enable_write_cache(s->qdev.conf.blk)) { 1209 p[0] = 4; /* WCE */ 1210 } 1211 break; 1212 1213 case MODE_PAGE_R_W_ERROR: 1214 length = 10; 1215 if (page_control == 1) { /* Changeable Values */ 1216 break; 1217 } 1218 p[0] = 0x80; /* Automatic Write Reallocation Enabled */ 1219 if (s->qdev.type == TYPE_ROM) { 1220 p[1] = 0x20; /* Read Retry Count */ 1221 } 1222 break; 1223 1224 case MODE_PAGE_AUDIO_CTL: 1225 length = 14; 1226 break; 1227 1228 case MODE_PAGE_CAPABILITIES: 1229 length = 0x14; 1230 if (page_control == 1) { /* Changeable Values */ 1231 break; 1232 } 1233 1234 p[0] = 0x3b; /* CD-R & CD-RW read */ 1235 p[1] = 0; /* Writing not supported */ 1236 p[2] = 0x7f; /* Audio, 
composite, digital out, 1237 mode 2 form 1&2, multi session */ 1238 p[3] = 0xff; /* CD DA, DA accurate, RW supported, 1239 RW corrected, C2 errors, ISRC, 1240 UPC, Bar code */ 1241 p[4] = 0x2d | (s->tray_locked ? 2 : 0); 1242 /* Locking supported, jumper present, eject, tray */ 1243 p[5] = 0; /* no volume & mute control, no 1244 changer */ 1245 p[6] = (50 * 176) >> 8; /* 50x read speed */ 1246 p[7] = (50 * 176) & 0xff; 1247 p[8] = 2 >> 8; /* Two volume levels */ 1248 p[9] = 2 & 0xff; 1249 p[10] = 2048 >> 8; /* 2M buffer */ 1250 p[11] = 2048 & 0xff; 1251 p[12] = (16 * 176) >> 8; /* 16x read speed current */ 1252 p[13] = (16 * 176) & 0xff; 1253 p[16] = (16 * 176) >> 8; /* 16x write speed */ 1254 p[17] = (16 * 176) & 0xff; 1255 p[18] = (16 * 176) >> 8; /* 16x write speed current */ 1256 p[19] = (16 * 176) & 0xff; 1257 break; 1258 1259 default: 1260 return -1; 1261 } 1262 1263 assert(length < 256); 1264 (*p_outbuf)[0] = page; 1265 (*p_outbuf)[1] = length; 1266 *p_outbuf += length + 2; 1267 return length + 2; 1268 } 1269 1270 static int scsi_disk_emulate_mode_sense(SCSIDiskReq *r, uint8_t *outbuf) 1271 { 1272 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1273 uint64_t nb_sectors; 1274 bool dbd; 1275 int page, buflen, ret, page_control; 1276 uint8_t *p; 1277 uint8_t dev_specific_param; 1278 1279 dbd = (r->req.cmd.buf[1] & 0x8) != 0; 1280 page = r->req.cmd.buf[2] & 0x3f; 1281 page_control = (r->req.cmd.buf[2] & 0xc0) >> 6; 1282 DPRINTF("Mode Sense(%d) (page %d, xfer %zd, page_control %d)\n", 1283 (r->req.cmd.buf[0] == MODE_SENSE) ? 6 : 10, page, r->req.cmd.xfer, page_control); 1284 memset(outbuf, 0, r->req.cmd.xfer); 1285 p = outbuf; 1286 1287 if (s->qdev.type == TYPE_DISK) { 1288 dev_specific_param = s->features & (1 << SCSI_DISK_F_DPOFUA) ? 0x10 : 0; 1289 if (blk_is_read_only(s->qdev.conf.blk)) { 1290 dev_specific_param |= 0x80; /* Readonly. */ 1291 } 1292 } else { 1293 /* MMC prescribes that CD/DVD drives have no block descriptors, 1294 * and defines no device-specific parameter. */ 1295 dev_specific_param = 0x00; 1296 dbd = true; 1297 } 1298 1299 if (r->req.cmd.buf[0] == MODE_SENSE) { 1300 p[1] = 0; /* Default media type. */ 1301 p[2] = dev_specific_param; 1302 p[3] = 0; /* Block descriptor length. */ 1303 p += 4; 1304 } else { /* MODE_SENSE_10 */ 1305 p[2] = 0; /* Default media type. */ 1306 p[3] = dev_specific_param; 1307 p[6] = p[7] = 0; /* Block descriptor length. 
*/ 1308 p += 8; 1309 } 1310 1311 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 1312 if (!dbd && nb_sectors) { 1313 if (r->req.cmd.buf[0] == MODE_SENSE) { 1314 outbuf[3] = 8; /* Block descriptor length */ 1315 } else { /* MODE_SENSE_10 */ 1316 outbuf[7] = 8; /* Block descriptor length */ 1317 } 1318 nb_sectors /= (s->qdev.blocksize / 512); 1319 if (nb_sectors > 0xffffff) { 1320 nb_sectors = 0; 1321 } 1322 p[0] = 0; /* media density code */ 1323 p[1] = (nb_sectors >> 16) & 0xff; 1324 p[2] = (nb_sectors >> 8) & 0xff; 1325 p[3] = nb_sectors & 0xff; 1326 p[4] = 0; /* reserved */ 1327 p[5] = 0; /* bytes 5-7 are the sector size in bytes */ 1328 p[6] = s->qdev.blocksize >> 8; 1329 p[7] = 0; 1330 p += 8; 1331 } 1332 1333 if (page_control == 3) { 1334 /* Saved Values */ 1335 scsi_check_condition(r, SENSE_CODE(SAVING_PARAMS_NOT_SUPPORTED)); 1336 return -1; 1337 } 1338 1339 if (page == 0x3f) { 1340 for (page = 0; page <= 0x3e; page++) { 1341 mode_sense_page(s, page, &p, page_control); 1342 } 1343 } else { 1344 ret = mode_sense_page(s, page, &p, page_control); 1345 if (ret == -1) { 1346 return -1; 1347 } 1348 } 1349 1350 buflen = p - outbuf; 1351 /* 1352 * The mode data length field specifies the length in bytes of the 1353 * following data that is available to be transferred. The mode data 1354 * length does not include itself. 1355 */ 1356 if (r->req.cmd.buf[0] == MODE_SENSE) { 1357 outbuf[0] = buflen - 1; 1358 } else { /* MODE_SENSE_10 */ 1359 outbuf[0] = ((buflen - 2) >> 8) & 0xff; 1360 outbuf[1] = (buflen - 2) & 0xff; 1361 } 1362 return buflen; 1363 } 1364 1365 static int scsi_disk_emulate_read_toc(SCSIRequest *req, uint8_t *outbuf) 1366 { 1367 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 1368 int start_track, format, msf, toclen; 1369 uint64_t nb_sectors; 1370 1371 msf = req->cmd.buf[1] & 2; 1372 format = req->cmd.buf[2] & 0xf; 1373 start_track = req->cmd.buf[6]; 1374 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 1375 DPRINTF("Read TOC (track %d format %d msf %d)\n", start_track, format, msf >> 1); 1376 nb_sectors /= s->qdev.blocksize / 512; 1377 switch (format) { 1378 case 0: 1379 toclen = cdrom_read_toc(nb_sectors, outbuf, msf, start_track); 1380 break; 1381 case 1: 1382 /* multi session : only a single session defined */ 1383 toclen = 12; 1384 memset(outbuf, 0, 12); 1385 outbuf[1] = 0x0a; 1386 outbuf[2] = 0x01; 1387 outbuf[3] = 0x01; 1388 break; 1389 case 2: 1390 toclen = cdrom_read_toc_raw(nb_sectors, outbuf, msf, start_track); 1391 break; 1392 default: 1393 return -1; 1394 } 1395 return toclen; 1396 } 1397 1398 static int scsi_disk_emulate_start_stop(SCSIDiskReq *r) 1399 { 1400 SCSIRequest *req = &r->req; 1401 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 1402 bool start = req->cmd.buf[4] & 1; 1403 bool loej = req->cmd.buf[4] & 2; /* load on start, eject on !start */ 1404 int pwrcnd = req->cmd.buf[4] & 0xf0; 1405 1406 if (pwrcnd) { 1407 /* eject/load only happens for power condition == 0 */ 1408 return 0; 1409 } 1410 1411 if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) && loej) { 1412 if (!start && !s->tray_open && s->tray_locked) { 1413 scsi_check_condition(r, 1414 blk_is_inserted(s->qdev.conf.blk) 1415 ? 
SENSE_CODE(ILLEGAL_REQ_REMOVAL_PREVENTED) 1416 : SENSE_CODE(NOT_READY_REMOVAL_PREVENTED)); 1417 return -1; 1418 } 1419 1420 if (s->tray_open != !start) { 1421 blk_eject(s->qdev.conf.blk, !start); 1422 s->tray_open = !start; 1423 } 1424 } 1425 return 0; 1426 } 1427 1428 static void scsi_disk_emulate_read_data(SCSIRequest *req) 1429 { 1430 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 1431 int buflen = r->iov.iov_len; 1432 1433 if (buflen) { 1434 DPRINTF("Read buf_len=%d\n", buflen); 1435 r->iov.iov_len = 0; 1436 r->started = true; 1437 scsi_req_data(&r->req, buflen); 1438 return; 1439 } 1440 1441 /* This also clears the sense buffer for REQUEST SENSE. */ 1442 scsi_req_complete(&r->req, GOOD); 1443 } 1444 1445 static int scsi_disk_check_mode_select(SCSIDiskState *s, int page, 1446 uint8_t *inbuf, int inlen) 1447 { 1448 uint8_t mode_current[SCSI_MAX_MODE_LEN]; 1449 uint8_t mode_changeable[SCSI_MAX_MODE_LEN]; 1450 uint8_t *p; 1451 int len, expected_len, changeable_len, i; 1452 1453 /* The input buffer does not include the page header, so it is 1454 * off by 2 bytes. 1455 */ 1456 expected_len = inlen + 2; 1457 if (expected_len > SCSI_MAX_MODE_LEN) { 1458 return -1; 1459 } 1460 1461 p = mode_current; 1462 memset(mode_current, 0, inlen + 2); 1463 len = mode_sense_page(s, page, &p, 0); 1464 if (len < 0 || len != expected_len) { 1465 return -1; 1466 } 1467 1468 p = mode_changeable; 1469 memset(mode_changeable, 0, inlen + 2); 1470 changeable_len = mode_sense_page(s, page, &p, 1); 1471 assert(changeable_len == len); 1472 1473 /* Check that unchangeable bits are the same as what MODE SENSE 1474 * would return. 1475 */ 1476 for (i = 2; i < len; i++) { 1477 if (((mode_current[i] ^ inbuf[i - 2]) & ~mode_changeable[i]) != 0) { 1478 return -1; 1479 } 1480 } 1481 return 0; 1482 } 1483 1484 static void scsi_disk_apply_mode_select(SCSIDiskState *s, int page, uint8_t *p) 1485 { 1486 switch (page) { 1487 case MODE_PAGE_CACHING: 1488 blk_set_enable_write_cache(s->qdev.conf.blk, (p[0] & 4) != 0); 1489 break; 1490 1491 default: 1492 break; 1493 } 1494 } 1495 1496 static int mode_select_pages(SCSIDiskReq *r, uint8_t *p, int len, bool change) 1497 { 1498 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1499 1500 while (len > 0) { 1501 int page, subpage, page_len; 1502 1503 /* Parse both possible formats for the mode page headers. 
*/ 1504 page = p[0] & 0x3f; 1505 if (p[0] & 0x40) { 1506 if (len < 4) { 1507 goto invalid_param_len; 1508 } 1509 subpage = p[1]; 1510 page_len = lduw_be_p(&p[2]); 1511 p += 4; 1512 len -= 4; 1513 } else { 1514 if (len < 2) { 1515 goto invalid_param_len; 1516 } 1517 subpage = 0; 1518 page_len = p[1]; 1519 p += 2; 1520 len -= 2; 1521 } 1522 1523 if (subpage) { 1524 goto invalid_param; 1525 } 1526 if (page_len > len) { 1527 goto invalid_param_len; 1528 } 1529 1530 if (!change) { 1531 if (scsi_disk_check_mode_select(s, page, p, page_len) < 0) { 1532 goto invalid_param; 1533 } 1534 } else { 1535 scsi_disk_apply_mode_select(s, page, p); 1536 } 1537 1538 p += page_len; 1539 len -= page_len; 1540 } 1541 return 0; 1542 1543 invalid_param: 1544 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM)); 1545 return -1; 1546 1547 invalid_param_len: 1548 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN)); 1549 return -1; 1550 } 1551 1552 static void scsi_disk_emulate_mode_select(SCSIDiskReq *r, uint8_t *inbuf) 1553 { 1554 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1555 uint8_t *p = inbuf; 1556 int cmd = r->req.cmd.buf[0]; 1557 int len = r->req.cmd.xfer; 1558 int hdr_len = (cmd == MODE_SELECT ? 4 : 8); 1559 int bd_len; 1560 int pass; 1561 1562 /* We only support PF=1, SP=0. */ 1563 if ((r->req.cmd.buf[1] & 0x11) != 0x10) { 1564 goto invalid_field; 1565 } 1566 1567 if (len < hdr_len) { 1568 goto invalid_param_len; 1569 } 1570 1571 bd_len = (cmd == MODE_SELECT ? p[3] : lduw_be_p(&p[6])); 1572 len -= hdr_len; 1573 p += hdr_len; 1574 if (len < bd_len) { 1575 goto invalid_param_len; 1576 } 1577 if (bd_len != 0 && bd_len != 8) { 1578 goto invalid_param; 1579 } 1580 1581 len -= bd_len; 1582 p += bd_len; 1583 1584 /* Ensure no change is made if there is an error! */ 1585 for (pass = 0; pass < 2; pass++) { 1586 if (mode_select_pages(r, p, len, pass == 1) < 0) { 1587 assert(pass == 0); 1588 return; 1589 } 1590 } 1591 if (!blk_enable_write_cache(s->qdev.conf.blk)) { 1592 /* The request is used as the AIO opaque value, so add a ref. */ 1593 scsi_req_ref(&r->req); 1594 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0, 1595 BLOCK_ACCT_FLUSH); 1596 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r); 1597 return; 1598 } 1599 1600 scsi_req_complete(&r->req, GOOD); 1601 return; 1602 1603 invalid_param: 1604 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM)); 1605 return; 1606 1607 invalid_param_len: 1608 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN)); 1609 return; 1610 1611 invalid_field: 1612 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 1613 } 1614 1615 static inline bool check_lba_range(SCSIDiskState *s, 1616 uint64_t sector_num, uint32_t nb_sectors) 1617 { 1618 /* 1619 * The first line tests that no overflow happens when computing the last 1620 * sector. The second line tests that the last accessed sector is in 1621 * range. 1622 * 1623 * Careful, the computations should not underflow for nb_sectors == 0, 1624 * and a 0-block read to the first LBA beyond the end of device is 1625 * valid. 
1626 */ 1627 return (sector_num <= sector_num + nb_sectors && 1628 sector_num + nb_sectors <= s->qdev.max_lba + 1); 1629 } 1630 1631 typedef struct UnmapCBData { 1632 SCSIDiskReq *r; 1633 uint8_t *inbuf; 1634 int count; 1635 } UnmapCBData; 1636 1637 static void scsi_unmap_complete(void *opaque, int ret); 1638 1639 static void scsi_unmap_complete_noio(UnmapCBData *data, int ret) 1640 { 1641 SCSIDiskReq *r = data->r; 1642 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1643 uint64_t sector_num; 1644 uint32_t nb_sectors; 1645 1646 assert(r->req.aiocb == NULL); 1647 if (scsi_disk_req_check_error(r, ret, false)) { 1648 goto done; 1649 } 1650 1651 if (data->count > 0) { 1652 sector_num = ldq_be_p(&data->inbuf[0]); 1653 nb_sectors = ldl_be_p(&data->inbuf[8]) & 0xffffffffULL; 1654 if (!check_lba_range(s, sector_num, nb_sectors)) { 1655 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE)); 1656 goto done; 1657 } 1658 1659 r->req.aiocb = blk_aio_pdiscard(s->qdev.conf.blk, 1660 sector_num * s->qdev.blocksize, 1661 nb_sectors * s->qdev.blocksize, 1662 scsi_unmap_complete, data); 1663 data->count--; 1664 data->inbuf += 16; 1665 return; 1666 } 1667 1668 scsi_req_complete(&r->req, GOOD); 1669 1670 done: 1671 scsi_req_unref(&r->req); 1672 g_free(data); 1673 } 1674 1675 static void scsi_unmap_complete(void *opaque, int ret) 1676 { 1677 UnmapCBData *data = opaque; 1678 SCSIDiskReq *r = data->r; 1679 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1680 1681 assert(r->req.aiocb != NULL); 1682 r->req.aiocb = NULL; 1683 1684 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); 1685 scsi_unmap_complete_noio(data, ret); 1686 aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); 1687 } 1688 1689 static void scsi_disk_emulate_unmap(SCSIDiskReq *r, uint8_t *inbuf) 1690 { 1691 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1692 uint8_t *p = inbuf; 1693 int len = r->req.cmd.xfer; 1694 UnmapCBData *data; 1695 1696 /* Reject ANCHOR=1. */ 1697 if (r->req.cmd.buf[1] & 0x1) { 1698 goto invalid_field; 1699 } 1700 1701 if (len < 8) { 1702 goto invalid_param_len; 1703 } 1704 if (len < lduw_be_p(&p[0]) + 2) { 1705 goto invalid_param_len; 1706 } 1707 if (len < lduw_be_p(&p[2]) + 8) { 1708 goto invalid_param_len; 1709 } 1710 if (lduw_be_p(&p[2]) & 15) { 1711 goto invalid_param_len; 1712 } 1713 1714 if (blk_is_read_only(s->qdev.conf.blk)) { 1715 scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED)); 1716 return; 1717 } 1718 1719 data = g_new0(UnmapCBData, 1); 1720 data->r = r; 1721 data->inbuf = &p[8]; 1722 data->count = lduw_be_p(&p[2]) >> 4; 1723 1724 /* The matching unref is in scsi_unmap_complete, before data is freed. 
*/ 1725 scsi_req_ref(&r->req); 1726 scsi_unmap_complete_noio(data, 0); 1727 return; 1728 1729 invalid_param_len: 1730 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN)); 1731 return; 1732 1733 invalid_field: 1734 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 1735 } 1736 1737 typedef struct WriteSameCBData { 1738 SCSIDiskReq *r; 1739 int64_t sector; 1740 int nb_sectors; 1741 QEMUIOVector qiov; 1742 struct iovec iov; 1743 } WriteSameCBData; 1744 1745 static void scsi_write_same_complete(void *opaque, int ret) 1746 { 1747 WriteSameCBData *data = opaque; 1748 SCSIDiskReq *r = data->r; 1749 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1750 1751 assert(r->req.aiocb != NULL); 1752 r->req.aiocb = NULL; 1753 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); 1754 if (scsi_disk_req_check_error(r, ret, true)) { 1755 goto done; 1756 } 1757 1758 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); 1759 1760 data->nb_sectors -= data->iov.iov_len / 512; 1761 data->sector += data->iov.iov_len / 512; 1762 data->iov.iov_len = MIN(data->nb_sectors * 512, data->iov.iov_len); 1763 if (data->iov.iov_len) { 1764 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 1765 data->iov.iov_len, BLOCK_ACCT_WRITE); 1766 /* Reinitialize qiov, to handle unaligned WRITE SAME request 1767 * where final qiov may need smaller size */ 1768 qemu_iovec_init_external(&data->qiov, &data->iov, 1); 1769 r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk, 1770 data->sector << BDRV_SECTOR_BITS, 1771 &data->qiov, 0, 1772 scsi_write_same_complete, data); 1773 aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); 1774 return; 1775 } 1776 1777 scsi_req_complete(&r->req, GOOD); 1778 1779 done: 1780 scsi_req_unref(&r->req); 1781 qemu_vfree(data->iov.iov_base); 1782 g_free(data); 1783 aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); 1784 } 1785 1786 static void scsi_disk_emulate_write_same(SCSIDiskReq *r, uint8_t *inbuf) 1787 { 1788 SCSIRequest *req = &r->req; 1789 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 1790 uint32_t nb_sectors = scsi_data_cdb_xfer(r->req.cmd.buf); 1791 WriteSameCBData *data; 1792 uint8_t *buf; 1793 int i; 1794 1795 /* Fail if PBDATA=1 or LBDATA=1 or ANCHOR=1. */ 1796 if (nb_sectors == 0 || (req->cmd.buf[1] & 0x16)) { 1797 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 1798 return; 1799 } 1800 1801 if (blk_is_read_only(s->qdev.conf.blk)) { 1802 scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED)); 1803 return; 1804 } 1805 if (!check_lba_range(s, r->req.cmd.lba, nb_sectors)) { 1806 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE)); 1807 return; 1808 } 1809 1810 if ((req->cmd.buf[1] & 0x1) || buffer_is_zero(inbuf, s->qdev.blocksize)) { 1811 int flags = (req->cmd.buf[1] & 0x8) ? BDRV_REQ_MAY_UNMAP : 0; 1812 1813 /* The request is used as the AIO opaque value, so add a ref. 
*/ 1814 scsi_req_ref(&r->req); 1815 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 1816 nb_sectors * s->qdev.blocksize, 1817 BLOCK_ACCT_WRITE); 1818 r->req.aiocb = blk_aio_pwrite_zeroes(s->qdev.conf.blk, 1819 r->req.cmd.lba * s->qdev.blocksize, 1820 nb_sectors * s->qdev.blocksize, 1821 flags, scsi_aio_complete, r); 1822 return; 1823 } 1824 1825 data = g_new0(WriteSameCBData, 1); 1826 data->r = r; 1827 data->sector = r->req.cmd.lba * (s->qdev.blocksize / 512); 1828 data->nb_sectors = nb_sectors * (s->qdev.blocksize / 512); 1829 data->iov.iov_len = MIN(data->nb_sectors * 512, SCSI_WRITE_SAME_MAX); 1830 data->iov.iov_base = buf = blk_blockalign(s->qdev.conf.blk, 1831 data->iov.iov_len); 1832 qemu_iovec_init_external(&data->qiov, &data->iov, 1); 1833 1834 for (i = 0; i < data->iov.iov_len; i += s->qdev.blocksize) { 1835 memcpy(&buf[i], inbuf, s->qdev.blocksize); 1836 } 1837 1838 scsi_req_ref(&r->req); 1839 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 1840 data->iov.iov_len, BLOCK_ACCT_WRITE); 1841 r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk, 1842 data->sector << BDRV_SECTOR_BITS, 1843 &data->qiov, 0, 1844 scsi_write_same_complete, data); 1845 } 1846 1847 static void scsi_disk_emulate_write_data(SCSIRequest *req) 1848 { 1849 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 1850 1851 if (r->iov.iov_len) { 1852 int buflen = r->iov.iov_len; 1853 DPRINTF("Write buf_len=%d\n", buflen); 1854 r->iov.iov_len = 0; 1855 scsi_req_data(&r->req, buflen); 1856 return; 1857 } 1858 1859 switch (req->cmd.buf[0]) { 1860 case MODE_SELECT: 1861 case MODE_SELECT_10: 1862 /* This also clears the sense buffer for REQUEST SENSE. */ 1863 scsi_disk_emulate_mode_select(r, r->iov.iov_base); 1864 break; 1865 1866 case UNMAP: 1867 scsi_disk_emulate_unmap(r, r->iov.iov_base); 1868 break; 1869 1870 case VERIFY_10: 1871 case VERIFY_12: 1872 case VERIFY_16: 1873 if (r->req.status == -1) { 1874 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 1875 } 1876 break; 1877 1878 case WRITE_SAME_10: 1879 case WRITE_SAME_16: 1880 scsi_disk_emulate_write_same(r, r->iov.iov_base); 1881 break; 1882 1883 default: 1884 abort(); 1885 } 1886 } 1887 1888 static int32_t scsi_disk_emulate_command(SCSIRequest *req, uint8_t *buf) 1889 { 1890 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 1891 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 1892 uint64_t nb_sectors; 1893 uint8_t *outbuf; 1894 int buflen; 1895 1896 switch (req->cmd.buf[0]) { 1897 case INQUIRY: 1898 case MODE_SENSE: 1899 case MODE_SENSE_10: 1900 case RESERVE: 1901 case RESERVE_10: 1902 case RELEASE: 1903 case RELEASE_10: 1904 case START_STOP: 1905 case ALLOW_MEDIUM_REMOVAL: 1906 case GET_CONFIGURATION: 1907 case GET_EVENT_STATUS_NOTIFICATION: 1908 case MECHANISM_STATUS: 1909 case REQUEST_SENSE: 1910 break; 1911 1912 default: 1913 if (!blk_is_available(s->qdev.conf.blk)) { 1914 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM)); 1915 return 0; 1916 } 1917 break; 1918 } 1919 1920 /* 1921 * FIXME: we shouldn't return anything bigger than 4k, but the code 1922 * requires the buffer to be as big as req->cmd.xfer in several 1923 * places. So, do not allow CDBs with a very large ALLOCATION 1924 * LENGTH. The real fix would be to modify scsi_read_data and 1925 * dma_buf_read, so that they return data beyond the buflen 1926 * as all zeros. 
1927 */ 1928 if (req->cmd.xfer > 65536) { 1929 goto illegal_request; 1930 } 1931 r->buflen = MAX(4096, req->cmd.xfer); 1932 1933 if (!r->iov.iov_base) { 1934 r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen); 1935 } 1936 1937 buflen = req->cmd.xfer; 1938 outbuf = r->iov.iov_base; 1939 memset(outbuf, 0, r->buflen); 1940 switch (req->cmd.buf[0]) { 1941 case TEST_UNIT_READY: 1942 assert(blk_is_available(s->qdev.conf.blk)); 1943 break; 1944 case INQUIRY: 1945 buflen = scsi_disk_emulate_inquiry(req, outbuf); 1946 if (buflen < 0) { 1947 goto illegal_request; 1948 } 1949 break; 1950 case MODE_SENSE: 1951 case MODE_SENSE_10: 1952 buflen = scsi_disk_emulate_mode_sense(r, outbuf); 1953 if (buflen < 0) { 1954 goto illegal_request; 1955 } 1956 break; 1957 case READ_TOC: 1958 buflen = scsi_disk_emulate_read_toc(req, outbuf); 1959 if (buflen < 0) { 1960 goto illegal_request; 1961 } 1962 break; 1963 case RESERVE: 1964 if (req->cmd.buf[1] & 1) { 1965 goto illegal_request; 1966 } 1967 break; 1968 case RESERVE_10: 1969 if (req->cmd.buf[1] & 3) { 1970 goto illegal_request; 1971 } 1972 break; 1973 case RELEASE: 1974 if (req->cmd.buf[1] & 1) { 1975 goto illegal_request; 1976 } 1977 break; 1978 case RELEASE_10: 1979 if (req->cmd.buf[1] & 3) { 1980 goto illegal_request; 1981 } 1982 break; 1983 case START_STOP: 1984 if (scsi_disk_emulate_start_stop(r) < 0) { 1985 return 0; 1986 } 1987 break; 1988 case ALLOW_MEDIUM_REMOVAL: 1989 s->tray_locked = req->cmd.buf[4] & 1; 1990 blk_lock_medium(s->qdev.conf.blk, req->cmd.buf[4] & 1); 1991 break; 1992 case READ_CAPACITY_10: 1993 /* The normal LEN field for this command is zero. */ 1994 memset(outbuf, 0, 8); 1995 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 1996 if (!nb_sectors) { 1997 scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY)); 1998 return 0; 1999 } 2000 if ((req->cmd.buf[8] & 1) == 0 && req->cmd.lba) { 2001 goto illegal_request; 2002 } 2003 nb_sectors /= s->qdev.blocksize / 512; 2004 /* Returned value is the address of the last sector. */ 2005 nb_sectors--; 2006 /* Remember the new size for read/write sanity checking. */ 2007 s->qdev.max_lba = nb_sectors; 2008 /* Clip to 2TB, instead of returning capacity modulo 2TB. */ 2009 if (nb_sectors > UINT32_MAX) { 2010 nb_sectors = UINT32_MAX; 2011 } 2012 outbuf[0] = (nb_sectors >> 24) & 0xff; 2013 outbuf[1] = (nb_sectors >> 16) & 0xff; 2014 outbuf[2] = (nb_sectors >> 8) & 0xff; 2015 outbuf[3] = nb_sectors & 0xff; 2016 outbuf[4] = 0; 2017 outbuf[5] = 0; 2018 outbuf[6] = s->qdev.blocksize >> 8; 2019 outbuf[7] = 0; 2020 break; 2021 case REQUEST_SENSE: 2022 /* Just return "NO SENSE". 
    case REQUEST_SENSE:
        /* Just return "NO SENSE".  */
        buflen = scsi_convert_sense(NULL, 0, outbuf, r->buflen,
                                    (req->cmd.buf[1] & 1) == 0);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case MECHANISM_STATUS:
        buflen = scsi_emulate_mechanism_status(s, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case GET_CONFIGURATION:
        buflen = scsi_get_configuration(s, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case GET_EVENT_STATUS_NOTIFICATION:
        buflen = scsi_get_event_status_notification(s, r, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case READ_DISC_INFORMATION:
        buflen = scsi_read_disc_information(s, r, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case READ_DVD_STRUCTURE:
        buflen = scsi_read_dvd_structure(s, r, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case SERVICE_ACTION_IN_16:
        /* Service Action In subcommands. */
        if ((req->cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) {
            DPRINTF("SAI READ CAPACITY(16)\n");
            memset(outbuf, 0, req->cmd.xfer);
            blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
            if (!nb_sectors) {
                scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY));
                return 0;
            }
            if ((req->cmd.buf[14] & 1) == 0 && req->cmd.lba) {
                goto illegal_request;
            }
            nb_sectors /= s->qdev.blocksize / 512;
            /* Returned value is the address of the last sector.  */
            nb_sectors--;
            /* Remember the new size for read/write sanity checking. */
            s->qdev.max_lba = nb_sectors;
            outbuf[0] = (nb_sectors >> 56) & 0xff;
            outbuf[1] = (nb_sectors >> 48) & 0xff;
            outbuf[2] = (nb_sectors >> 40) & 0xff;
            outbuf[3] = (nb_sectors >> 32) & 0xff;
            outbuf[4] = (nb_sectors >> 24) & 0xff;
            outbuf[5] = (nb_sectors >> 16) & 0xff;
            outbuf[6] = (nb_sectors >> 8) & 0xff;
            outbuf[7] = nb_sectors & 0xff;
            outbuf[8] = 0;
            outbuf[9] = 0;
            outbuf[10] = s->qdev.blocksize >> 8;
            outbuf[11] = 0;
            outbuf[12] = 0;
            outbuf[13] = get_physical_block_exp(&s->qdev.conf);

            /* set TPE bit if the format supports discard */
            if (s->qdev.conf.discard_granularity) {
                outbuf[14] = 0x80;
            }

            /* Protection, exponent and lowest lba field left blank. */
            break;
        }
        DPRINTF("Unsupported Service Action In\n");
        goto illegal_request;
    case SYNCHRONIZE_CACHE:
        /* The request is used as the AIO opaque value, so add a ref.  */
        scsi_req_ref(&r->req);
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
        return 0;
    case SEEK_10:
        DPRINTF("Seek(10) (sector %" PRId64 ")\n", r->req.cmd.lba);
        if (r->req.cmd.lba > s->qdev.max_lba) {
            goto illegal_lba;
        }
        break;
    case MODE_SELECT:
        DPRINTF("Mode Select(6) (len %lu)\n", (unsigned long)r->req.cmd.xfer);
        break;
    case MODE_SELECT_10:
        DPRINTF("Mode Select(10) (len %lu)\n", (unsigned long)r->req.cmd.xfer);
        break;
    case UNMAP:
        DPRINTF("Unmap (len %lu)\n", (unsigned long)r->req.cmd.xfer);
        break;
    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
        DPRINTF("Verify (bytchk %d)\n", (req->cmd.buf[1] >> 1) & 3);
        if (req->cmd.buf[1] & 6) {
            goto illegal_request;
        }
        break;
    case WRITE_SAME_10:
    case WRITE_SAME_16:
        DPRINTF("WRITE SAME %d (len %lu)\n",
                req->cmd.buf[0] == WRITE_SAME_10 ? 10 : 16,
                (unsigned long)r->req.cmd.xfer);
        break;
    default:
        DPRINTF("Unknown SCSI command (%2.2x=%s)\n", buf[0],
                scsi_command_name(buf[0]));
        scsi_check_condition(r, SENSE_CODE(INVALID_OPCODE));
        return 0;
    }
    assert(!r->req.aiocb);
    r->iov.iov_len = MIN(r->buflen, req->cmd.xfer);
    if (r->iov.iov_len == 0) {
        scsi_req_complete(&r->req, GOOD);
    }
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        assert(r->iov.iov_len == req->cmd.xfer);
        return -r->iov.iov_len;
    } else {
        return r->iov.iov_len;
    }

illegal_request:
    if (r->req.status == -1) {
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
    }
    return 0;

illegal_lba:
    scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
    return 0;
}

/* Execute a scsi command.  Returns the length of the data expected by the
   command.  This will be positive for data transfers from the device
   (e.g. disk reads), negative for transfers to the device (e.g. disk writes),
   and zero if the command does not transfer any data.  */

static int32_t scsi_disk_dma_command(SCSIRequest *req, uint8_t *buf)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
    uint32_t len;
    uint8_t command;

    command = buf[0];

    if (!blk_is_available(s->qdev.conf.blk)) {
        scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
        return 0;
    }

    len = scsi_data_cdb_xfer(r->req.cmd.buf);
    switch (command) {
    case READ_6:
    case READ_10:
    case READ_12:
    case READ_16:
        DPRINTF("Read (sector %" PRId64 ", count %u)\n", r->req.cmd.lba, len);
        if (r->req.cmd.buf[1] & 0xe0) {
            goto illegal_request;
        }
        if (!check_lba_range(s, r->req.cmd.lba, len)) {
            goto illegal_lba;
        }
        r->sector = r->req.cmd.lba * (s->qdev.blocksize / 512);
        r->sector_count = len * (s->qdev.blocksize / 512);
        break;
    case WRITE_6:
    case WRITE_10:
    case WRITE_12:
    case WRITE_16:
    case WRITE_VERIFY_10:
    case WRITE_VERIFY_12:
    case WRITE_VERIFY_16:
        if (blk_is_read_only(s->qdev.conf.blk)) {
            scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
            return 0;
        }
        DPRINTF("Write %s(sector %" PRId64 ", count %u)\n",
                (command & 0xe) == 0xe ? "And Verify " : "",
                r->req.cmd.lba, len);
        /* fall through */
    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
        /* We get here only for BYTCHK == 0x01 and only for scsi-block.
         * As far as DMA is concerned, we can treat it the same as a write;
         * scsi_block_do_sgio will send VERIFY commands.
         */
        if (r->req.cmd.buf[1] & 0xe0) {
            goto illegal_request;
        }
        if (!check_lba_range(s, r->req.cmd.lba, len)) {
            goto illegal_lba;
        }
        r->sector = r->req.cmd.lba * (s->qdev.blocksize / 512);
        r->sector_count = len * (s->qdev.blocksize / 512);
        break;
    default:
        abort();
    illegal_request:
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        return 0;
    illegal_lba:
        scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
        return 0;
    }
    r->need_fua_emulation = sdc->need_fua_emulation(&r->req.cmd);
    if (r->sector_count == 0) {
        scsi_req_complete(&r->req, GOOD);
    }
    assert(r->iov.iov_len == 0);
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        return -r->sector_count * 512;
    } else {
        return r->sector_count * 512;
    }
}

static void scsi_disk_reset(DeviceState *dev)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev.qdev, dev);
    uint64_t nb_sectors;

    scsi_device_purge_requests(&s->qdev, SENSE_CODE(RESET));

    blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
    nb_sectors /= s->qdev.blocksize / 512;
    if (nb_sectors) {
        nb_sectors--;
    }
    s->qdev.max_lba = nb_sectors;
    /* reset tray statuses */
    s->tray_locked = 0;
    s->tray_open = 0;
}

static void scsi_disk_resize_cb(void *opaque)
{
    SCSIDiskState *s = opaque;

    /* SPC lists this sense code as available only for
     * direct-access devices.
     */
    if (s->qdev.type == TYPE_DISK) {
        scsi_device_report_change(&s->qdev, SENSE_CODE(CAPACITY_CHANGED));
    }
}

static void scsi_cd_change_media_cb(void *opaque, bool load, Error **errp)
{
    SCSIDiskState *s = opaque;

    /*
     * When a CD gets changed, we have to report an ejected state and
     * then a loaded state to guests so that they detect tray
     * open/close and media change events.  Guests that do not use
     * GET_EVENT_STATUS_NOTIFICATION to detect such tray open/close
     * states rely on this behavior.
     *
     * media_changed governs the state machine used for unit attention
     * report.  media_event is used by GET EVENT STATUS NOTIFICATION.
     */
    s->media_changed = load;
    s->tray_open = !load;
    scsi_device_set_ua(&s->qdev, SENSE_CODE(UNIT_ATTENTION_NO_MEDIUM));
    s->media_event = true;
    s->eject_request = false;
}

static void scsi_cd_eject_request_cb(void *opaque, bool force)
{
    SCSIDiskState *s = opaque;

    s->eject_request = true;
    if (force) {
        s->tray_locked = false;
    }
}

static bool scsi_cd_is_tray_open(void *opaque)
{
    return ((SCSIDiskState *)opaque)->tray_open;
}

static bool scsi_cd_is_medium_locked(void *opaque)
{
    return ((SCSIDiskState *)opaque)->tray_locked;
}

static const BlockDevOps scsi_disk_removable_block_ops = {
    .change_media_cb = scsi_cd_change_media_cb,
    .eject_request_cb = scsi_cd_eject_request_cb,
    .is_tray_open = scsi_cd_is_tray_open,
    .is_medium_locked = scsi_cd_is_medium_locked,

    .resize_cb = scsi_disk_resize_cb,
};

static const BlockDevOps scsi_disk_block_ops = {
    .resize_cb = scsi_disk_resize_cb,
};

static void scsi_disk_unit_attention_reported(SCSIDevice *dev)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
    if (s->media_changed) {
        s->media_changed = false;
        scsi_device_set_ua(&s->qdev, SENSE_CODE(MEDIUM_CHANGED));
    }
}

static void scsi_realize(SCSIDevice *dev, Error **errp)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);

    if (!s->qdev.conf.blk) {
        error_setg(errp, "drive property not set");
        return;
    }

    if (!(s->features & (1 << SCSI_DISK_F_REMOVABLE)) &&
        !blk_is_inserted(s->qdev.conf.blk)) {
        error_setg(errp, "Device needs media, but drive is empty");
        return;
    }

    blkconf_serial(&s->qdev.conf, &s->serial);
    blkconf_blocksizes(&s->qdev.conf);

    if (s->qdev.conf.logical_block_size >
        s->qdev.conf.physical_block_size) {
        error_setg(errp,
                   "logical_block_size > physical_block_size not supported");
        return;
    }

    if (dev->type == TYPE_DISK) {
        if (!blkconf_geometry(&dev->conf, NULL, 65535, 255, 255, errp)) {
            return;
        }
    }
    if (!blkconf_apply_backend_options(&dev->conf,
                                       blk_is_read_only(s->qdev.conf.blk),
                                       dev->type == TYPE_DISK, errp)) {
        return;
    }

    if (s->qdev.conf.discard_granularity == -1) {
        s->qdev.conf.discard_granularity =
            MAX(s->qdev.conf.logical_block_size, DEFAULT_DISCARD_GRANULARITY);
    }

    if (!s->version) {
        s->version = g_strdup(qemu_hw_version());
    }
    if (!s->vendor) {
        s->vendor = g_strdup("QEMU");
    }

    if (blk_is_sg(s->qdev.conf.blk)) {
        error_setg(errp, "unwanted /dev/sg*");
        return;
    }

    if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) &&
        !(s->features & (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS))) {
        blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_removable_block_ops, s);
    } else {
        blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_block_ops, s);
    }
    blk_set_guest_block_size(s->qdev.conf.blk, s->qdev.blocksize);

    blk_iostatus_enable(s->qdev.conf.blk);
}
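/*
 * Descriptive note: the type-specific realize hooks below (scsi-hd, scsi-cd,
 * the legacy scsi-disk and, on Linux, scsi-block) set up their per-type
 * defaults and then funnel into scsi_realize() above.
 */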
static void scsi_hd_realize(SCSIDevice *dev, Error **errp)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);

    /* This can happen for devices without a drive; the error message for
     * the missing backend will be issued in scsi_realize.
     */
    if (s->qdev.conf.blk) {
        blkconf_blocksizes(&s->qdev.conf);
    }
    s->qdev.blocksize = s->qdev.conf.logical_block_size;
    s->qdev.type = TYPE_DISK;
    if (!s->product) {
        s->product = g_strdup("QEMU HARDDISK");
    }
    scsi_realize(&s->qdev, errp);
}

static void scsi_cd_realize(SCSIDevice *dev, Error **errp)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
    int ret;

    if (!dev->conf.blk) {
        /* Anonymous BlockBackend for an empty drive.  As we put it into
         * dev->conf, qdev takes care of detaching on unplug. */
        dev->conf.blk = blk_new(0, BLK_PERM_ALL);
        ret = blk_attach_dev(dev->conf.blk, &dev->qdev);
        assert(ret == 0);
    }

    s->qdev.blocksize = 2048;
    s->qdev.type = TYPE_ROM;
    s->features |= 1 << SCSI_DISK_F_REMOVABLE;
    if (!s->product) {
        s->product = g_strdup("QEMU CD-ROM");
    }
    scsi_realize(&s->qdev, errp);
}

static void scsi_disk_realize(SCSIDevice *dev, Error **errp)
{
    DriveInfo *dinfo;
    Error *local_err = NULL;

    if (!dev->conf.blk) {
        scsi_realize(dev, &local_err);
        assert(local_err);
        error_propagate(errp, local_err);
        return;
    }

    dinfo = blk_legacy_dinfo(dev->conf.blk);
    if (dinfo && dinfo->media_cd) {
        scsi_cd_realize(dev, errp);
    } else {
        scsi_hd_realize(dev, errp);
    }
}

static const SCSIReqOps scsi_disk_emulate_reqops = {
    .size = sizeof(SCSIDiskReq),
    .free_req = scsi_free_request,
    .send_command = scsi_disk_emulate_command,
    .read_data = scsi_disk_emulate_read_data,
    .write_data = scsi_disk_emulate_write_data,
    .get_buf = scsi_get_buf,
};

static const SCSIReqOps scsi_disk_dma_reqops = {
    .size = sizeof(SCSIDiskReq),
    .free_req = scsi_free_request,
    .send_command = scsi_disk_dma_command,
    .read_data = scsi_read_data,
    .write_data = scsi_write_data,
    .get_buf = scsi_get_buf,
    .load_request = scsi_disk_load_request,
    .save_request = scsi_disk_save_request,
};
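/*
 * Descriptive note: the per-opcode dispatch table below routes commands
 * either to full emulation (scsi_disk_emulate_reqops) or to host block I/O
 * (scsi_disk_dma_reqops).  Opcodes that are absent fall back to the
 * emulation reqops in scsi_new_request().
 */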
static const SCSIReqOps *const scsi_disk_reqops_dispatch[256] = {
    [TEST_UNIT_READY] = &scsi_disk_emulate_reqops,
    [INQUIRY] = &scsi_disk_emulate_reqops,
    [MODE_SENSE] = &scsi_disk_emulate_reqops,
    [MODE_SENSE_10] = &scsi_disk_emulate_reqops,
    [START_STOP] = &scsi_disk_emulate_reqops,
    [ALLOW_MEDIUM_REMOVAL] = &scsi_disk_emulate_reqops,
    [READ_CAPACITY_10] = &scsi_disk_emulate_reqops,
    [READ_TOC] = &scsi_disk_emulate_reqops,
    [READ_DVD_STRUCTURE] = &scsi_disk_emulate_reqops,
    [READ_DISC_INFORMATION] = &scsi_disk_emulate_reqops,
    [GET_CONFIGURATION] = &scsi_disk_emulate_reqops,
    [GET_EVENT_STATUS_NOTIFICATION] = &scsi_disk_emulate_reqops,
    [MECHANISM_STATUS] = &scsi_disk_emulate_reqops,
    [SERVICE_ACTION_IN_16] = &scsi_disk_emulate_reqops,
    [REQUEST_SENSE] = &scsi_disk_emulate_reqops,
    [SYNCHRONIZE_CACHE] = &scsi_disk_emulate_reqops,
    [SEEK_10] = &scsi_disk_emulate_reqops,
    [MODE_SELECT] = &scsi_disk_emulate_reqops,
    [MODE_SELECT_10] = &scsi_disk_emulate_reqops,
    [UNMAP] = &scsi_disk_emulate_reqops,
    [WRITE_SAME_10] = &scsi_disk_emulate_reqops,
    [WRITE_SAME_16] = &scsi_disk_emulate_reqops,
    [VERIFY_10] = &scsi_disk_emulate_reqops,
    [VERIFY_12] = &scsi_disk_emulate_reqops,
    [VERIFY_16] = &scsi_disk_emulate_reqops,

    [READ_6] = &scsi_disk_dma_reqops,
    [READ_10] = &scsi_disk_dma_reqops,
    [READ_12] = &scsi_disk_dma_reqops,
    [READ_16] = &scsi_disk_dma_reqops,
    [WRITE_6] = &scsi_disk_dma_reqops,
    [WRITE_10] = &scsi_disk_dma_reqops,
    [WRITE_12] = &scsi_disk_dma_reqops,
    [WRITE_16] = &scsi_disk_dma_reqops,
    [WRITE_VERIFY_10] = &scsi_disk_dma_reqops,
    [WRITE_VERIFY_12] = &scsi_disk_dma_reqops,
    [WRITE_VERIFY_16] = &scsi_disk_dma_reqops,
};

static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun,
                                     uint8_t *buf, void *hba_private)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);
    SCSIRequest *req;
    const SCSIReqOps *ops;
    uint8_t command;

    command = buf[0];
    ops = scsi_disk_reqops_dispatch[command];
    if (!ops) {
        ops = &scsi_disk_emulate_reqops;
    }
    req = scsi_req_alloc(ops, &s->qdev, tag, lun, hba_private);

#ifdef DEBUG_SCSI
    DPRINTF("Command: lun=%d tag=0x%x data=0x%02x", lun, tag, buf[0]);
    {
        int i;
        for (i = 1; i < scsi_cdb_length(buf); i++) {
            printf(" 0x%02x", buf[i]);
        }
        printf("\n");
    }
#endif

    return req;
}

#ifdef __linux__
static int get_device_type(SCSIDiskState *s)
{
    uint8_t cmd[16];
    uint8_t buf[36];
    uint8_t sensebuf[8];
    sg_io_hdr_t io_header;
    int ret;

    memset(cmd, 0, sizeof(cmd));
    memset(buf, 0, sizeof(buf));
    cmd[0] = INQUIRY;
    cmd[4] = sizeof(buf);

    memset(&io_header, 0, sizeof(io_header));
    io_header.interface_id = 'S';
    io_header.dxfer_direction = SG_DXFER_FROM_DEV;
    io_header.dxfer_len = sizeof(buf);
    io_header.dxferp = buf;
    io_header.cmdp = cmd;
    io_header.cmd_len = sizeof(cmd);
    io_header.mx_sb_len = sizeof(sensebuf);
    io_header.sbp = sensebuf;
    io_header.timeout = 6000; /* XXX */

    ret = blk_ioctl(s->qdev.conf.blk, SG_IO, &io_header);
    if (ret < 0 || io_header.driver_status || io_header.host_status) {
        return -1;
    }
    s->qdev.type = buf[0];
    if (buf[1] & 0x80) {
        s->features |= 1 << SCSI_DISK_F_REMOVABLE;
    }
    return 0;
}

static void scsi_block_realize(SCSIDevice *dev, Error **errp)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
    int sg_version;
    int rc;

    if (!s->qdev.conf.blk) {
        error_setg(errp, "drive property not set");
        return;
    }

    /* check we are using a driver managing SG_IO (version 3 and after) */
    rc = blk_ioctl(s->qdev.conf.blk, SG_GET_VERSION_NUM, &sg_version);
    if (rc < 0) {
        error_setg(errp, "cannot get SG_IO version number: %s. "
                         "Is this a SCSI device?",
                   strerror(-rc));
        return;
    }
    if (sg_version < 30000) {
        error_setg(errp, "scsi generic interface too old");
        return;
    }

    /* get device type from INQUIRY data */
    rc = get_device_type(s);
    if (rc < 0) {
        error_setg(errp, "INQUIRY failed");
        return;
    }

    /* Make a guess for the block size; we'll fix it when the guest sends
     * READ CAPACITY.  If the guest never does, it would likely assume
     * these sizes anyway.  (TODO: check in /sys.)
     */
    if (s->qdev.type == TYPE_ROM || s->qdev.type == TYPE_WORM) {
        s->qdev.blocksize = 2048;
    } else {
        s->qdev.blocksize = 512;
    }

    /* Mark the scsi-block device as non-removable so that it cannot be
     * ejected with the HMP and QMP eject commands.
     */
    s->features |= (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS);

    scsi_realize(&s->qdev, errp);
    scsi_generic_read_device_identification(&s->qdev);
}

typedef struct SCSIBlockReq {
    SCSIDiskReq req;
    sg_io_hdr_t io_header;

    /* Selected bytes of the original CDB, copied into our own CDB.  */
    uint8_t cmd, cdb1, group_number;

    /* CDB passed to SG_IO.  */
    uint8_t cdb[16];
} SCSIBlockReq;

static BlockAIOCB *scsi_block_do_sgio(SCSIBlockReq *req,
                                      int64_t offset, QEMUIOVector *iov,
                                      int direction,
                                      BlockCompletionFunc *cb, void *opaque)
{
    sg_io_hdr_t *io_header = &req->io_header;
    SCSIDiskReq *r = &req->req;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    int nb_logical_blocks;
    uint64_t lba;
    BlockAIOCB *aiocb;

    /* This is not supported yet.  It can only happen if the guest does
     * reads and writes that are not aligned to the logical sector size
     * _and_ cover multiple MemoryRegions.
     */
    assert(offset % s->qdev.blocksize == 0);
    assert(iov->size % s->qdev.blocksize == 0);

    io_header->interface_id = 'S';

    /* The data transfer comes from the QEMUIOVector.  */
    io_header->dxfer_direction = direction;
    io_header->dxfer_len = iov->size;
    io_header->dxferp = (void *)iov->iov;
    io_header->iovec_count = iov->niov;
    assert(io_header->iovec_count == iov->niov); /* no overflow! */

    /* Build a new CDB with the LBA and length patched in, in case
     * DMA helpers split the transfer into multiple segments.  Do not
     * build a CDB smaller than what the guest wanted, and only build
     * a larger one if strictly necessary.
     */
    io_header->cmdp = req->cdb;
    lba = offset / s->qdev.blocksize;
    nb_logical_blocks = io_header->dxfer_len / s->qdev.blocksize;

    if ((req->cmd >> 5) == 0 && lba <= 0x1ffff) {
        /* 6-byte CDB */
        stl_be_p(&req->cdb[0], lba | (req->cmd << 24));
        req->cdb[4] = nb_logical_blocks;
        req->cdb[5] = 0;
        io_header->cmd_len = 6;
    } else if ((req->cmd >> 5) <= 1 && lba <= 0xffffffffULL) {
        /* 10-byte CDB */
        req->cdb[0] = (req->cmd & 0x1f) | 0x20;
        req->cdb[1] = req->cdb1;
        stl_be_p(&req->cdb[2], lba);
        req->cdb[6] = req->group_number;
        stw_be_p(&req->cdb[7], nb_logical_blocks);
        req->cdb[9] = 0;
        io_header->cmd_len = 10;
    } else if ((req->cmd >> 5) != 4 && lba <= 0xffffffffULL) {
        /* 12-byte CDB */
        req->cdb[0] = (req->cmd & 0x1f) | 0xA0;
        req->cdb[1] = req->cdb1;
        stl_be_p(&req->cdb[2], lba);
        stl_be_p(&req->cdb[6], nb_logical_blocks);
        req->cdb[10] = req->group_number;
        req->cdb[11] = 0;
        io_header->cmd_len = 12;
    } else {
        /* 16-byte CDB */
        req->cdb[0] = (req->cmd & 0x1f) | 0x80;
        req->cdb[1] = req->cdb1;
        stq_be_p(&req->cdb[2], lba);
        stl_be_p(&req->cdb[10], nb_logical_blocks);
        req->cdb[14] = req->group_number;
        req->cdb[15] = 0;
        io_header->cmd_len = 16;
    }
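    /*
     * Illustrative example: if the DMA helpers split a guest WRITE(10) and
     * the current segment starts at LBA 0x12345 with 16 logical blocks, the
     * code above rebuilds the CDB as 2a 00 00 01 23 45 00 00 10 00
     * (assuming byte 1 and the group number are zero): opcode, byte 1,
     * big-endian LBA, group number, big-endian transfer length, control.
     */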
    /* The rest is as in scsi-generic.c.  */
    io_header->mx_sb_len = sizeof(r->req.sense);
    io_header->sbp = r->req.sense;
    io_header->timeout = UINT_MAX;
    io_header->usr_ptr = r;
    io_header->flags |= SG_FLAG_DIRECT_IO;

    aiocb = blk_aio_ioctl(s->qdev.conf.blk, SG_IO, io_header, cb, opaque);
    assert(aiocb != NULL);
    return aiocb;
}

static bool scsi_block_no_fua(SCSICommand *cmd)
{
    return false;
}

static BlockAIOCB *scsi_block_dma_readv(int64_t offset,
                                        QEMUIOVector *iov,
                                        BlockCompletionFunc *cb, void *cb_opaque,
                                        void *opaque)
{
    SCSIBlockReq *r = opaque;
    return scsi_block_do_sgio(r, offset, iov,
                              SG_DXFER_FROM_DEV, cb, cb_opaque);
}

static BlockAIOCB *scsi_block_dma_writev(int64_t offset,
                                         QEMUIOVector *iov,
                                         BlockCompletionFunc *cb, void *cb_opaque,
                                         void *opaque)
{
    SCSIBlockReq *r = opaque;
    return scsi_block_do_sgio(r, offset, iov,
                              SG_DXFER_TO_DEV, cb, cb_opaque);
}

static bool scsi_block_is_passthrough(SCSIDiskState *s, uint8_t *buf)
{
    switch (buf[0]) {
    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
        /* Check if BYTCHK == 0x01 (data-out buffer contains data
         * for the number of logical blocks specified in the length
         * field).  For other modes, do not use scatter/gather operation.
         */
        if ((buf[1] & 6) == 2) {
            return false;
        }
        break;

    case READ_6:
    case READ_10:
    case READ_12:
    case READ_16:
    case WRITE_6:
    case WRITE_10:
    case WRITE_12:
    case WRITE_16:
    case WRITE_VERIFY_10:
    case WRITE_VERIFY_12:
    case WRITE_VERIFY_16:
        /* MMC writing cannot be done via DMA helpers, because it sometimes
         * involves writing beyond the maximum LBA or to negative LBA
         * (lead-in).  We might use scsi_block_dma_reqops as long as no
         * writing commands are seen, but performance usually isn't
         * paramount on optical media.  So, just make scsi-block operate
         * the same as scsi-generic for them.
         */
        if (s->qdev.type != TYPE_ROM) {
            return false;
        }
        break;

    default:
        break;
    }

    return true;
}

static int32_t scsi_block_dma_command(SCSIRequest *req, uint8_t *buf)
{
    SCSIBlockReq *r = (SCSIBlockReq *)req;
    r->cmd = req->cmd.buf[0];
    switch (r->cmd >> 5) {
    case 0:
        /* 6-byte CDB.  */
        r->cdb1 = r->group_number = 0;
        break;
    case 1:
        /* 10-byte CDB.  */
        r->cdb1 = req->cmd.buf[1];
        r->group_number = req->cmd.buf[6];
        break;
    case 4:
        /* 16-byte CDB.  */
        r->cdb1 = req->cmd.buf[1];
        r->group_number = req->cmd.buf[14];
        break;
    case 5:
        /* 12-byte CDB.  */
        r->cdb1 = req->cmd.buf[1];
        r->group_number = req->cmd.buf[10];
        break;
    default:
        abort();
    }
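    /*
     * For the READ/WRITE/VERIFY(10/12/16) commands that reach this path,
     * bits 7..5 of CDB byte 1 are the RDPROTECT/WRPROTECT field; a nonzero
     * value requests protection information, which is not implemented here,
     * hence the check below.
     */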
    if (r->cdb1 & 0xe0) {
        /* Protection information is not supported.  */
        scsi_check_condition(&r->req, SENSE_CODE(INVALID_FIELD));
        return 0;
    }

    r->req.status = &r->io_header.status;
    return scsi_disk_dma_command(req, buf);
}

static const SCSIReqOps scsi_block_dma_reqops = {
    .size = sizeof(SCSIBlockReq),
    .free_req = scsi_free_request,
    .send_command = scsi_block_dma_command,
    .read_data = scsi_read_data,
    .write_data = scsi_write_data,
    .get_buf = scsi_get_buf,
    .load_request = scsi_disk_load_request,
    .save_request = scsi_disk_save_request,
};

static SCSIRequest *scsi_block_new_request(SCSIDevice *d, uint32_t tag,
                                           uint32_t lun, uint8_t *buf,
                                           void *hba_private)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);

    if (scsi_block_is_passthrough(s, buf)) {
        return scsi_req_alloc(&scsi_generic_req_ops, &s->qdev, tag, lun,
                              hba_private);
    } else {
        return scsi_req_alloc(&scsi_block_dma_reqops, &s->qdev, tag, lun,
                              hba_private);
    }
}

static int scsi_block_parse_cdb(SCSIDevice *d, SCSICommand *cmd,
                                uint8_t *buf, void *hba_private)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);

    if (scsi_block_is_passthrough(s, buf)) {
        return scsi_bus_parse_cdb(&s->qdev, cmd, buf, hba_private);
    } else {
        return scsi_req_parse_cdb(&s->qdev, cmd, buf);
    }
}

#endif

static
BlockAIOCB *scsi_dma_readv(int64_t offset, QEMUIOVector *iov,
                           BlockCompletionFunc *cb, void *cb_opaque,
                           void *opaque)
{
    SCSIDiskReq *r = opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    return blk_aio_preadv(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque);
}

static
BlockAIOCB *scsi_dma_writev(int64_t offset, QEMUIOVector *iov,
                            BlockCompletionFunc *cb, void *cb_opaque,
                            void *opaque)
{
    SCSIDiskReq *r = opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    return blk_aio_pwritev(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque);
}

static void scsi_disk_base_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass);

    dc->fw_name = "disk";
    dc->reset = scsi_disk_reset;
    sdc->dma_readv = scsi_dma_readv;
    sdc->dma_writev = scsi_dma_writev;
    sdc->need_fua_emulation = scsi_is_cmd_fua;
}

static const TypeInfo scsi_disk_base_info = {
    .name = TYPE_SCSI_DISK_BASE,
    .parent = TYPE_SCSI_DEVICE,
    .class_init = scsi_disk_base_class_initfn,
    .instance_size = sizeof(SCSIDiskState),
    .class_size = sizeof(SCSIDiskClass),
    .abstract = true,
};

#define DEFINE_SCSI_DISK_PROPERTIES()                           \
    DEFINE_BLOCK_PROPERTIES(SCSIDiskState, qdev.conf),          \
    DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf),    \
    DEFINE_PROP_STRING("ver", SCSIDiskState, version),          \
    DEFINE_PROP_STRING("serial", SCSIDiskState, serial),        \
    DEFINE_PROP_STRING("vendor", SCSIDiskState, vendor),        \
    DEFINE_PROP_STRING("product", SCSIDiskState, product)
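/*
 * Illustrative usage note: the properties declared below are what a guest
 * configuration such as, e.g.,
 *   -device scsi-hd,drive=disk0,serial=SN001,rotation_rate=1
 * (paired with a suitable -drive id=disk0,...) sets; a rotation_rate of 1
 * reports a non-rotating (solid-state) medium to the guest.
 */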
static Property scsi_hd_properties[] = {
    DEFINE_SCSI_DISK_PROPERTIES(),
    DEFINE_PROP_BIT("removable", SCSIDiskState, features,
                    SCSI_DISK_F_REMOVABLE, false),
    DEFINE_PROP_BIT("dpofua", SCSIDiskState, features,
                    SCSI_DISK_F_DPOFUA, false),
    DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
    DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
    DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
    DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size,
                       DEFAULT_MAX_UNMAP_SIZE),
    DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
                       DEFAULT_MAX_IO_SIZE),
    DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0),
    DEFINE_BLOCK_CHS_PROPERTIES(SCSIDiskState, qdev.conf),
    DEFINE_PROP_END_OF_LIST(),
};

static const VMStateDescription vmstate_scsi_disk_state = {
    .name = "scsi-disk",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_SCSI_DEVICE(qdev, SCSIDiskState),
        VMSTATE_BOOL(media_changed, SCSIDiskState),
        VMSTATE_BOOL(media_event, SCSIDiskState),
        VMSTATE_BOOL(eject_request, SCSIDiskState),
        VMSTATE_BOOL(tray_open, SCSIDiskState),
        VMSTATE_BOOL(tray_locked, SCSIDiskState),
        VMSTATE_END_OF_LIST()
    }
};

static void scsi_hd_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);

    sc->realize = scsi_hd_realize;
    sc->alloc_req = scsi_new_request;
    sc->unit_attention_reported = scsi_disk_unit_attention_reported;
    dc->desc = "virtual SCSI disk";
    dc->props = scsi_hd_properties;
    dc->vmsd = &vmstate_scsi_disk_state;
}

static const TypeInfo scsi_hd_info = {
    .name = "scsi-hd",
    .parent = TYPE_SCSI_DISK_BASE,
    .class_init = scsi_hd_class_initfn,
};

static Property scsi_cd_properties[] = {
    DEFINE_SCSI_DISK_PROPERTIES(),
    DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
    DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
    DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
    DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
                       DEFAULT_MAX_IO_SIZE),
    DEFINE_PROP_END_OF_LIST(),
};

static void scsi_cd_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);

    sc->realize = scsi_cd_realize;
    sc->alloc_req = scsi_new_request;
    sc->unit_attention_reported = scsi_disk_unit_attention_reported;
    dc->desc = "virtual SCSI CD-ROM";
    dc->props = scsi_cd_properties;
    dc->vmsd = &vmstate_scsi_disk_state;
}

static const TypeInfo scsi_cd_info = {
    .name = "scsi-cd",
    .parent = TYPE_SCSI_DISK_BASE,
    .class_init = scsi_cd_class_initfn,
};

#ifdef __linux__
static Property scsi_block_properties[] = {
    DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf),
    DEFINE_PROP_DRIVE("drive", SCSIDiskState, qdev.conf.blk),
    DEFINE_PROP_BOOL("share-rw", SCSIDiskState, qdev.conf.share_rw, false),
    DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0),
    DEFINE_PROP_END_OF_LIST(),
};
static void scsi_block_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);
    SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass);

    sc->realize = scsi_block_realize;
    sc->alloc_req = scsi_block_new_request;
    sc->parse_cdb = scsi_block_parse_cdb;
    sdc->dma_readv = scsi_block_dma_readv;
    sdc->dma_writev = scsi_block_dma_writev;
    sdc->need_fua_emulation = scsi_block_no_fua;
    dc->desc = "SCSI block device passthrough";
    dc->props = scsi_block_properties;
    dc->vmsd = &vmstate_scsi_disk_state;
}

static const TypeInfo scsi_block_info = {
    .name = "scsi-block",
    .parent = TYPE_SCSI_DISK_BASE,
    .class_init = scsi_block_class_initfn,
};
#endif

static Property scsi_disk_properties[] = {
    DEFINE_SCSI_DISK_PROPERTIES(),
    DEFINE_PROP_BIT("removable", SCSIDiskState, features,
                    SCSI_DISK_F_REMOVABLE, false),
    DEFINE_PROP_BIT("dpofua", SCSIDiskState, features,
                    SCSI_DISK_F_DPOFUA, false),
    DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
    DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
    DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
    DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size,
                       DEFAULT_MAX_UNMAP_SIZE),
    DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
                       DEFAULT_MAX_IO_SIZE),
    DEFINE_PROP_END_OF_LIST(),
};

static void scsi_disk_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);

    sc->realize = scsi_disk_realize;
    sc->alloc_req = scsi_new_request;
    sc->unit_attention_reported = scsi_disk_unit_attention_reported;
    dc->fw_name = "disk";
    dc->desc = "virtual SCSI disk or CD-ROM (legacy)";
    dc->reset = scsi_disk_reset;
    dc->props = scsi_disk_properties;
    dc->vmsd = &vmstate_scsi_disk_state;
}

static const TypeInfo scsi_disk_info = {
    .name = "scsi-disk",
    .parent = TYPE_SCSI_DISK_BASE,
    .class_init = scsi_disk_class_initfn,
};

static void scsi_disk_register_types(void)
{
    type_register_static(&scsi_disk_base_info);
    type_register_static(&scsi_hd_info);
    type_register_static(&scsi_cd_info);
#ifdef __linux__
    type_register_static(&scsi_block_info);
#endif
    type_register_static(&scsi_disk_info);
}

type_init(scsi_disk_register_types)