/*
 * SCSI Device emulation
 *
 * Copyright (c) 2006 CodeSourcery.
 * Based on code by Fabrice Bellard
 *
 * Written by Paul Brook
 * Modifications:
 *  2009-Dec-12 Artyom Tarasenko : implemented standard inquiry for the case
 *                                 when the allocation length of CDB is smaller
 *                                 than 36.
 *  2009-Oct-13 Artyom Tarasenko : implemented the block descriptor in the
 *                                 MODE SENSE response.
 *
 * This code is licensed under the LGPL.
 *
 * Note that this file only handles the SCSI architecture model and device
 * commands.  Emulation of interface/link layer protocols is handled by
 * the host adapter emulator.
 */

//#define DEBUG_SCSI

#ifdef DEBUG_SCSI
#define DPRINTF(fmt, ...) \
do { printf("scsi-disk: " fmt , ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) do {} while (0)
#endif

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "hw/scsi/scsi.h"
#include "scsi/constants.h"
#include "sysemu/sysemu.h"
#include "sysemu/block-backend.h"
#include "sysemu/blockdev.h"
#include "hw/block/block.h"
#include "sysemu/dma.h"
#include "qemu/cutils.h"

#ifdef __linux__
#include <scsi/sg.h>
#endif

#define SCSI_WRITE_SAME_MAX         524288
#define SCSI_DMA_BUF_SIZE           131072
#define SCSI_MAX_INQUIRY_LEN        256
#define SCSI_MAX_MODE_LEN           256

#define DEFAULT_DISCARD_GRANULARITY 4096
#define DEFAULT_MAX_UNMAP_SIZE      (1 << 30)   /* 1 GB */
#define DEFAULT_MAX_IO_SIZE         INT_MAX     /* 2 GB - 1 block */

#define TYPE_SCSI_DISK_BASE         "scsi-disk-base"

#define SCSI_DISK_BASE(obj) \
    OBJECT_CHECK(SCSIDiskState, (obj), TYPE_SCSI_DISK_BASE)
#define SCSI_DISK_BASE_CLASS(klass) \
    OBJECT_CLASS_CHECK(SCSIDiskClass, (klass), TYPE_SCSI_DISK_BASE)
#define SCSI_DISK_BASE_GET_CLASS(obj) \
    OBJECT_GET_CLASS(SCSIDiskClass, (obj), TYPE_SCSI_DISK_BASE)

typedef struct SCSIDiskClass {
    SCSIDeviceClass parent_class;
    DMAIOFunc *dma_readv;
    DMAIOFunc *dma_writev;
    bool (*need_fua_emulation)(SCSICommand *cmd);
} SCSIDiskClass;

typedef struct SCSIDiskReq {
    SCSIRequest req;
    /* Both sector and sector_count are in terms of qemu 512 byte blocks. */
    uint64_t sector;
    uint32_t sector_count;
    uint32_t buflen;
    bool started;
    bool need_fua_emulation;
    struct iovec iov;
    QEMUIOVector qiov;
    BlockAcctCookie acct;
    unsigned char *status;
} SCSIDiskReq;

#define SCSI_DISK_F_REMOVABLE             0
#define SCSI_DISK_F_DPOFUA                1
#define SCSI_DISK_F_NO_REMOVABLE_DEVOPS   2

typedef struct SCSIDiskState
{
    SCSIDevice qdev;
    uint32_t features;
    bool media_changed;
    bool media_event;
    bool eject_request;
    uint16_t port_index;
    uint64_t max_unmap_size;
    uint64_t max_io_size;
    QEMUBH *bh;
    char *version;
    char *serial;
    char *vendor;
    char *product;
    bool tray_open;
    bool tray_locked;
    /*
     * 0x0000        - rotation rate not reported
     * 0x0001        - non-rotating medium (SSD)
     * 0x0002-0x0400 - reserved
     * 0x0401-0xfffe - rotations per minute
     * 0xffff        - reserved
     */
    uint16_t rotation_rate;
} SCSIDiskState;

static bool scsi_handle_rw_error(SCSIDiskReq *r, int error, bool acct_failed);

static void scsi_free_request(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    qemu_vfree(r->iov.iov_base);
}

/* Helper function for command completion with sense.  */
static void scsi_check_condition(SCSIDiskReq *r, SCSISense sense)
{
    DPRINTF("Command complete tag=0x%x sense=%d/%d/%d\n",
            r->req.tag, sense.key, sense.asc, sense.ascq);
    scsi_req_build_sense(&r->req, sense);
    scsi_req_complete(&r->req, CHECK_CONDITION);
}
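
/*
 * Allocate the bounce buffer on first use and size the iovec for the next
 * chunk of the transfer (at most buflen bytes).
 */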
static void scsi_init_iovec(SCSIDiskReq *r, size_t size)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    if (!r->iov.iov_base) {
        r->buflen = size;
        r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen);
    }
    r->iov.iov_len = MIN(r->sector_count * 512, r->buflen);
    qemu_iovec_init_external(&r->qiov, &r->iov, 1);
}

static void scsi_disk_save_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    qemu_put_be64s(f, &r->sector);
    qemu_put_be32s(f, &r->sector_count);
    qemu_put_be32s(f, &r->buflen);
    if (r->buflen) {
        if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
            qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len);
        } else if (!req->retry) {
            uint32_t len = r->iov.iov_len;
            qemu_put_be32s(f, &len);
            qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len);
        }
    }
}

static void scsi_disk_load_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    qemu_get_be64s(f, &r->sector);
    qemu_get_be32s(f, &r->sector_count);
    qemu_get_be32s(f, &r->buflen);
    if (r->buflen) {
        scsi_init_iovec(r, r->buflen);
        if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
            qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len);
        } else if (!r->req.retry) {
            uint32_t len;
            qemu_get_be32s(f, &len);
            r->iov.iov_len = len;
            assert(r->iov.iov_len <= r->buflen);
            qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len);
        }
    }

    qemu_iovec_init_external(&r->qiov, &r->iov, 1);
}

static bool scsi_disk_req_check_error(SCSIDiskReq *r, int ret, bool acct_failed)
{
    if (r->req.io_canceled) {
        scsi_req_cancel_complete(&r->req);
        return true;
    }

    if (ret < 0 || (r->status && *r->status)) {
        return scsi_handle_rw_error(r, -ret, acct_failed);
    }

    return false;
}

static void scsi_aio_complete(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;
    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (scsi_disk_req_check_error(r, ret, true)) {
        goto done;
    }

    block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    scsi_req_complete(&r->req, GOOD);

done:
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
    scsi_req_unref(&r->req);
}

static bool scsi_is_cmd_fua(SCSICommand *cmd)
{
    switch (cmd->buf[0]) {
    case READ_10:
    case READ_12:
    case READ_16:
    case WRITE_10:
    case WRITE_12:
    case WRITE_16:
        return (cmd->buf[1] & 8) != 0;

    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
    case WRITE_VERIFY_10:
    case WRITE_VERIFY_12:
    case WRITE_VERIFY_16:
        return true;

    case READ_6:
    case WRITE_6:
    default:
        return false;
    }
}
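
/*
 * Finish a write: if the command requested FUA and the backend needs it
 * emulated, issue an explicit flush before completing the request.
 */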
static void scsi_write_do_fua(SCSIDiskReq *r)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb == NULL);
    assert(!r->req.io_canceled);

    if (r->need_fua_emulation) {
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
        return;
    }

    scsi_req_complete(&r->req, GOOD);
    scsi_req_unref(&r->req);
}

static void scsi_dma_complete_noio(SCSIDiskReq *r, int ret)
{
    assert(r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    r->sector += r->sector_count;
    r->sector_count = 0;
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        scsi_write_do_fua(r);
        return;
    } else {
        scsi_req_complete(&r->req, GOOD);
    }

done:
    scsi_req_unref(&r->req);
}

static void scsi_dma_complete(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    }
    scsi_dma_complete_noio(r, ret);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}
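
/*
 * Completion callback for bounce-buffer reads: account for the transfer and
 * hand the data now sitting in r->qiov to the guest.
 */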
static void scsi_read_complete(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    int n;

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;
    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (scsi_disk_req_check_error(r, ret, true)) {
        goto done;
    }

    block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    DPRINTF("Data ready tag=0x%x len=%zd\n", r->req.tag, r->qiov.size);

    n = r->qiov.size / 512;
    r->sector += n;
    r->sector_count -= n;
    scsi_req_data(&r->req, r->qiov.size);

done:
    scsi_req_unref(&r->req);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}

/* Actually issue a read to the block device.  */
static void scsi_do_read(SCSIDiskReq *r, int ret)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));

    assert (r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    /* The request is used as the AIO opaque value, so add a ref.  */
    scsi_req_ref(&r->req);

    if (r->req.sg) {
        dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_READ);
        r->req.resid -= r->req.sg->size;
        r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
                                  r->req.sg, r->sector << BDRV_SECTOR_BITS,
                                  BDRV_SECTOR_SIZE,
                                  sdc->dma_readv, r, scsi_dma_complete, r,
                                  DMA_DIRECTION_FROM_DEVICE);
    } else {
        scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         r->qiov.size, BLOCK_ACCT_READ);
        r->req.aiocb = sdc->dma_readv(r->sector << BDRV_SECTOR_BITS, &r->qiov,
                                      scsi_read_complete, r, r);
    }

done:
    scsi_req_unref(&r->req);
}

static void scsi_do_read_cb(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert (r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    }
    scsi_do_read(opaque, ret);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}

/* Read more data from scsi device into buffer.  */
static void scsi_read_data(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    bool first;

    DPRINTF("Read sector_count=%d\n", r->sector_count);
    if (r->sector_count == 0) {
        /* This also clears the sense buffer for REQUEST SENSE.  */
        scsi_req_complete(&r->req, GOOD);
        return;
    }

    /* No data transfer may already be in progress */
    assert(r->req.aiocb == NULL);

    /* The request is used as the AIO opaque value, so add a ref.  */
    scsi_req_ref(&r->req);
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        DPRINTF("Data transfer direction invalid\n");
        scsi_read_complete(r, -EINVAL);
        return;
    }

    if (!blk_is_available(req->dev->conf.blk)) {
        scsi_read_complete(r, -ENOMEDIUM);
        return;
    }

    first = !r->started;
    r->started = true;
    if (first && r->need_fua_emulation) {
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_do_read_cb, r);
    } else {
        scsi_do_read(r, 0);
    }
}

/*
 * scsi_handle_rw_error has two return values.  False means that the error
 * must be ignored, true means that the error has been processed and the
 * caller should not do anything else for this request.  Note that
 * scsi_handle_rw_error always manages its reference counts, independent
 * of the return value.
 */
static bool scsi_handle_rw_error(SCSIDiskReq *r, int error, bool acct_failed)
{
    bool is_read = (r->req.cmd.mode == SCSI_XFER_FROM_DEV);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    BlockErrorAction action = blk_get_error_action(s->qdev.conf.blk,
                                                   is_read, error);

    if (action == BLOCK_ERROR_ACTION_REPORT) {
        if (acct_failed) {
            block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
        }
        switch (error) {
        case 0:
            /* The command has run, no need to fake sense.  */
            assert(r->status && *r->status);
            scsi_req_complete(&r->req, *r->status);
            break;
        case ENOMEDIUM:
            scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
            break;
        case ENOMEM:
            scsi_check_condition(r, SENSE_CODE(TARGET_FAILURE));
            break;
        case EINVAL:
            scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
            break;
        case ENOSPC:
            scsi_check_condition(r, SENSE_CODE(SPACE_ALLOC_FAILED));
            break;
        default:
            scsi_check_condition(r, SENSE_CODE(IO_ERROR));
            break;
        }
    }
    if (!error) {
        assert(r->status && *r->status);
        error = scsi_sense_buf_to_errno(r->req.sense, sizeof(r->req.sense));

        if (error == ECANCELED || error == EAGAIN || error == ENOTCONN ||
            error == 0) {
            /* These errors are handled by guest. */
            scsi_req_complete(&r->req, *r->status);
            return true;
        }
    }

    blk_error_action(s->qdev.conf.blk, action, is_read, error);
    if (action == BLOCK_ERROR_ACTION_STOP) {
        scsi_req_retry(&r->req);
    }
    return action != BLOCK_ERROR_ACTION_IGNORE;
}
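
/*
 * Advance the write position after a chunk has been written, then either ask
 * the guest for more data or finish the request (including FUA handling).
 */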
static void scsi_write_complete_noio(SCSIDiskReq *r, int ret)
{
    uint32_t n;

    assert (r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    n = r->qiov.size / 512;
    r->sector += n;
    r->sector_count -= n;
    if (r->sector_count == 0) {
        scsi_write_do_fua(r);
        return;
    } else {
        scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
        DPRINTF("Write complete tag=0x%x more=%zd\n", r->req.tag, r->qiov.size);
        scsi_req_data(&r->req, r->qiov.size);
    }

done:
    scsi_req_unref(&r->req);
}

static void scsi_write_complete(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert (r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    }
    scsi_write_complete_noio(r, ret);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}
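
/*
 * Called by the host adapter when (more) data for a write-style command has
 * arrived; starts the actual I/O, or asks the driver for data on first call.
 */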
static void scsi_write_data(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));

    /* No data transfer may already be in progress */
    assert(r->req.aiocb == NULL);

    /* The request is used as the AIO opaque value, so add a ref.  */
    scsi_req_ref(&r->req);
    if (r->req.cmd.mode != SCSI_XFER_TO_DEV) {
        DPRINTF("Data transfer direction invalid\n");
        scsi_write_complete_noio(r, -EINVAL);
        return;
    }

    if (!r->req.sg && !r->qiov.size) {
        /* Called for the first time.  Ask the driver to send us more data.  */
        r->started = true;
        scsi_write_complete_noio(r, 0);
        return;
    }
    if (!blk_is_available(req->dev->conf.blk)) {
        scsi_write_complete_noio(r, -ENOMEDIUM);
        return;
    }

    if (r->req.cmd.buf[0] == VERIFY_10 || r->req.cmd.buf[0] == VERIFY_12 ||
        r->req.cmd.buf[0] == VERIFY_16) {
        if (r->req.sg) {
            scsi_dma_complete_noio(r, 0);
        } else {
            scsi_write_complete_noio(r, 0);
        }
        return;
    }

    if (r->req.sg) {
        dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_WRITE);
        r->req.resid -= r->req.sg->size;
        r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
                                  r->req.sg, r->sector << BDRV_SECTOR_BITS,
                                  BDRV_SECTOR_SIZE,
                                  sdc->dma_writev, r, scsi_dma_complete, r,
                                  DMA_DIRECTION_TO_DEVICE);
    } else {
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         r->qiov.size, BLOCK_ACCT_WRITE);
        r->req.aiocb = sdc->dma_writev(r->sector << BDRV_SECTOR_BITS, &r->qiov,
                                       scsi_write_complete, r, r);
    }
}

/* Return a pointer to the data buffer.  */
static uint8_t *scsi_get_buf(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    return (uint8_t *)r->iov.iov_base;
}
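
/*
 * Build the INQUIRY response: one of the supported VPD pages when EVPD=1,
 * otherwise the standard inquiry data.  Returns the number of bytes placed
 * in outbuf, or -1 to signal an illegal request.
 */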
static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    int buflen = 0;
    int start;

    if (req->cmd.buf[1] & 0x1) {
        /* Vital product data */
        uint8_t page_code = req->cmd.buf[2];

        outbuf[buflen++] = s->qdev.type & 0x1f;
        outbuf[buflen++] = page_code; // this page
        outbuf[buflen++] = 0x00;
        outbuf[buflen++] = 0x00;
        start = buflen;

        switch (page_code) {
        case 0x00: /* Supported page codes, mandatory */
        {
            DPRINTF("Inquiry EVPD[Supported pages] "
                    "buffer size %zd\n", req->cmd.xfer);
            outbuf[buflen++] = 0x00; // list of supported pages (this page)
            if (s->serial) {
                outbuf[buflen++] = 0x80; // unit serial number
            }
            outbuf[buflen++] = 0x83; // device identification
            if (s->qdev.type == TYPE_DISK) {
                outbuf[buflen++] = 0xb0; // block limits
                outbuf[buflen++] = 0xb1; /* block device characteristics */
                outbuf[buflen++] = 0xb2; // thin provisioning
            }
            break;
        }
        case 0x80: /* Device serial number, optional */
        {
            int l;

            if (!s->serial) {
                DPRINTF("Inquiry (EVPD[Serial number] not supported\n");
                return -1;
            }

            l = strlen(s->serial);
            if (l > 36) {
                l = 36;
            }

            DPRINTF("Inquiry EVPD[Serial number] "
                    "buffer size %zd\n", req->cmd.xfer);
            memcpy(outbuf+buflen, s->serial, l);
            buflen += l;
            break;
        }

        case 0x83: /* Device identification page, mandatory */
        {
            const char *str = s->serial ?: blk_name(s->qdev.conf.blk);
            int max_len = s->serial ? 20 : 255 - 8;
            int id_len = strlen(str);

            if (id_len > max_len) {
                id_len = max_len;
            }
            DPRINTF("Inquiry EVPD[Device identification] "
                    "buffer size %zd\n", req->cmd.xfer);

            outbuf[buflen++] = 0x2; // ASCII
            outbuf[buflen++] = 0;   // not officially assigned
            outbuf[buflen++] = 0;   // reserved
            outbuf[buflen++] = id_len; // length of data following
            memcpy(outbuf+buflen, str, id_len);
            buflen += id_len;

            if (s->qdev.wwn) {
                outbuf[buflen++] = 0x1; // Binary
                outbuf[buflen++] = 0x3; // NAA
                outbuf[buflen++] = 0;   // reserved
                outbuf[buflen++] = 8;
                stq_be_p(&outbuf[buflen], s->qdev.wwn);
                buflen += 8;
            }

            if (s->qdev.port_wwn) {
                outbuf[buflen++] = 0x61; // SAS / Binary
                outbuf[buflen++] = 0x93; // PIV / Target port / NAA
                outbuf[buflen++] = 0;    // reserved
                outbuf[buflen++] = 8;
                stq_be_p(&outbuf[buflen], s->qdev.port_wwn);
                buflen += 8;
            }

            if (s->port_index) {
                outbuf[buflen++] = 0x61; // SAS / Binary
                outbuf[buflen++] = 0x94; // PIV / Target port / relative target port
                outbuf[buflen++] = 0;    // reserved
                outbuf[buflen++] = 4;
                stw_be_p(&outbuf[buflen + 2], s->port_index);
                buflen += 4;
            }
            break;
        }
        case 0xb0: /* block limits */
        {
            unsigned int unmap_sectors =
                s->qdev.conf.discard_granularity / s->qdev.blocksize;
            unsigned int min_io_size =
                s->qdev.conf.min_io_size / s->qdev.blocksize;
            unsigned int opt_io_size =
                s->qdev.conf.opt_io_size / s->qdev.blocksize;
            unsigned int max_unmap_sectors =
                s->max_unmap_size / s->qdev.blocksize;
            unsigned int max_io_sectors =
                s->max_io_size / s->qdev.blocksize;

            if (s->qdev.type == TYPE_ROM) {
                DPRINTF("Inquiry (EVPD[%02X] not supported for CDROM\n",
                        page_code);
                return -1;
            }
            if (s->qdev.type == TYPE_DISK) {
                int max_transfer_blk = blk_get_max_transfer(s->qdev.conf.blk);
                int max_io_sectors_blk =
                    max_transfer_blk / s->qdev.blocksize;

                max_io_sectors =
                    MIN_NON_ZERO(max_io_sectors_blk, max_io_sectors);

                /* min_io_size and opt_io_size can't be greater than
                 * max_io_sectors */
                if (min_io_size) {
                    min_io_size = MIN(min_io_size, max_io_sectors);
                }
                if (opt_io_size) {
                    opt_io_size = MIN(opt_io_size, max_io_sectors);
                }
            }
            /* required VPD size with unmap support */
            buflen = 0x40;
            memset(outbuf + 4, 0, buflen - 4);

            outbuf[4] = 0x1; /* wsnz */

            /* optimal transfer length granularity */
            outbuf[6] = (min_io_size >> 8) & 0xff;
            outbuf[7] = min_io_size & 0xff;

            /* maximum transfer length */
            outbuf[8] = (max_io_sectors >> 24) & 0xff;
            outbuf[9] = (max_io_sectors >> 16) & 0xff;
            outbuf[10] = (max_io_sectors >> 8) & 0xff;
            outbuf[11] = max_io_sectors & 0xff;

            /* optimal transfer length */
            outbuf[12] = (opt_io_size >> 24) & 0xff;
            outbuf[13] = (opt_io_size >> 16) & 0xff;
            outbuf[14] = (opt_io_size >> 8) & 0xff;
            outbuf[15] = opt_io_size & 0xff;

            /* max unmap LBA count, default is 1GB */
            outbuf[20] = (max_unmap_sectors >> 24) & 0xff;
            outbuf[21] = (max_unmap_sectors >> 16) & 0xff;
            outbuf[22] = (max_unmap_sectors >> 8) & 0xff;
            outbuf[23] = max_unmap_sectors & 0xff;

            /* max unmap descriptors, 255 fit in 4 kb with an 8-byte header.  */
            outbuf[24] = 0;
            outbuf[25] = 0;
            outbuf[26] = 0;
            outbuf[27] = 255;

            /* optimal unmap granularity */
            outbuf[28] = (unmap_sectors >> 24) & 0xff;
            outbuf[29] = (unmap_sectors >> 16) & 0xff;
            outbuf[30] = (unmap_sectors >> 8) & 0xff;
            outbuf[31] = unmap_sectors & 0xff;

            /* max write same size */
            outbuf[36] = 0;
            outbuf[37] = 0;
            outbuf[38] = 0;
            outbuf[39] = 0;

            outbuf[40] = (max_io_sectors >> 24) & 0xff;
            outbuf[41] = (max_io_sectors >> 16) & 0xff;
            outbuf[42] = (max_io_sectors >> 8) & 0xff;
            outbuf[43] = max_io_sectors & 0xff;
            break;
        }
        case 0xb1: /* block device characteristics */
        {
            buflen = 8;
            outbuf[4] = (s->rotation_rate >> 8) & 0xff;
            outbuf[5] = s->rotation_rate & 0xff;
            outbuf[6] = 0;
            outbuf[7] = 0;
            break;
        }
        case 0xb2: /* thin provisioning */
        {
            buflen = 8;
            outbuf[4] = 0;
            outbuf[5] = 0xe0; /* unmap & write_same 10/16 all supported */
            outbuf[6] = s->qdev.conf.discard_granularity ? 2 : 1;
            outbuf[7] = 0;
            break;
        }
        default:
            return -1;
        }
        /* done with EVPD */
        assert(buflen - start <= 255);
        outbuf[start - 1] = buflen - start;
        return buflen;
    }

    /* Standard INQUIRY data */
    if (req->cmd.buf[2] != 0) {
        return -1;
    }

    /* PAGE CODE == 0 */
    buflen = req->cmd.xfer;
    if (buflen > SCSI_MAX_INQUIRY_LEN) {
        buflen = SCSI_MAX_INQUIRY_LEN;
    }

    outbuf[0] = s->qdev.type & 0x1f;
    outbuf[1] = (s->features & (1 << SCSI_DISK_F_REMOVABLE)) ? 0x80 : 0;

    strpadcpy((char *) &outbuf[16], 16, s->product, ' ');
    strpadcpy((char *) &outbuf[8], 8, s->vendor, ' ');

    memset(&outbuf[32], 0, 4);
    memcpy(&outbuf[32], s->version, MIN(4, strlen(s->version)));
    /*
     * We claim conformance to SPC-3, which is required for guests
     * to ask for modern features like READ CAPACITY(16) or the
     * block characteristics VPD page by default.  Not all of SPC-3
     * is actually implemented, but we're good enough.
     */
    outbuf[2] = s->qdev.default_scsi_version;
    outbuf[3] = 2 | 0x10; /* Format 2, HiSup */

    if (buflen > 36) {
        outbuf[4] = buflen - 5; /* Additional Length = (Len - 1) - 4 */
    } else {
        /* If the allocation length of CDB is too small,
           the additional length is not adjusted */
        outbuf[4] = 36 - 5;
    }

    /* Sync data transfer and TCQ.  */
    outbuf[7] = 0x10 | (req->bus->info->tcq ? 0x02 : 0);
    return buflen;
}

static inline bool media_is_dvd(SCSIDiskState *s)
{
    uint64_t nb_sectors;
    if (s->qdev.type != TYPE_ROM) {
        return false;
    }
    if (!blk_is_available(s->qdev.conf.blk)) {
        return false;
    }
    blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
    return nb_sectors > CD_MAX_SECTORS;
}

static inline bool media_is_cd(SCSIDiskState *s)
{
    uint64_t nb_sectors;
    if (s->qdev.type != TYPE_ROM) {
        return false;
    }
    if (!blk_is_available(s->qdev.conf.blk)) {
        return false;
    }
    blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
    return nb_sectors <= CD_MAX_SECTORS;
}

static int scsi_read_disc_information(SCSIDiskState *s, SCSIDiskReq *r,
                                      uint8_t *outbuf)
{
    uint8_t type = r->req.cmd.buf[1] & 7;

    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }

    /* Types 1/2 are only defined for Blu-Ray.  */
    if (type != 0) {
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        return -1;
    }

    memset(outbuf, 0, 34);
    outbuf[1] = 32;
    outbuf[2] = 0xe; /* last session complete, disc finalized */
    outbuf[3] = 1;   /* first track on disc */
    outbuf[4] = 1;   /* # of sessions */
    outbuf[5] = 1;   /* first track of last session */
    outbuf[6] = 1;   /* last track of last session */
    outbuf[7] = 0x20; /* unrestricted use */
    outbuf[8] = 0x00; /* CD-ROM or DVD-ROM */
    /* 9-10-11: most significant byte corresponding bytes 4-5-6 */
    /* 12-23: not meaningful for CD-ROM or DVD-ROM */
    /* 24-31: disc bar code */
    /* 32: disc application code */
    /* 33: number of OPC tables */

    return 34;
}
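
/*
 * READ DVD STRUCTURE: supports the physical format (0x00), copyright (0x01)
 * and manufacturing (0x04) formats plus the capability list (0xff); other
 * formats are rejected.
 */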
static int scsi_read_dvd_structure(SCSIDiskState *s, SCSIDiskReq *r,
                                   uint8_t *outbuf)
{
    static const int rds_caps_size[5] = {
        [0] = 2048 + 4,
        [1] = 4 + 4,
        [3] = 188 + 4,
        [4] = 2048 + 4,
    };

    uint8_t media = r->req.cmd.buf[1];
    uint8_t layer = r->req.cmd.buf[6];
    uint8_t format = r->req.cmd.buf[7];
    int size = -1;

    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }
    if (media != 0) {
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        return -1;
    }

    if (format != 0xff) {
        if (!blk_is_available(s->qdev.conf.blk)) {
            scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
            return -1;
        }
        if (media_is_cd(s)) {
            scsi_check_condition(r, SENSE_CODE(INCOMPATIBLE_FORMAT));
            return -1;
        }
        if (format >= ARRAY_SIZE(rds_caps_size)) {
            return -1;
        }
        size = rds_caps_size[format];
        memset(outbuf, 0, size);
    }

    switch (format) {
    case 0x00: {
        /* Physical format information */
        uint64_t nb_sectors;
        if (layer != 0) {
            goto fail;
        }
        blk_get_geometry(s->qdev.conf.blk, &nb_sectors);

        outbuf[4] = 1;   /* DVD-ROM, part version 1 */
        outbuf[5] = 0xf; /* 120mm disc, minimum rate unspecified */
        outbuf[6] = 1;   /* one layer, read-only (per MMC-2 spec) */
        outbuf[7] = 0;   /* default densities */

        stl_be_p(&outbuf[12], (nb_sectors >> 2) - 1); /* end sector */
        stl_be_p(&outbuf[16], (nb_sectors >> 2) - 1); /* l0 end sector */
        break;
    }

    case 0x01: /* DVD copyright information, all zeros */
        break;

    case 0x03: /* BCA information - invalid field for no BCA info */
        return -1;

    case 0x04: /* DVD disc manufacturing information, all zeros */
        break;

    case 0xff: { /* List capabilities */
        int i;
        size = 4;
        for (i = 0; i < ARRAY_SIZE(rds_caps_size); i++) {
            if (!rds_caps_size[i]) {
                continue;
            }
            outbuf[size] = i;
            outbuf[size + 1] = 0x40; /* Not writable, readable */
            stw_be_p(&outbuf[size + 2], rds_caps_size[i]);
            size += 4;
        }
        break;
    }

    default:
        return -1;
    }

    /* Size of buffer, not including 2 byte size field */
    stw_be_p(outbuf, size - 2);
    return size;

fail:
    return -1;
}

static int scsi_event_status_media(SCSIDiskState *s, uint8_t *outbuf)
{
    uint8_t event_code, media_status;

    media_status = 0;
    if (s->tray_open) {
        media_status = MS_TRAY_OPEN;
    } else if (blk_is_inserted(s->qdev.conf.blk)) {
        media_status = MS_MEDIA_PRESENT;
    }

    /* Event notification descriptor */
    event_code = MEC_NO_CHANGE;
    if (media_status != MS_TRAY_OPEN) {
        if (s->media_event) {
            event_code = MEC_NEW_MEDIA;
            s->media_event = false;
        } else if (s->eject_request) {
            event_code = MEC_EJECT_REQUESTED;
            s->eject_request = false;
        }
    }

    outbuf[0] = event_code;
    outbuf[1] = media_status;

    /* These fields are reserved, just clear them. */
    outbuf[2] = 0;
    outbuf[3] = 0;
    return 4;
}
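
/*
 * GET EVENT STATUS NOTIFICATION: only the polled form is accepted and only
 * the media event class is reported.
 */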
static int scsi_get_event_status_notification(SCSIDiskState *s, SCSIDiskReq *r,
                                              uint8_t *outbuf)
{
    int size;
    uint8_t *buf = r->req.cmd.buf;
    uint8_t notification_class_request = buf[4];
    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }
    if ((buf[1] & 1) == 0) {
        /* asynchronous */
        return -1;
    }

    size = 4;
    outbuf[0] = outbuf[1] = 0;
    outbuf[3] = 1 << GESN_MEDIA; /* supported events */
    if (notification_class_request & (1 << GESN_MEDIA)) {
        outbuf[2] = GESN_MEDIA;
        size += scsi_event_status_media(s, &outbuf[size]);
    } else {
        outbuf[2] = 0x80;
    }
    stw_be_p(outbuf, size - 4);
    return size;
}

static int scsi_get_configuration(SCSIDiskState *s, uint8_t *outbuf)
{
    int current;

    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }

    if (media_is_dvd(s)) {
        current = MMC_PROFILE_DVD_ROM;
    } else if (media_is_cd(s)) {
        current = MMC_PROFILE_CD_ROM;
    } else {
        current = MMC_PROFILE_NONE;
    }

    memset(outbuf, 0, 40);
    stl_be_p(&outbuf[0], 36); /* Bytes after the data length field */
    stw_be_p(&outbuf[6], current);
    /* outbuf[8] - outbuf[19]: Feature 0 - Profile list */
    outbuf[10] = 0x03; /* persistent, current */
    outbuf[11] = 8; /* two profiles */
    stw_be_p(&outbuf[12], MMC_PROFILE_DVD_ROM);
    outbuf[14] = (current == MMC_PROFILE_DVD_ROM);
    stw_be_p(&outbuf[16], MMC_PROFILE_CD_ROM);
    outbuf[18] = (current == MMC_PROFILE_CD_ROM);
    /* outbuf[20] - outbuf[31]: Feature 1 - Core feature */
    stw_be_p(&outbuf[20], 1);
    outbuf[22] = 0x08 | 0x03; /* version 2, persistent, current */
    outbuf[23] = 8;
    stl_be_p(&outbuf[24], 1); /* SCSI */
    outbuf[28] = 1; /* DBE = 1, mandatory */
    /* outbuf[32] - outbuf[39]: Feature 3 - Removable media feature */
    stw_be_p(&outbuf[32], 3);
    outbuf[34] = 0x08 | 0x03; /* version 2, persistent, current */
    outbuf[35] = 4;
    outbuf[36] = 0x39; /* tray, load=1, eject=1, unlocked at powerup, lock=1 */
    /* TODO: Random readable, CD read, DVD read, drive serial number,
       power management */
    return 40;
}

static int scsi_emulate_mechanism_status(SCSIDiskState *s, uint8_t *outbuf)
{
    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }
    memset(outbuf, 0, 8);
    outbuf[5] = 1; /* CD-ROM */
    return 8;
}
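
/*
 * Fill in a single mode page for MODE SENSE / MODE SELECT.  *p_outbuf is
 * advanced past the page; the return value is the number of bytes emitted,
 * or -1 if the page is not valid for this device type.
 */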
static int mode_sense_page(SCSIDiskState *s, int page, uint8_t **p_outbuf,
                           int page_control)
{
    static const int mode_sense_valid[0x3f] = {
        [MODE_PAGE_HD_GEOMETRY] = (1 << TYPE_DISK),
        [MODE_PAGE_FLEXIBLE_DISK_GEOMETRY] = (1 << TYPE_DISK),
        [MODE_PAGE_CACHING] = (1 << TYPE_DISK) | (1 << TYPE_ROM),
        [MODE_PAGE_R_W_ERROR] = (1 << TYPE_DISK) | (1 << TYPE_ROM),
        [MODE_PAGE_AUDIO_CTL] = (1 << TYPE_ROM),
        [MODE_PAGE_CAPABILITIES] = (1 << TYPE_ROM),
    };

    uint8_t *p = *p_outbuf + 2;
    int length;

    if ((mode_sense_valid[page] & (1 << s->qdev.type)) == 0) {
        return -1;
    }

    /*
     * If Changeable Values are requested, a mask denoting those mode parameters
     * that are changeable shall be returned.  As we currently don't support
     * parameter changes via MODE_SELECT, all bits are returned set to zero.
     * The buffer was already memset to zero by the caller of this function.
     *
     * The offsets here are off by two compared to the descriptions in the
     * SCSI specs, because those include a 2-byte header.  This is unfortunate,
     * but it is done so that offsets are consistent within our implementation
     * of MODE SENSE and MODE SELECT.  MODE SELECT has to deal with both
     * 2-byte and 4-byte headers.
     */
    switch (page) {
    case MODE_PAGE_HD_GEOMETRY:
        length = 0x16;
        if (page_control == 1) { /* Changeable Values */
            break;
        }
        /* if a geometry hint is available, use it */
        p[0] = (s->qdev.conf.cyls >> 16) & 0xff;
        p[1] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[2] = s->qdev.conf.cyls & 0xff;
        p[3] = s->qdev.conf.heads & 0xff;
        /* Write precomp start cylinder, disabled */
        p[4] = (s->qdev.conf.cyls >> 16) & 0xff;
        p[5] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[6] = s->qdev.conf.cyls & 0xff;
        /* Reduced current start cylinder, disabled */
        p[7] = (s->qdev.conf.cyls >> 16) & 0xff;
        p[8] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[9] = s->qdev.conf.cyls & 0xff;
        /* Device step rate [ns], 200ns */
        p[10] = 0;
        p[11] = 200;
        /* Landing zone cylinder */
        p[12] = 0xff;
        p[13] = 0xff;
        p[14] = 0xff;
        /* Medium rotation rate [rpm], 5400 rpm */
        p[18] = (5400 >> 8) & 0xff;
        p[19] = 5400 & 0xff;
        break;

    case MODE_PAGE_FLEXIBLE_DISK_GEOMETRY:
        length = 0x1e;
        if (page_control == 1) { /* Changeable Values */
            break;
        }
        /* Transfer rate [kbit/s], 5Mbit/s */
        p[0] = 5000 >> 8;
        p[1] = 5000 & 0xff;
        /* if a geometry hint is available, use it */
        p[2] = s->qdev.conf.heads & 0xff;
        p[3] = s->qdev.conf.secs & 0xff;
        p[4] = s->qdev.blocksize >> 8;
        p[6] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[7] = s->qdev.conf.cyls & 0xff;
        /* Write precomp start cylinder, disabled */
        p[8] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[9] = s->qdev.conf.cyls & 0xff;
        /* Reduced current start cylinder, disabled */
        p[10] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[11] = s->qdev.conf.cyls & 0xff;
        /* Device step rate [100us], 100us */
        p[12] = 0;
        p[13] = 1;
        /* Device step pulse width [us], 1us */
        p[14] = 1;
        /* Device head settle delay [100us], 100us */
        p[15] = 0;
        p[16] = 1;
        /* Motor on delay [0.1s], 0.1s */
        p[17] = 1;
        /* Motor off delay [0.1s], 0.1s */
        p[18] = 1;
        /* Medium rotation rate [rpm], 5400 rpm */
        p[26] = (5400 >> 8) & 0xff;
        p[27] = 5400 & 0xff;
        break;

    case MODE_PAGE_CACHING:
        length = 0x12;
        if (page_control == 1 || /* Changeable Values */
            blk_enable_write_cache(s->qdev.conf.blk)) {
            p[0] = 4; /* WCE */
        }
        break;

    case MODE_PAGE_R_W_ERROR:
        length = 10;
        if (page_control == 1) { /* Changeable Values */
            break;
        }
        p[0] = 0x80; /* Automatic Write Reallocation Enabled */
        if (s->qdev.type == TYPE_ROM) {
            p[1] = 0x20; /* Read Retry Count */
        }
        break;

    case MODE_PAGE_AUDIO_CTL:
        length = 14;
        break;

    case MODE_PAGE_CAPABILITIES:
        length = 0x14;
        if (page_control == 1) { /* Changeable Values */
            break;
        }

        p[0] = 0x3b; /* CD-R & CD-RW read */
        p[1] = 0; /* Writing not supported */
        p[2] = 0x7f; /* Audio, composite, digital out,
                        mode 2 form 1&2, multi session */
        p[3] = 0xff; /* CD DA, DA accurate, RW supported,
                        RW corrected, C2 errors, ISRC,
                        UPC, Bar code */
        p[4] = 0x2d | (s->tray_locked ? 2 : 0);
        /* Locking supported, jumper present, eject, tray */
        p[5] = 0; /* no volume & mute control, no
                     changer */
        p[6] = (50 * 176) >> 8; /* 50x read speed */
        p[7] = (50 * 176) & 0xff;
        p[8] = 2 >> 8; /* Two volume levels */
        p[9] = 2 & 0xff;
        p[10] = 2048 >> 8; /* 2M buffer */
        p[11] = 2048 & 0xff;
        p[12] = (16 * 176) >> 8; /* 16x read speed current */
        p[13] = (16 * 176) & 0xff;
        p[16] = (16 * 176) >> 8; /* 16x write speed */
        p[17] = (16 * 176) & 0xff;
        p[18] = (16 * 176) >> 8; /* 16x write speed current */
        p[19] = (16 * 176) & 0xff;
        break;

    default:
        return -1;
    }

    assert(length < 256);
    (*p_outbuf)[0] = page;
    (*p_outbuf)[1] = length;
    *p_outbuf += length + 2;
    return length + 2;
}
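
/*
 * Build the MODE SENSE(6/10) response: mode parameter header, an optional
 * block descriptor, then the requested mode page(s).
 */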
static int scsi_disk_emulate_mode_sense(SCSIDiskReq *r, uint8_t *outbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    uint64_t nb_sectors;
    bool dbd;
    int page, buflen, ret, page_control;
    uint8_t *p;
    uint8_t dev_specific_param;

    dbd = (r->req.cmd.buf[1] & 0x8) != 0;
    page = r->req.cmd.buf[2] & 0x3f;
    page_control = (r->req.cmd.buf[2] & 0xc0) >> 6;
    DPRINTF("Mode Sense(%d) (page %d, xfer %zd, page_control %d)\n",
            (r->req.cmd.buf[0] == MODE_SENSE) ? 6 : 10, page, r->req.cmd.xfer,
            page_control);
    memset(outbuf, 0, r->req.cmd.xfer);
    p = outbuf;

    if (s->qdev.type == TYPE_DISK) {
        dev_specific_param = s->features & (1 << SCSI_DISK_F_DPOFUA) ? 0x10 : 0;
        if (blk_is_read_only(s->qdev.conf.blk)) {
            dev_specific_param |= 0x80; /* Readonly.  */
        }
    } else {
        /* MMC prescribes that CD/DVD drives have no block descriptors,
         * and defines no device-specific parameter.  */
        dev_specific_param = 0x00;
        dbd = true;
    }

    if (r->req.cmd.buf[0] == MODE_SENSE) {
        p[1] = 0; /* Default media type.  */
        p[2] = dev_specific_param;
        p[3] = 0; /* Block descriptor length.  */
        p += 4;
    } else { /* MODE_SENSE_10 */
        p[2] = 0; /* Default media type.  */
        p[3] = dev_specific_param;
        p[6] = p[7] = 0; /* Block descriptor length.  */
        p += 8;
    }

    blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
    if (!dbd && nb_sectors) {
        if (r->req.cmd.buf[0] == MODE_SENSE) {
            outbuf[3] = 8; /* Block descriptor length  */
        } else { /* MODE_SENSE_10 */
            outbuf[7] = 8; /* Block descriptor length  */
        }
        nb_sectors /= (s->qdev.blocksize / 512);
        if (nb_sectors > 0xffffff) {
            nb_sectors = 0;
        }
        p[0] = 0; /* media density code */
        p[1] = (nb_sectors >> 16) & 0xff;
        p[2] = (nb_sectors >> 8) & 0xff;
        p[3] = nb_sectors & 0xff;
        p[4] = 0; /* reserved */
        p[5] = 0; /* bytes 5-7 are the sector size in bytes */
        p[6] = s->qdev.blocksize >> 8;
        p[7] = 0;
        p += 8;
    }

    if (page_control == 3) {
        /* Saved Values */
        scsi_check_condition(r, SENSE_CODE(SAVING_PARAMS_NOT_SUPPORTED));
        return -1;
    }

    if (page == 0x3f) {
        for (page = 0; page <= 0x3e; page++) {
            mode_sense_page(s, page, &p, page_control);
        }
    } else {
        ret = mode_sense_page(s, page, &p, page_control);
        if (ret == -1) {
            return -1;
        }
    }

    buflen = p - outbuf;
    /*
     * The mode data length field specifies the length in bytes of the
     * following data that is available to be transferred. The mode data
     * length does not include itself.
     */
    if (r->req.cmd.buf[0] == MODE_SENSE) {
        outbuf[0] = buflen - 1;
    } else { /* MODE_SENSE_10 */
        outbuf[0] = ((buflen - 2) >> 8) & 0xff;
        outbuf[1] = (buflen - 2) & 0xff;
    }
    return buflen;
}

static int scsi_disk_emulate_read_toc(SCSIRequest *req, uint8_t *outbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    int start_track, format, msf, toclen;
    uint64_t nb_sectors;

    msf = req->cmd.buf[1] & 2;
    format = req->cmd.buf[2] & 0xf;
    start_track = req->cmd.buf[6];
    blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
    DPRINTF("Read TOC (track %d format %d msf %d)\n", start_track, format, msf >> 1);
    nb_sectors /= s->qdev.blocksize / 512;
    switch (format) {
    case 0:
        toclen = cdrom_read_toc(nb_sectors, outbuf, msf, start_track);
        break;
    case 1:
        /* multi session : only a single session defined */
        toclen = 12;
        memset(outbuf, 0, 12);
        outbuf[1] = 0x0a;
        outbuf[2] = 0x01;
        outbuf[3] = 0x01;
        break;
    case 2:
        toclen = cdrom_read_toc_raw(nb_sectors, outbuf, msf, start_track);
        break;
    default:
        return -1;
    }
    return toclen;
}
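
/*
 * START STOP UNIT: for removable media this loads or ejects the tray (unless
 * the medium is locked); non-zero power conditions are ignored.
 */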
static int scsi_disk_emulate_start_stop(SCSIDiskReq *r)
{
    SCSIRequest *req = &r->req;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    bool start = req->cmd.buf[4] & 1;
    bool loej = req->cmd.buf[4] & 2; /* load on start, eject on !start */
    int pwrcnd = req->cmd.buf[4] & 0xf0;

    if (pwrcnd) {
        /* eject/load only happens for power condition == 0 */
        return 0;
    }

    if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) && loej) {
        if (!start && !s->tray_open && s->tray_locked) {
            scsi_check_condition(r,
                                 blk_is_inserted(s->qdev.conf.blk)
                                 ? SENSE_CODE(ILLEGAL_REQ_REMOVAL_PREVENTED)
                                 : SENSE_CODE(NOT_READY_REMOVAL_PREVENTED));
            return -1;
        }

        if (s->tray_open != !start) {
            blk_eject(s->qdev.conf.blk, !start);
            s->tray_open = !start;
        }
    }
    return 0;
}

static void scsi_disk_emulate_read_data(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    int buflen = r->iov.iov_len;

    if (buflen) {
        DPRINTF("Read buf_len=%d\n", buflen);
        r->iov.iov_len = 0;
        r->started = true;
        scsi_req_data(&r->req, buflen);
        return;
    }

    /* This also clears the sense buffer for REQUEST SENSE.  */
    scsi_req_complete(&r->req, GOOD);
}

static int scsi_disk_check_mode_select(SCSIDiskState *s, int page,
                                       uint8_t *inbuf, int inlen)
{
    uint8_t mode_current[SCSI_MAX_MODE_LEN];
    uint8_t mode_changeable[SCSI_MAX_MODE_LEN];
    uint8_t *p;
    int len, expected_len, changeable_len, i;

    /* The input buffer does not include the page header, so it is
     * off by 2 bytes.
     */
    expected_len = inlen + 2;
    if (expected_len > SCSI_MAX_MODE_LEN) {
        return -1;
    }

    p = mode_current;
    memset(mode_current, 0, inlen + 2);
    len = mode_sense_page(s, page, &p, 0);
    if (len < 0 || len != expected_len) {
        return -1;
    }

    p = mode_changeable;
    memset(mode_changeable, 0, inlen + 2);
    changeable_len = mode_sense_page(s, page, &p, 1);
    assert(changeable_len == len);

    /* Check that unchangeable bits are the same as what MODE SENSE
     * would return.
     */
    for (i = 2; i < len; i++) {
        if (((mode_current[i] ^ inbuf[i - 2]) & ~mode_changeable[i]) != 0) {
            return -1;
        }
    }
    return 0;
}

static void scsi_disk_apply_mode_select(SCSIDiskState *s, int page, uint8_t *p)
{
    switch (page) {
    case MODE_PAGE_CACHING:
        blk_set_enable_write_cache(s->qdev.conf.blk, (p[0] & 4) != 0);
        break;

    default:
        break;
    }
}
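
/*
 * Walk the mode pages in a MODE SELECT parameter list.  With change=false the
 * pages are only validated against what MODE SENSE reports as changeable;
 * with change=true the (already validated) values are applied.
 */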
static int mode_select_pages(SCSIDiskReq *r, uint8_t *p, int len, bool change)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    while (len > 0) {
        int page, subpage, page_len;

        /* Parse both possible formats for the mode page headers.  */
        page = p[0] & 0x3f;
        if (p[0] & 0x40) {
            if (len < 4) {
                goto invalid_param_len;
            }
            subpage = p[1];
            page_len = lduw_be_p(&p[2]);
            p += 4;
            len -= 4;
        } else {
            if (len < 2) {
                goto invalid_param_len;
            }
            subpage = 0;
            page_len = p[1];
            p += 2;
            len -= 2;
        }

        if (subpage) {
            goto invalid_param;
        }
        if (page_len > len) {
            goto invalid_param_len;
        }

        if (!change) {
            if (scsi_disk_check_mode_select(s, page, p, page_len) < 0) {
                goto invalid_param;
            }
        } else {
            scsi_disk_apply_mode_select(s, page, p);
        }

        p += page_len;
        len -= page_len;
    }
    return 0;

invalid_param:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM));
    return -1;

invalid_param_len:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
    return -1;
}

static void scsi_disk_emulate_mode_select(SCSIDiskReq *r, uint8_t *inbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    uint8_t *p = inbuf;
    int cmd = r->req.cmd.buf[0];
    int len = r->req.cmd.xfer;
    int hdr_len = (cmd == MODE_SELECT ? 4 : 8);
    int bd_len;
    int pass;

    /* We only support PF=1, SP=0.  */
    if ((r->req.cmd.buf[1] & 0x11) != 0x10) {
        goto invalid_field;
    }

    if (len < hdr_len) {
        goto invalid_param_len;
    }

    bd_len = (cmd == MODE_SELECT ? p[3] : lduw_be_p(&p[6]));
    len -= hdr_len;
    p += hdr_len;
    if (len < bd_len) {
        goto invalid_param_len;
    }
    if (bd_len != 0 && bd_len != 8) {
        goto invalid_param;
    }

    len -= bd_len;
    p += bd_len;

    /* Ensure no change is made if there is an error!  */
    for (pass = 0; pass < 2; pass++) {
        if (mode_select_pages(r, p, len, pass == 1) < 0) {
            assert(pass == 0);
            return;
        }
    }
    if (!blk_enable_write_cache(s->qdev.conf.blk)) {
        /* The request is used as the AIO opaque value, so add a ref.  */
        scsi_req_ref(&r->req);
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
        return;
    }

    scsi_req_complete(&r->req, GOOD);
    return;

invalid_param:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM));
    return;

invalid_param_len:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
    return;

invalid_field:
    scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
}

static inline bool check_lba_range(SCSIDiskState *s,
                                   uint64_t sector_num, uint32_t nb_sectors)
{
    /*
     * The first line tests that no overflow happens when computing the last
     * sector.  The second line tests that the last accessed sector is in
     * range.
     *
     * Careful, the computations should not underflow for nb_sectors == 0,
     * and a 0-block read to the first LBA beyond the end of device is
     * valid.
     */
    return (sector_num <= sector_num + nb_sectors &&
            sector_num + nb_sectors <= s->qdev.max_lba + 1);
}
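
/*
 * UNMAP is processed as a chain of discards: each completion re-enters
 * scsi_unmap_complete_noio to issue the discard for the next descriptor.
 */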
typedef struct UnmapCBData {
    SCSIDiskReq *r;
    uint8_t *inbuf;
    int count;
} UnmapCBData;

static void scsi_unmap_complete(void *opaque, int ret);

static void scsi_unmap_complete_noio(UnmapCBData *data, int ret)
{
    SCSIDiskReq *r = data->r;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    uint64_t sector_num;
    uint32_t nb_sectors;

    assert(r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    if (data->count > 0) {
        sector_num = ldq_be_p(&data->inbuf[0]);
        nb_sectors = ldl_be_p(&data->inbuf[8]) & 0xffffffffULL;
        if (!check_lba_range(s, sector_num, nb_sectors)) {
            scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
            goto done;
        }

        r->req.aiocb = blk_aio_pdiscard(s->qdev.conf.blk,
                                        sector_num * s->qdev.blocksize,
                                        nb_sectors * s->qdev.blocksize,
                                        scsi_unmap_complete, data);
        data->count--;
        data->inbuf += 16;
        return;
    }

    scsi_req_complete(&r->req, GOOD);

done:
    scsi_req_unref(&r->req);
    g_free(data);
}

static void scsi_unmap_complete(void *opaque, int ret)
{
    UnmapCBData *data = opaque;
    SCSIDiskReq *r = data->r;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    scsi_unmap_complete_noio(data, ret);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}
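
/*
 * Validate the UNMAP parameter list and kick off the discard chain above.
 */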
static void scsi_disk_emulate_unmap(SCSIDiskReq *r, uint8_t *inbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    uint8_t *p = inbuf;
    int len = r->req.cmd.xfer;
    UnmapCBData *data;

    /* Reject ANCHOR=1.  */
    if (r->req.cmd.buf[1] & 0x1) {
        goto invalid_field;
    }

    if (len < 8) {
        goto invalid_param_len;
    }
    if (len < lduw_be_p(&p[0]) + 2) {
        goto invalid_param_len;
    }
    if (len < lduw_be_p(&p[2]) + 8) {
        goto invalid_param_len;
    }
    if (lduw_be_p(&p[2]) & 15) {
        goto invalid_param_len;
    }

    if (blk_is_read_only(s->qdev.conf.blk)) {
        scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
        return;
    }

    data = g_new0(UnmapCBData, 1);
    data->r = r;
    data->inbuf = &p[8];
    data->count = lduw_be_p(&p[2]) >> 4;

    /* The matching unref is in scsi_unmap_complete, before data is freed.  */
    scsi_req_ref(&r->req);
    scsi_unmap_complete_noio(data, 0);
    return;

invalid_param_len:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
    return;

invalid_field:
    scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
}

typedef struct WriteSameCBData {
    SCSIDiskReq *r;
    int64_t sector;
    int nb_sectors;
    QEMUIOVector qiov;
    struct iovec iov;
} WriteSameCBData;

static void scsi_write_same_complete(void *opaque, int ret)
{
    WriteSameCBData *data = opaque;
    SCSIDiskReq *r = data->r;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;
    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (scsi_disk_req_check_error(r, ret, true)) {
        goto done;
    }

    block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);

    data->nb_sectors -= data->iov.iov_len / 512;
    data->sector += data->iov.iov_len / 512;
    data->iov.iov_len = MIN(data->nb_sectors * 512, data->iov.iov_len);
    if (data->iov.iov_len) {
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         data->iov.iov_len, BLOCK_ACCT_WRITE);
        /* Reinitialize qiov, to handle unaligned WRITE SAME request
         * where final qiov may need smaller size */
        qemu_iovec_init_external(&data->qiov, &data->iov, 1);
        r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk,
                                       data->sector << BDRV_SECTOR_BITS,
                                       &data->qiov, 0,
                                       scsi_write_same_complete, data);
        aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
        return;
    }

    scsi_req_complete(&r->req, GOOD);

done:
    scsi_req_unref(&r->req);
    qemu_vfree(data->iov.iov_base);
    g_free(data);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}
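
/*
 * WRITE SAME(10/16): an all-zero pattern (or UNMAP=1) becomes a single
 * write-zeroes request; any other pattern is replicated into a bounce buffer
 * and written out in chunks of at most SCSI_WRITE_SAME_MAX bytes.
 */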
static void scsi_disk_emulate_write_same(SCSIDiskReq *r, uint8_t *inbuf)
{
    SCSIRequest *req = &r->req;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    uint32_t nb_sectors = scsi_data_cdb_xfer(r->req.cmd.buf);
    WriteSameCBData *data;
    uint8_t *buf;
    int i;

    /* Fail if PBDATA=1 or LBDATA=1 or ANCHOR=1.  */
    if (nb_sectors == 0 || (req->cmd.buf[1] & 0x16)) {
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        return;
    }

    if (blk_is_read_only(s->qdev.conf.blk)) {
        scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
        return;
    }
    if (!check_lba_range(s, r->req.cmd.lba, nb_sectors)) {
        scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
        return;
    }

    if ((req->cmd.buf[1] & 0x1) || buffer_is_zero(inbuf, s->qdev.blocksize)) {
        int flags = (req->cmd.buf[1] & 0x8) ? BDRV_REQ_MAY_UNMAP : 0;

        /* The request is used as the AIO opaque value, so add a ref.  */
        scsi_req_ref(&r->req);
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         nb_sectors * s->qdev.blocksize,
                         BLOCK_ACCT_WRITE);
        r->req.aiocb = blk_aio_pwrite_zeroes(s->qdev.conf.blk,
                                             r->req.cmd.lba * s->qdev.blocksize,
                                             nb_sectors * s->qdev.blocksize,
                                             flags, scsi_aio_complete, r);
        return;
    }

    data = g_new0(WriteSameCBData, 1);
    data->r = r;
    data->sector = r->req.cmd.lba * (s->qdev.blocksize / 512);
    data->nb_sectors = nb_sectors * (s->qdev.blocksize / 512);
    data->iov.iov_len = MIN(data->nb_sectors * 512, SCSI_WRITE_SAME_MAX);
    data->iov.iov_base = buf = blk_blockalign(s->qdev.conf.blk,
                                              data->iov.iov_len);
    qemu_iovec_init_external(&data->qiov, &data->iov, 1);

    for (i = 0; i < data->iov.iov_len; i += s->qdev.blocksize) {
        memcpy(&buf[i], inbuf, s->qdev.blocksize);
    }

    scsi_req_ref(&r->req);
    block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                     data->iov.iov_len, BLOCK_ACCT_WRITE);
    r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk,
                                   data->sector << BDRV_SECTOR_BITS,
                                   &data->qiov, 0,
                                   scsi_write_same_complete, data);
}

static void scsi_disk_emulate_write_data(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    if (r->iov.iov_len) {
        int buflen = r->iov.iov_len;
        DPRINTF("Write buf_len=%d\n", buflen);
        r->iov.iov_len = 0;
        scsi_req_data(&r->req, buflen);
        return;
    }

    switch (req->cmd.buf[0]) {
    case MODE_SELECT:
    case MODE_SELECT_10:
        /* This also clears the sense buffer for REQUEST SENSE.  */
        scsi_disk_emulate_mode_select(r, r->iov.iov_base);
        break;

    case UNMAP:
        scsi_disk_emulate_unmap(r, r->iov.iov_base);
        break;

    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
        if (r->req.status == -1) {
            scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        }
        break;

    case WRITE_SAME_10:
    case WRITE_SAME_16:
        scsi_disk_emulate_write_same(r, r->iov.iov_base);
        break;

    default:
        abort();
    }
}
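
/*
 * Emulate the commands that are not plain reads or writes.  The response is
 * built into the request's bounce buffer; the return value is the number of
 * bytes to transfer (negative for transfers to the device, zero for none).
 */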
static int32_t scsi_disk_emulate_command(SCSIRequest *req, uint8_t *buf)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    uint64_t nb_sectors;
    uint8_t *outbuf;
    int buflen;

    switch (req->cmd.buf[0]) {
    case INQUIRY:
    case MODE_SENSE:
    case MODE_SENSE_10:
    case RESERVE:
    case RESERVE_10:
    case RELEASE:
    case RELEASE_10:
    case START_STOP:
    case ALLOW_MEDIUM_REMOVAL:
    case GET_CONFIGURATION:
    case GET_EVENT_STATUS_NOTIFICATION:
    case MECHANISM_STATUS:
    case REQUEST_SENSE:
        break;

    default:
        if (!blk_is_available(s->qdev.conf.blk)) {
            scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
            return 0;
        }
        break;
    }

    /*
     * FIXME: we shouldn't return anything bigger than 4k, but the code
     * requires the buffer to be as big as req->cmd.xfer in several
     * places.  So, do not allow CDBs with a very large ALLOCATION
     * LENGTH.  The real fix would be to modify scsi_read_data and
     * dma_buf_read, so that they return data beyond the buflen
     * as all zeros.
     */
    if (req->cmd.xfer > 65536) {
        goto illegal_request;
    }
    r->buflen = MAX(4096, req->cmd.xfer);

    if (!r->iov.iov_base) {
        r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen);
    }

    buflen = req->cmd.xfer;
    outbuf = r->iov.iov_base;
    memset(outbuf, 0, r->buflen);
    switch (req->cmd.buf[0]) {
    case TEST_UNIT_READY:
        assert(blk_is_available(s->qdev.conf.blk));
        break;
    case INQUIRY:
        buflen = scsi_disk_emulate_inquiry(req, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case MODE_SENSE:
    case MODE_SENSE_10:
        buflen = scsi_disk_emulate_mode_sense(r, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case READ_TOC:
        buflen = scsi_disk_emulate_read_toc(req, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case RESERVE:
        if (req->cmd.buf[1] & 1) {
            goto illegal_request;
        }
        break;
    case RESERVE_10:
        if (req->cmd.buf[1] & 3) {
            goto illegal_request;
        }
        break;
    case RELEASE:
        if (req->cmd.buf[1] & 1) {
            goto illegal_request;
        }
        break;
    case RELEASE_10:
        if (req->cmd.buf[1] & 3) {
            goto illegal_request;
        }
        break;
    case START_STOP:
        if (scsi_disk_emulate_start_stop(r) < 0) {
            return 0;
        }
        break;
    case ALLOW_MEDIUM_REMOVAL:
        s->tray_locked = req->cmd.buf[4] & 1;
        blk_lock_medium(s->qdev.conf.blk, req->cmd.buf[4] & 1);
        break;
    case READ_CAPACITY_10:
        /* The normal LEN field for this command is zero.  */
        memset(outbuf, 0, 8);
        blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
        if (!nb_sectors) {
            scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY));
            return 0;
        }
        if ((req->cmd.buf[8] & 1) == 0 && req->cmd.lba) {
            goto illegal_request;
        }
        nb_sectors /= s->qdev.blocksize / 512;
        /* Returned value is the address of the last sector.  */
        nb_sectors--;
        /* Remember the new size for read/write sanity checking. */
        s->qdev.max_lba = nb_sectors;
        /* Clip to 2TB, instead of returning capacity modulo 2TB. */
        if (nb_sectors > UINT32_MAX) {
            nb_sectors = UINT32_MAX;
        }
        outbuf[0] = (nb_sectors >> 24) & 0xff;
        outbuf[1] = (nb_sectors >> 16) & 0xff;
        outbuf[2] = (nb_sectors >> 8) & 0xff;
        outbuf[3] = nb_sectors & 0xff;
        outbuf[4] = 0;
        outbuf[5] = 0;
        outbuf[6] = s->qdev.blocksize >> 8;
        outbuf[7] = 0;
        break;
    case REQUEST_SENSE:
        /* Just return "NO SENSE".  */
*/ 2025 buflen = scsi_convert_sense(NULL, 0, outbuf, r->buflen, 2026 (req->cmd.buf[1] & 1) == 0); 2027 if (buflen < 0) { 2028 goto illegal_request; 2029 } 2030 break; 2031 case MECHANISM_STATUS: 2032 buflen = scsi_emulate_mechanism_status(s, outbuf); 2033 if (buflen < 0) { 2034 goto illegal_request; 2035 } 2036 break; 2037 case GET_CONFIGURATION: 2038 buflen = scsi_get_configuration(s, outbuf); 2039 if (buflen < 0) { 2040 goto illegal_request; 2041 } 2042 break; 2043 case GET_EVENT_STATUS_NOTIFICATION: 2044 buflen = scsi_get_event_status_notification(s, r, outbuf); 2045 if (buflen < 0) { 2046 goto illegal_request; 2047 } 2048 break; 2049 case READ_DISC_INFORMATION: 2050 buflen = scsi_read_disc_information(s, r, outbuf); 2051 if (buflen < 0) { 2052 goto illegal_request; 2053 } 2054 break; 2055 case READ_DVD_STRUCTURE: 2056 buflen = scsi_read_dvd_structure(s, r, outbuf); 2057 if (buflen < 0) { 2058 goto illegal_request; 2059 } 2060 break; 2061 case SERVICE_ACTION_IN_16: 2062 /* Service Action In subcommands. */ 2063 if ((req->cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) { 2064 DPRINTF("SAI READ CAPACITY(16)\n"); 2065 memset(outbuf, 0, req->cmd.xfer); 2066 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 2067 if (!nb_sectors) { 2068 scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY)); 2069 return 0; 2070 } 2071 if ((req->cmd.buf[14] & 1) == 0 && req->cmd.lba) { 2072 goto illegal_request; 2073 } 2074 nb_sectors /= s->qdev.blocksize / 512; 2075 /* Returned value is the address of the last sector. */ 2076 nb_sectors--; 2077 /* Remember the new size for read/write sanity checking. */ 2078 s->qdev.max_lba = nb_sectors; 2079 outbuf[0] = (nb_sectors >> 56) & 0xff; 2080 outbuf[1] = (nb_sectors >> 48) & 0xff; 2081 outbuf[2] = (nb_sectors >> 40) & 0xff; 2082 outbuf[3] = (nb_sectors >> 32) & 0xff; 2083 outbuf[4] = (nb_sectors >> 24) & 0xff; 2084 outbuf[5] = (nb_sectors >> 16) & 0xff; 2085 outbuf[6] = (nb_sectors >> 8) & 0xff; 2086 outbuf[7] = nb_sectors & 0xff; 2087 outbuf[8] = 0; 2088 outbuf[9] = 0; 2089 outbuf[10] = s->qdev.blocksize >> 8; 2090 outbuf[11] = 0; 2091 outbuf[12] = 0; 2092 outbuf[13] = get_physical_block_exp(&s->qdev.conf); 2093 2094 /* set TPE bit if the format supports discard */ 2095 if (s->qdev.conf.discard_granularity) { 2096 outbuf[14] = 0x80; 2097 } 2098 2099 /* Protection, exponent and lowest lba field left blank. */ 2100 break; 2101 } 2102 DPRINTF("Unsupported Service Action In\n"); 2103 goto illegal_request; 2104 case SYNCHRONIZE_CACHE: 2105 /* The request is used as the AIO opaque value, so add a ref. 
 */
        scsi_req_ref(&r->req);
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
        return 0;
    case SEEK_10:
        DPRINTF("Seek(10) (sector %" PRId64 ")\n", r->req.cmd.lba);
        if (r->req.cmd.lba > s->qdev.max_lba) {
            goto illegal_lba;
        }
        break;
    case MODE_SELECT:
        DPRINTF("Mode Select(6) (len %lu)\n", (unsigned long)r->req.cmd.xfer);
        break;
    case MODE_SELECT_10:
        DPRINTF("Mode Select(10) (len %lu)\n", (unsigned long)r->req.cmd.xfer);
        break;
    case UNMAP:
        DPRINTF("Unmap (len %lu)\n", (unsigned long)r->req.cmd.xfer);
        break;
    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
        DPRINTF("Verify (bytchk %d)\n", (req->cmd.buf[1] >> 1) & 3);
        if (req->cmd.buf[1] & 6) {
            goto illegal_request;
        }
        break;
    case WRITE_SAME_10:
    case WRITE_SAME_16:
        DPRINTF("WRITE SAME %d (len %lu)\n",
                req->cmd.buf[0] == WRITE_SAME_10 ? 10 : 16,
                (unsigned long)r->req.cmd.xfer);
        break;
    default:
        DPRINTF("Unknown SCSI command (%2.2x=%s)\n", buf[0],
                scsi_command_name(buf[0]));
        scsi_check_condition(r, SENSE_CODE(INVALID_OPCODE));
        return 0;
    }
    assert(!r->req.aiocb);
    r->iov.iov_len = MIN(r->buflen, req->cmd.xfer);
    if (r->iov.iov_len == 0) {
        scsi_req_complete(&r->req, GOOD);
    }
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        assert(r->iov.iov_len == req->cmd.xfer);
        return -r->iov.iov_len;
    } else {
        return r->iov.iov_len;
    }

illegal_request:
    if (r->req.status == -1) {
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
    }
    return 0;

illegal_lba:
    scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
    return 0;
}
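/*
 * Illustrative sketch (hypothetical helper, not called anywhere in this
 * file): the READ CAPACITY(10) payload assembled by hand above is simply a
 * big-endian "address of the last sector" (clipped to UINT32_MAX when the
 * disk is too large for the 32-bit field) followed by a big-endian block
 * length, so it could equally be written with the byte-store helpers used
 * later in this file.
 */
static inline void example_encode_read_capacity_10(uint8_t *outbuf,
                                                   uint64_t last_sector,
                                                   uint32_t blocksize)
{
    stl_be_p(&outbuf[0], MIN(last_sector, (uint64_t)UINT32_MAX));
    stl_be_p(&outbuf[4], blocksize);
}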
/* Execute a scsi command.  Returns the length of the data expected by the
   command.  This will be positive for data transfers from the device
   (eg. disk reads), negative for transfers to the device (eg. disk writes),
   and zero if the command does not transfer any data.  */

static int32_t scsi_disk_dma_command(SCSIRequest *req, uint8_t *buf)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
    uint32_t len;
    uint8_t command;

    command = buf[0];

    if (!blk_is_available(s->qdev.conf.blk)) {
        scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
        return 0;
    }

    len = scsi_data_cdb_xfer(r->req.cmd.buf);
    switch (command) {
    case READ_6:
    case READ_10:
    case READ_12:
    case READ_16:
        DPRINTF("Read (sector %" PRId64 ", count %u)\n", r->req.cmd.lba, len);
        /* Protection information is not supported.  For SCSI versions 2 and
         * older (as determined by snooping the guest's INQUIRY commands),
         * there is no RD/WR/VRPROTECT, so skip this check in these versions.
         */
        if (s->qdev.scsi_version > 2 && (r->req.cmd.buf[1] & 0xe0)) {
            goto illegal_request;
        }
        if (!check_lba_range(s, r->req.cmd.lba, len)) {
            goto illegal_lba;
        }
        r->sector = r->req.cmd.lba * (s->qdev.blocksize / 512);
        r->sector_count = len * (s->qdev.blocksize / 512);
        break;
    case WRITE_6:
    case WRITE_10:
    case WRITE_12:
    case WRITE_16:
    case WRITE_VERIFY_10:
    case WRITE_VERIFY_12:
    case WRITE_VERIFY_16:
        if (blk_is_read_only(s->qdev.conf.blk)) {
            scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
            return 0;
        }
        DPRINTF("Write %s(sector %" PRId64 ", count %u)\n",
                (command & 0xe) == 0xe ? "And Verify " : "",
                r->req.cmd.lba, len);
        /* fall through */
    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
        /* We get here only for BYTCHK == 0x01 and only for scsi-block.
         * As far as DMA is concerned, we can treat it the same as a write;
         * scsi_block_do_sgio will send VERIFY commands.
         */
        if (s->qdev.scsi_version > 2 && (r->req.cmd.buf[1] & 0xe0)) {
            goto illegal_request;
        }
        if (!check_lba_range(s, r->req.cmd.lba, len)) {
            goto illegal_lba;
        }
        r->sector = r->req.cmd.lba * (s->qdev.blocksize / 512);
        r->sector_count = len * (s->qdev.blocksize / 512);
        break;
    default:
        abort();
    illegal_request:
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        return 0;
    illegal_lba:
        scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
        return 0;
    }
    r->need_fua_emulation = sdc->need_fua_emulation(&r->req.cmd);
    if (r->sector_count == 0) {
        scsi_req_complete(&r->req, GOOD);
    }
    assert(r->iov.iov_len == 0);
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        return -r->sector_count * 512;
    } else {
        return r->sector_count * 512;
    }
}

static void scsi_disk_reset(DeviceState *dev)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev.qdev, dev);
    uint64_t nb_sectors;

    scsi_device_purge_requests(&s->qdev, SENSE_CODE(RESET));

    blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
    nb_sectors /= s->qdev.blocksize / 512;
    if (nb_sectors) {
        nb_sectors--;
    }
    s->qdev.max_lba = nb_sectors;
    /* reset tray statuses */
    s->tray_locked = 0;
    s->tray_open = 0;

    s->qdev.scsi_version = s->qdev.default_scsi_version;
}

static void scsi_disk_resize_cb(void *opaque)
{
    SCSIDiskState *s = opaque;

    /* SPC lists this sense code as available only for
     * direct-access devices.
     */
    if (s->qdev.type == TYPE_DISK) {
        scsi_device_report_change(&s->qdev, SENSE_CODE(CAPACITY_CHANGED));
    }
}
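/*
 * Worked example (hypothetical helper, mirroring the arithmetic in
 * scsi_disk_dma_command() above): requests are tracked in 512-byte QEMU
 * sectors, so a READ(10) of 8 blocks at LBA 16 on a disk with a 4096-byte
 * logical block size becomes sector 128 with a sector_count of 64, and the
 * function returns 64 * 512 = 32768 bytes to the HBA.
 */
static inline uint64_t example_lba_to_qemu_sector(uint64_t lba,
                                                  uint32_t blocksize)
{
    return lba * (blocksize / 512);
}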
static void scsi_cd_change_media_cb(void *opaque, bool load, Error **errp)
{
    SCSIDiskState *s = opaque;

    /*
     * When a CD gets changed, we have to report an ejected state and
     * then a loaded state to guests so that they detect tray
     * open/close and media change events.  Guests that do not use
     * GET_EVENT_STATUS_NOTIFICATION to detect such tray open/close
     * states rely on this behavior.
     *
     * media_changed governs the state machine used for unit attention
     * report.  media_event is used by GET EVENT STATUS NOTIFICATION.
     */
    s->media_changed = load;
    s->tray_open = !load;
    scsi_device_set_ua(&s->qdev, SENSE_CODE(UNIT_ATTENTION_NO_MEDIUM));
    s->media_event = true;
    s->eject_request = false;
}

static void scsi_cd_eject_request_cb(void *opaque, bool force)
{
    SCSIDiskState *s = opaque;

    s->eject_request = true;
    if (force) {
        s->tray_locked = false;
    }
}

static bool scsi_cd_is_tray_open(void *opaque)
{
    return ((SCSIDiskState *)opaque)->tray_open;
}

static bool scsi_cd_is_medium_locked(void *opaque)
{
    return ((SCSIDiskState *)opaque)->tray_locked;
}

static const BlockDevOps scsi_disk_removable_block_ops = {
    .change_media_cb = scsi_cd_change_media_cb,
    .eject_request_cb = scsi_cd_eject_request_cb,
    .is_tray_open = scsi_cd_is_tray_open,
    .is_medium_locked = scsi_cd_is_medium_locked,

    .resize_cb = scsi_disk_resize_cb,
};

static const BlockDevOps scsi_disk_block_ops = {
    .resize_cb = scsi_disk_resize_cb,
};

static void scsi_disk_unit_attention_reported(SCSIDevice *dev)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
    if (s->media_changed) {
        s->media_changed = false;
        scsi_device_set_ua(&s->qdev, SENSE_CODE(MEDIUM_CHANGED));
    }
}

static void scsi_realize(SCSIDevice *dev, Error **errp)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);

    if (!s->qdev.conf.blk) {
        error_setg(errp, "drive property not set");
        return;
    }

    if (!(s->features & (1 << SCSI_DISK_F_REMOVABLE)) &&
        !blk_is_inserted(s->qdev.conf.blk)) {
        error_setg(errp, "Device needs media, but drive is empty");
        return;
    }

    blkconf_serial(&s->qdev.conf, &s->serial);
    blkconf_blocksizes(&s->qdev.conf);

    if (s->qdev.conf.logical_block_size >
        s->qdev.conf.physical_block_size) {
        error_setg(errp,
                   "logical_block_size > physical_block_size not supported");
        return;
    }

    if (dev->type == TYPE_DISK) {
        if (!blkconf_geometry(&dev->conf, NULL, 65535, 255, 255, errp)) {
            return;
        }
    }
    if (!blkconf_apply_backend_options(&dev->conf,
                                       blk_is_read_only(s->qdev.conf.blk),
                                       dev->type == TYPE_DISK, errp)) {
        return;
    }

    if (s->qdev.conf.discard_granularity == -1) {
        s->qdev.conf.discard_granularity =
            MAX(s->qdev.conf.logical_block_size, DEFAULT_DISCARD_GRANULARITY);
    }

    if (!s->version) {
        s->version = g_strdup(qemu_hw_version());
    }
    if (!s->vendor) {
        s->vendor = g_strdup("QEMU");
    }

    if (blk_is_sg(s->qdev.conf.blk)) {
        error_setg(errp, "unwanted /dev/sg*");
        return;
    }

    if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) &&
        !(s->features & (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS))) {
        blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_removable_block_ops, s);
    } else {
        blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_block_ops, s);
    }
    blk_set_guest_block_size(s->qdev.conf.blk, s->qdev.blocksize);

    blk_iostatus_enable(s->qdev.conf.blk);
}
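/*
 * Minimal sketch (hypothetical helper, not used above): the default discard
 * granularity picked by scsi_realize() when the property is left at -1.
 * A 512-byte logical block yields DEFAULT_DISCARD_GRANULARITY (4096), while
 * a 4096-byte or larger logical block is used unchanged.
 */
static inline uint32_t example_default_discard_granularity(uint32_t logical_block_size)
{
    return MAX(logical_block_size, DEFAULT_DISCARD_GRANULARITY);
}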
static void scsi_hd_realize(SCSIDevice *dev, Error **errp)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);

    /* This can happen for a device without a drive; the error message for
     * the missing backend will be issued in scsi_realize().
     */
    if (s->qdev.conf.blk) {
        blkconf_blocksizes(&s->qdev.conf);
    }
    s->qdev.blocksize = s->qdev.conf.logical_block_size;
    s->qdev.type = TYPE_DISK;
    if (!s->product) {
        s->product = g_strdup("QEMU HARDDISK");
    }
    scsi_realize(&s->qdev, errp);
}

static void scsi_cd_realize(SCSIDevice *dev, Error **errp)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
    int ret;

    if (!dev->conf.blk) {
        /* Anonymous BlockBackend for an empty drive.  As we put it into
         * dev->conf, qdev takes care of detaching on unplug. */
        dev->conf.blk = blk_new(0, BLK_PERM_ALL);
        ret = blk_attach_dev(dev->conf.blk, &dev->qdev);
        assert(ret == 0);
    }

    s->qdev.blocksize = 2048;
    s->qdev.type = TYPE_ROM;
    s->features |= 1 << SCSI_DISK_F_REMOVABLE;
    if (!s->product) {
        s->product = g_strdup("QEMU CD-ROM");
    }
    scsi_realize(&s->qdev, errp);
}

static void scsi_disk_realize(SCSIDevice *dev, Error **errp)
{
    DriveInfo *dinfo;
    Error *local_err = NULL;

    if (!dev->conf.blk) {
        scsi_realize(dev, &local_err);
        assert(local_err);
        error_propagate(errp, local_err);
        return;
    }

    dinfo = blk_legacy_dinfo(dev->conf.blk);
    if (dinfo && dinfo->media_cd) {
        scsi_cd_realize(dev, errp);
    } else {
        scsi_hd_realize(dev, errp);
    }
}

static const SCSIReqOps scsi_disk_emulate_reqops = {
    .size         = sizeof(SCSIDiskReq),
    .free_req     = scsi_free_request,
    .send_command = scsi_disk_emulate_command,
    .read_data    = scsi_disk_emulate_read_data,
    .write_data   = scsi_disk_emulate_write_data,
    .get_buf      = scsi_get_buf,
};

static const SCSIReqOps scsi_disk_dma_reqops = {
    .size         = sizeof(SCSIDiskReq),
    .free_req     = scsi_free_request,
    .send_command = scsi_disk_dma_command,
    .read_data    = scsi_read_data,
    .write_data   = scsi_write_data,
    .get_buf      = scsi_get_buf,
    .load_request = scsi_disk_load_request,
    .save_request = scsi_disk_save_request,
};

static const SCSIReqOps *const scsi_disk_reqops_dispatch[256] = {
    [TEST_UNIT_READY]               = &scsi_disk_emulate_reqops,
    [INQUIRY]                       = &scsi_disk_emulate_reqops,
    [MODE_SENSE]                    = &scsi_disk_emulate_reqops,
    [MODE_SENSE_10]                 = &scsi_disk_emulate_reqops,
    [START_STOP]                    = &scsi_disk_emulate_reqops,
    [ALLOW_MEDIUM_REMOVAL]          = &scsi_disk_emulate_reqops,
    [READ_CAPACITY_10]              = &scsi_disk_emulate_reqops,
    [READ_TOC]                      = &scsi_disk_emulate_reqops,
    [READ_DVD_STRUCTURE]            = &scsi_disk_emulate_reqops,
    [READ_DISC_INFORMATION]         = &scsi_disk_emulate_reqops,
    [GET_CONFIGURATION]             = &scsi_disk_emulate_reqops,
    [GET_EVENT_STATUS_NOTIFICATION] = &scsi_disk_emulate_reqops,
    [MECHANISM_STATUS]              = &scsi_disk_emulate_reqops,
    [SERVICE_ACTION_IN_16]          = &scsi_disk_emulate_reqops,
    [REQUEST_SENSE]                 = &scsi_disk_emulate_reqops,
    [SYNCHRONIZE_CACHE]             = &scsi_disk_emulate_reqops,
    [SEEK_10]                       = &scsi_disk_emulate_reqops,
    [MODE_SELECT]                   = &scsi_disk_emulate_reqops,
    [MODE_SELECT_10]                = &scsi_disk_emulate_reqops,
    [UNMAP]                         = &scsi_disk_emulate_reqops,
    [WRITE_SAME_10]                 = &scsi_disk_emulate_reqops,
    [WRITE_SAME_16]                 = &scsi_disk_emulate_reqops,
    [VERIFY_10]                     = &scsi_disk_emulate_reqops,
    [VERIFY_12]                     = &scsi_disk_emulate_reqops,
    [VERIFY_16]                     = &scsi_disk_emulate_reqops,

    [READ_6]                        = &scsi_disk_dma_reqops,
    [READ_10]                       = &scsi_disk_dma_reqops,
    [READ_12]                       = &scsi_disk_dma_reqops,
    [READ_16]                       = &scsi_disk_dma_reqops,
    [WRITE_6]                       = &scsi_disk_dma_reqops,
    [WRITE_10]                      = &scsi_disk_dma_reqops,
    [WRITE_12]                      = &scsi_disk_dma_reqops,
    [WRITE_16]                      = &scsi_disk_dma_reqops,
    [WRITE_VERIFY_10]               = &scsi_disk_dma_reqops,
    [WRITE_VERIFY_12]               = &scsi_disk_dma_reqops,
    [WRITE_VERIFY_16]               = &scsi_disk_dma_reqops,
};

static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun,
                                     uint8_t *buf, void *hba_private)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);
    SCSIRequest *req;
    const SCSIReqOps *ops;
    uint8_t command;

    command = buf[0];
    ops = scsi_disk_reqops_dispatch[command];
    if (!ops) {
        ops = &scsi_disk_emulate_reqops;
    }
    req = scsi_req_alloc(ops, &s->qdev, tag, lun, hba_private);

#ifdef DEBUG_SCSI
    DPRINTF("Command: lun=%d tag=0x%x data=0x%02x", lun, tag, buf[0]);
    {
        int i;
        for (i = 1; i < scsi_cdb_length(buf); i++) {
            printf(" 0x%02x", buf[i]);
        }
        printf("\n");
    }
#endif

    return req;
}

#ifdef __linux__
static int get_device_type(SCSIDiskState *s)
{
    uint8_t cmd[16];
    uint8_t buf[36];
    uint8_t sensebuf[8];
    sg_io_hdr_t io_header;
    int ret;

    memset(cmd, 0, sizeof(cmd));
    memset(buf, 0, sizeof(buf));
    cmd[0] = INQUIRY;
    cmd[4] = sizeof(buf);

    memset(&io_header, 0, sizeof(io_header));
    io_header.interface_id = 'S';
    io_header.dxfer_direction = SG_DXFER_FROM_DEV;
    io_header.dxfer_len = sizeof(buf);
    io_header.dxferp = buf;
    io_header.cmdp = cmd;
    io_header.cmd_len = sizeof(cmd);
    io_header.mx_sb_len = sizeof(sensebuf);
    io_header.sbp = sensebuf;
    io_header.timeout = 6000; /* XXX */

    ret = blk_ioctl(s->qdev.conf.blk, SG_IO, &io_header);
    if (ret < 0 || io_header.driver_status || io_header.host_status) {
        return -1;
    }
    s->qdev.type = buf[0];
    if (buf[1] & 0x80) {
        s->features |= 1 << SCSI_DISK_F_REMOVABLE;
    }
    return 0;
}
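/*
 * Illustrative sketch (hypothetical helpers): the two INQUIRY data bytes
 * that get_device_type() consumes.  Per SPC, byte 0 carries the peripheral
 * device type in its low five bits (0 = disk, 5 = CD/DVD) and byte 1 bit 7
 * is the RMB (removable medium) flag; the code above stores byte 0 as-is
 * because the peripheral qualifier in the top bits is normally zero.
 */
static inline int example_inquiry_device_type(const uint8_t *inquiry_data)
{
    return inquiry_data[0] & 0x1f;
}

static inline bool example_inquiry_is_removable(const uint8_t *inquiry_data)
{
    return (inquiry_data[1] & 0x80) != 0;
}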
static void scsi_block_realize(SCSIDevice *dev, Error **errp)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
    int sg_version;
    int rc;

    if (!s->qdev.conf.blk) {
        error_setg(errp, "drive property not set");
        return;
    }

    /* check we are using a driver managing SG_IO (version 3 and after) */
    rc = blk_ioctl(s->qdev.conf.blk, SG_GET_VERSION_NUM, &sg_version);
    if (rc < 0) {
        error_setg_errno(errp, -rc, "cannot get SG_IO version number");
        if (rc != -EPERM) {
            error_append_hint(errp, "Is this a SCSI device?\n");
        }
        return;
    }
    if (sg_version < 30000) {
        error_setg(errp, "scsi generic interface too old");
        return;
    }

    /* get device type from INQUIRY data */
    rc = get_device_type(s);
    if (rc < 0) {
        error_setg(errp, "INQUIRY failed");
        return;
    }

    /* Make a guess for the block size; we'll fix it when the guest sends
     * READ CAPACITY.  If it doesn't, it would likely assume these sizes
     * anyway.  (TODO: check in /sys).
     */
    if (s->qdev.type == TYPE_ROM || s->qdev.type == TYPE_WORM) {
        s->qdev.blocksize = 2048;
    } else {
        s->qdev.blocksize = 512;
    }

    /* Make the scsi-block device not removable, so that the HMP and QMP
     * eject commands cannot eject it.
     */
    s->features |= (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS);

    scsi_realize(&s->qdev, errp);
    scsi_generic_read_device_identification(&s->qdev);
}

typedef struct SCSIBlockReq {
    SCSIDiskReq req;
    sg_io_hdr_t io_header;

    /* Selected bytes of the original CDB, copied into our own CDB. */
    uint8_t cmd, cdb1, group_number;

    /* CDB passed to SG_IO. */
    uint8_t cdb[16];
} SCSIBlockReq;

static BlockAIOCB *scsi_block_do_sgio(SCSIBlockReq *req,
                                      int64_t offset, QEMUIOVector *iov,
                                      int direction,
                                      BlockCompletionFunc *cb, void *opaque)
{
    sg_io_hdr_t *io_header = &req->io_header;
    SCSIDiskReq *r = &req->req;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    int nb_logical_blocks;
    uint64_t lba;
    BlockAIOCB *aiocb;

    /* This is not supported yet.  It can only happen if the guest does
     * reads and writes that are not aligned to the logical sector size
     * _and_ cover multiple MemoryRegions.
     */
    assert(offset % s->qdev.blocksize == 0);
    assert(iov->size % s->qdev.blocksize == 0);

    io_header->interface_id = 'S';

    /* The data transfer comes from the QEMUIOVector. */
    io_header->dxfer_direction = direction;
    io_header->dxfer_len = iov->size;
    io_header->dxferp = (void *)iov->iov;
    io_header->iovec_count = iov->niov;
    assert(io_header->iovec_count == iov->niov); /* no overflow! */

    /* Build a new CDB with the LBA and length patched in, in case the
     * DMA helpers split the transfer into multiple segments.  Do not
     * build a CDB smaller than what the guest wanted, and only build
     * a larger one if strictly necessary.
     */
    io_header->cmdp = req->cdb;
    lba = offset / s->qdev.blocksize;
    nb_logical_blocks = io_header->dxfer_len / s->qdev.blocksize;

    if ((req->cmd >> 5) == 0 && lba <= 0x1ffff) {
        /* 6-byte CDB */
        stl_be_p(&req->cdb[0], lba | (req->cmd << 24));
        req->cdb[4] = nb_logical_blocks;
        req->cdb[5] = 0;
        io_header->cmd_len = 6;
    } else if ((req->cmd >> 5) <= 1 && lba <= 0xffffffffULL) {
        /* 10-byte CDB */
        req->cdb[0] = (req->cmd & 0x1f) | 0x20;
        req->cdb[1] = req->cdb1;
        stl_be_p(&req->cdb[2], lba);
        req->cdb[6] = req->group_number;
        stw_be_p(&req->cdb[7], nb_logical_blocks);
        req->cdb[9] = 0;
        io_header->cmd_len = 10;
    } else if ((req->cmd >> 5) != 4 && lba <= 0xffffffffULL) {
        /* 12-byte CDB */
        req->cdb[0] = (req->cmd & 0x1f) | 0xA0;
        req->cdb[1] = req->cdb1;
        stl_be_p(&req->cdb[2], lba);
        stl_be_p(&req->cdb[6], nb_logical_blocks);
        req->cdb[10] = req->group_number;
        req->cdb[11] = 0;
        io_header->cmd_len = 12;
    } else {
        /* 16-byte CDB */
        req->cdb[0] = (req->cmd & 0x1f) | 0x80;
        req->cdb[1] = req->cdb1;
        stq_be_p(&req->cdb[2], lba);
        stl_be_p(&req->cdb[10], nb_logical_blocks);
        req->cdb[14] = req->group_number;
        req->cdb[15] = 0;
        io_header->cmd_len = 16;
    }

    /* The rest is as in scsi-generic.c. */
    io_header->mx_sb_len = sizeof(r->req.sense);
    io_header->sbp = r->req.sense;
    io_header->timeout = UINT_MAX;
    io_header->usr_ptr = r;
    io_header->flags |= SG_FLAG_DIRECT_IO;

    aiocb = blk_aio_ioctl(s->qdev.conf.blk, SG_IO, io_header, cb, opaque);
    assert(aiocb != NULL);
    return aiocb;
}
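/*
 * Illustrative sketch (hypothetical helper): which CDB size the chain of
 * conditions in scsi_block_do_sgio() selects for a given original opcode
 * group (cmd >> 5) and segment LBA.  For example, a READ(10) segment that
 * starts below LBA 0x100000000 keeps a 10-byte CDB, while a segment that
 * the DMA helpers start at or above that LBA is rebuilt as a 16-byte CDB.
 */
static inline int example_sgio_cdb_len(uint8_t cmd, uint64_t lba)
{
    if ((cmd >> 5) == 0 && lba <= 0x1ffff) {
        return 6;
    } else if ((cmd >> 5) <= 1 && lba <= 0xffffffffULL) {
        return 10;
    } else if ((cmd >> 5) != 4 && lba <= 0xffffffffULL) {
        return 12;
    } else {
        return 16;
    }
}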

static bool scsi_block_no_fua(SCSICommand *cmd)
{
    return false;
}

static BlockAIOCB *scsi_block_dma_readv(int64_t offset,
                                        QEMUIOVector *iov,
                                        BlockCompletionFunc *cb, void *cb_opaque,
                                        void *opaque)
{
    SCSIBlockReq *r = opaque;
    return scsi_block_do_sgio(r, offset, iov,
                              SG_DXFER_FROM_DEV, cb, cb_opaque);
}

static BlockAIOCB *scsi_block_dma_writev(int64_t offset,
                                         QEMUIOVector *iov,
                                         BlockCompletionFunc *cb, void *cb_opaque,
                                         void *opaque)
{
    SCSIBlockReq *r = opaque;
    return scsi_block_do_sgio(r, offset, iov,
                              SG_DXFER_TO_DEV, cb, cb_opaque);
}

static bool scsi_block_is_passthrough(SCSIDiskState *s, uint8_t *buf)
{
    switch (buf[0]) {
    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
        /* Check if BYTCHK == 0x01 (data-out buffer contains data
         * for the number of logical blocks specified in the length
         * field).  For other modes, do not use the scatter/gather
         * operation.
         */
        if ((buf[1] & 6) == 2) {
            return false;
        }
        break;

    case READ_6:
    case READ_10:
    case READ_12:
    case READ_16:
    case WRITE_6:
    case WRITE_10:
    case WRITE_12:
    case WRITE_16:
    case WRITE_VERIFY_10:
    case WRITE_VERIFY_12:
    case WRITE_VERIFY_16:
        /* MMC writing cannot be done via DMA helpers, because it sometimes
         * involves writing beyond the maximum LBA or to a negative LBA
         * (lead-in).  We might use scsi_block_dma_reqops as long as no
         * writing commands are seen, but performance usually isn't
         * paramount on optical media.  So, just make scsi-block operate
         * the same as scsi-generic for them.
         */
        if (s->qdev.type != TYPE_ROM) {
            return false;
        }
        break;

    default:
        break;
    }

    return true;
}

static int32_t scsi_block_dma_command(SCSIRequest *req, uint8_t *buf)
{
    SCSIBlockReq *r = (SCSIBlockReq *)req;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);

    r->cmd = req->cmd.buf[0];
    switch (r->cmd >> 5) {
    case 0:
        /* 6-byte CDB. */
        r->cdb1 = r->group_number = 0;
        break;
    case 1:
        /* 10-byte CDB. */
        r->cdb1 = req->cmd.buf[1];
        r->group_number = req->cmd.buf[6];
        break;
    case 4:
        /* 12-byte CDB. */
        r->cdb1 = req->cmd.buf[1];
        r->group_number = req->cmd.buf[10];
        break;
    case 5:
        /* 16-byte CDB. */
        r->cdb1 = req->cmd.buf[1];
        r->group_number = req->cmd.buf[14];
        break;
    default:
        abort();
    }

    /* Protection information is not supported.  For SCSI versions 2 and
     * older (as determined by snooping the guest's INQUIRY commands),
     * there is no RD/WR/VRPROTECT, so skip this check in these versions.
2853 */ 2854 if (s->qdev.scsi_version > 2 && (req->cmd.buf[1] & 0xe0)) { 2855 scsi_check_condition(&r->req, SENSE_CODE(INVALID_FIELD)); 2856 return 0; 2857 } 2858 2859 r->req.status = &r->io_header.status; 2860 return scsi_disk_dma_command(req, buf); 2861 } 2862 2863 static const SCSIReqOps scsi_block_dma_reqops = { 2864 .size = sizeof(SCSIBlockReq), 2865 .free_req = scsi_free_request, 2866 .send_command = scsi_block_dma_command, 2867 .read_data = scsi_read_data, 2868 .write_data = scsi_write_data, 2869 .get_buf = scsi_get_buf, 2870 .load_request = scsi_disk_load_request, 2871 .save_request = scsi_disk_save_request, 2872 }; 2873 2874 static SCSIRequest *scsi_block_new_request(SCSIDevice *d, uint32_t tag, 2875 uint32_t lun, uint8_t *buf, 2876 void *hba_private) 2877 { 2878 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d); 2879 2880 if (scsi_block_is_passthrough(s, buf)) { 2881 return scsi_req_alloc(&scsi_generic_req_ops, &s->qdev, tag, lun, 2882 hba_private); 2883 } else { 2884 return scsi_req_alloc(&scsi_block_dma_reqops, &s->qdev, tag, lun, 2885 hba_private); 2886 } 2887 } 2888 2889 static int scsi_block_parse_cdb(SCSIDevice *d, SCSICommand *cmd, 2890 uint8_t *buf, void *hba_private) 2891 { 2892 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d); 2893 2894 if (scsi_block_is_passthrough(s, buf)) { 2895 return scsi_bus_parse_cdb(&s->qdev, cmd, buf, hba_private); 2896 } else { 2897 return scsi_req_parse_cdb(&s->qdev, cmd, buf); 2898 } 2899 } 2900 2901 #endif 2902 2903 static 2904 BlockAIOCB *scsi_dma_readv(int64_t offset, QEMUIOVector *iov, 2905 BlockCompletionFunc *cb, void *cb_opaque, 2906 void *opaque) 2907 { 2908 SCSIDiskReq *r = opaque; 2909 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 2910 return blk_aio_preadv(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque); 2911 } 2912 2913 static 2914 BlockAIOCB *scsi_dma_writev(int64_t offset, QEMUIOVector *iov, 2915 BlockCompletionFunc *cb, void *cb_opaque, 2916 void *opaque) 2917 { 2918 SCSIDiskReq *r = opaque; 2919 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 2920 return blk_aio_pwritev(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque); 2921 } 2922 2923 static void scsi_disk_base_class_initfn(ObjectClass *klass, void *data) 2924 { 2925 DeviceClass *dc = DEVICE_CLASS(klass); 2926 SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass); 2927 2928 dc->fw_name = "disk"; 2929 dc->reset = scsi_disk_reset; 2930 sdc->dma_readv = scsi_dma_readv; 2931 sdc->dma_writev = scsi_dma_writev; 2932 sdc->need_fua_emulation = scsi_is_cmd_fua; 2933 } 2934 2935 static const TypeInfo scsi_disk_base_info = { 2936 .name = TYPE_SCSI_DISK_BASE, 2937 .parent = TYPE_SCSI_DEVICE, 2938 .class_init = scsi_disk_base_class_initfn, 2939 .instance_size = sizeof(SCSIDiskState), 2940 .class_size = sizeof(SCSIDiskClass), 2941 .abstract = true, 2942 }; 2943 2944 #define DEFINE_SCSI_DISK_PROPERTIES() \ 2945 DEFINE_BLOCK_PROPERTIES(SCSIDiskState, qdev.conf), \ 2946 DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf), \ 2947 DEFINE_PROP_STRING("ver", SCSIDiskState, version), \ 2948 DEFINE_PROP_STRING("serial", SCSIDiskState, serial), \ 2949 DEFINE_PROP_STRING("vendor", SCSIDiskState, vendor), \ 2950 DEFINE_PROP_STRING("product", SCSIDiskState, product) 2951 2952 static Property scsi_hd_properties[] = { 2953 DEFINE_SCSI_DISK_PROPERTIES(), 2954 DEFINE_PROP_BIT("removable", SCSIDiskState, features, 2955 SCSI_DISK_F_REMOVABLE, false), 2956 DEFINE_PROP_BIT("dpofua", SCSIDiskState, features, 2957 SCSI_DISK_F_DPOFUA, false), 2958 DEFINE_PROP_UINT64("wwn", 
SCSIDiskState, qdev.wwn, 0), 2959 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0), 2960 DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0), 2961 DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size, 2962 DEFAULT_MAX_UNMAP_SIZE), 2963 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size, 2964 DEFAULT_MAX_IO_SIZE), 2965 DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0), 2966 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version, 2967 5), 2968 DEFINE_BLOCK_CHS_PROPERTIES(SCSIDiskState, qdev.conf), 2969 DEFINE_PROP_END_OF_LIST(), 2970 }; 2971 2972 static const VMStateDescription vmstate_scsi_disk_state = { 2973 .name = "scsi-disk", 2974 .version_id = 1, 2975 .minimum_version_id = 1, 2976 .fields = (VMStateField[]) { 2977 VMSTATE_SCSI_DEVICE(qdev, SCSIDiskState), 2978 VMSTATE_BOOL(media_changed, SCSIDiskState), 2979 VMSTATE_BOOL(media_event, SCSIDiskState), 2980 VMSTATE_BOOL(eject_request, SCSIDiskState), 2981 VMSTATE_BOOL(tray_open, SCSIDiskState), 2982 VMSTATE_BOOL(tray_locked, SCSIDiskState), 2983 VMSTATE_END_OF_LIST() 2984 } 2985 }; 2986 2987 static void scsi_hd_class_initfn(ObjectClass *klass, void *data) 2988 { 2989 DeviceClass *dc = DEVICE_CLASS(klass); 2990 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); 2991 2992 sc->realize = scsi_hd_realize; 2993 sc->alloc_req = scsi_new_request; 2994 sc->unit_attention_reported = scsi_disk_unit_attention_reported; 2995 dc->desc = "virtual SCSI disk"; 2996 dc->props = scsi_hd_properties; 2997 dc->vmsd = &vmstate_scsi_disk_state; 2998 } 2999 3000 static const TypeInfo scsi_hd_info = { 3001 .name = "scsi-hd", 3002 .parent = TYPE_SCSI_DISK_BASE, 3003 .class_init = scsi_hd_class_initfn, 3004 }; 3005 3006 static Property scsi_cd_properties[] = { 3007 DEFINE_SCSI_DISK_PROPERTIES(), 3008 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0), 3009 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0), 3010 DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0), 3011 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size, 3012 DEFAULT_MAX_IO_SIZE), 3013 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version, 3014 5), 3015 DEFINE_PROP_END_OF_LIST(), 3016 }; 3017 3018 static void scsi_cd_class_initfn(ObjectClass *klass, void *data) 3019 { 3020 DeviceClass *dc = DEVICE_CLASS(klass); 3021 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); 3022 3023 sc->realize = scsi_cd_realize; 3024 sc->alloc_req = scsi_new_request; 3025 sc->unit_attention_reported = scsi_disk_unit_attention_reported; 3026 dc->desc = "virtual SCSI CD-ROM"; 3027 dc->props = scsi_cd_properties; 3028 dc->vmsd = &vmstate_scsi_disk_state; 3029 } 3030 3031 static const TypeInfo scsi_cd_info = { 3032 .name = "scsi-cd", 3033 .parent = TYPE_SCSI_DISK_BASE, 3034 .class_init = scsi_cd_class_initfn, 3035 }; 3036 3037 #ifdef __linux__ 3038 static Property scsi_block_properties[] = { 3039 DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf), \ 3040 DEFINE_PROP_DRIVE("drive", SCSIDiskState, qdev.conf.blk), 3041 DEFINE_PROP_BOOL("share-rw", SCSIDiskState, qdev.conf.share_rw, false), 3042 DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0), 3043 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version, 3044 -1), 3045 DEFINE_PROP_END_OF_LIST(), 3046 }; 3047 3048 static void scsi_block_class_initfn(ObjectClass *klass, void *data) 3049 { 3050 DeviceClass *dc = DEVICE_CLASS(klass); 3051 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); 3052 
SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass); 3053 3054 sc->realize = scsi_block_realize; 3055 sc->alloc_req = scsi_block_new_request; 3056 sc->parse_cdb = scsi_block_parse_cdb; 3057 sdc->dma_readv = scsi_block_dma_readv; 3058 sdc->dma_writev = scsi_block_dma_writev; 3059 sdc->need_fua_emulation = scsi_block_no_fua; 3060 dc->desc = "SCSI block device passthrough"; 3061 dc->props = scsi_block_properties; 3062 dc->vmsd = &vmstate_scsi_disk_state; 3063 } 3064 3065 static const TypeInfo scsi_block_info = { 3066 .name = "scsi-block", 3067 .parent = TYPE_SCSI_DISK_BASE, 3068 .class_init = scsi_block_class_initfn, 3069 }; 3070 #endif 3071 3072 static Property scsi_disk_properties[] = { 3073 DEFINE_SCSI_DISK_PROPERTIES(), 3074 DEFINE_PROP_BIT("removable", SCSIDiskState, features, 3075 SCSI_DISK_F_REMOVABLE, false), 3076 DEFINE_PROP_BIT("dpofua", SCSIDiskState, features, 3077 SCSI_DISK_F_DPOFUA, false), 3078 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0), 3079 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0), 3080 DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0), 3081 DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size, 3082 DEFAULT_MAX_UNMAP_SIZE), 3083 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size, 3084 DEFAULT_MAX_IO_SIZE), 3085 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version, 3086 5), 3087 DEFINE_PROP_END_OF_LIST(), 3088 }; 3089 3090 static void scsi_disk_class_initfn(ObjectClass *klass, void *data) 3091 { 3092 DeviceClass *dc = DEVICE_CLASS(klass); 3093 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); 3094 3095 sc->realize = scsi_disk_realize; 3096 sc->alloc_req = scsi_new_request; 3097 sc->unit_attention_reported = scsi_disk_unit_attention_reported; 3098 dc->fw_name = "disk"; 3099 dc->desc = "virtual SCSI disk or CD-ROM (legacy)"; 3100 dc->reset = scsi_disk_reset; 3101 dc->props = scsi_disk_properties; 3102 dc->vmsd = &vmstate_scsi_disk_state; 3103 } 3104 3105 static const TypeInfo scsi_disk_info = { 3106 .name = "scsi-disk", 3107 .parent = TYPE_SCSI_DISK_BASE, 3108 .class_init = scsi_disk_class_initfn, 3109 }; 3110 3111 static void scsi_disk_register_types(void) 3112 { 3113 type_register_static(&scsi_disk_base_info); 3114 type_register_static(&scsi_hd_info); 3115 type_register_static(&scsi_cd_info); 3116 #ifdef __linux__ 3117 type_register_static(&scsi_block_info); 3118 #endif 3119 type_register_static(&scsi_disk_info); 3120 } 3121 3122 type_init(scsi_disk_register_types) 3123
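/*
 * Usage sketch (examples only, not part of the emulation): the device types
 * registered above are typically instantiated from the QEMU command line
 * behind a SCSI HBA.  The HBA model and file names here are placeholders:
 *
 *   -device virtio-scsi-pci,id=scsi0
 *   -drive if=none,id=hd0,file=disk.qcow2
 *   -device scsi-hd,bus=scsi0.0,drive=hd0,serial=SN0001
 *
 *   -drive if=none,id=cd0,media=cdrom,file=install.iso
 *   -device scsi-cd,bus=scsi0.0,drive=cd0
 *
 * scsi-block is Linux-only and requires a host device that supports SG_IO
 * (for instance a whole-disk /dev/sdX node), as checked in
 * scsi_block_realize().
 */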