/*
 * SCSI Device emulation
 *
 * Copyright (c) 2006 CodeSourcery.
 * Based on code by Fabrice Bellard
 *
 * Written by Paul Brook
 * Modifications:
 *  2009-Dec-12 Artyom Tarasenko : implemented standard inquiry for the case
 *                                 when the allocation length of CDB is smaller
 *                                 than 36.
 *  2009-Oct-13 Artyom Tarasenko : implemented the block descriptor in the
 *                                 MODE SENSE response.
 *
 * This code is licensed under the LGPL.
 *
 * Note that this file only handles the SCSI architecture model and device
 * commands. Emulation of interface/link layer protocols is handled by
 * the host adapter emulator.
 */

//#define DEBUG_SCSI

#ifdef DEBUG_SCSI
#define DPRINTF(fmt, ...) \
do { printf("scsi-disk: " fmt , ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) do {} while(0)
#endif

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "hw/scsi/scsi.h"
#include "hw/scsi/emulation.h"
#include "scsi/constants.h"
#include "sysemu/sysemu.h"
#include "sysemu/block-backend.h"
#include "sysemu/blockdev.h"
#include "hw/block/block.h"
#include "sysemu/dma.h"
#include "qemu/cutils.h"

#ifdef __linux__
#include <scsi/sg.h>
#endif

#define SCSI_WRITE_SAME_MAX         (512 * KiB)
#define SCSI_DMA_BUF_SIZE           (128 * KiB)
#define SCSI_MAX_INQUIRY_LEN        256
#define SCSI_MAX_MODE_LEN           256

#define DEFAULT_DISCARD_GRANULARITY (4 * KiB)
#define DEFAULT_MAX_UNMAP_SIZE      (1 * GiB)
#define DEFAULT_MAX_IO_SIZE         INT_MAX     /* 2 GB - 1 block */

#define TYPE_SCSI_DISK_BASE         "scsi-disk-base"

#define SCSI_DISK_BASE(obj) \
     OBJECT_CHECK(SCSIDiskState, (obj), TYPE_SCSI_DISK_BASE)
#define SCSI_DISK_BASE_CLASS(klass) \
     OBJECT_CLASS_CHECK(SCSIDiskClass, (klass), TYPE_SCSI_DISK_BASE)
#define SCSI_DISK_BASE_GET_CLASS(obj) \
     OBJECT_GET_CLASS(SCSIDiskClass, (obj), TYPE_SCSI_DISK_BASE)

typedef struct SCSIDiskClass {
    SCSIDeviceClass parent_class;
    DMAIOFunc       *dma_readv;
    DMAIOFunc       *dma_writev;
    bool            (*need_fua_emulation)(SCSICommand *cmd);
} SCSIDiskClass;

typedef struct SCSIDiskReq {
    SCSIRequest req;
    /* Both sector and sector_count are in terms of qemu 512 byte blocks. */
    uint64_t sector;
    uint32_t sector_count;
    uint32_t buflen;
    bool started;
    bool need_fua_emulation;
    struct iovec iov;
    QEMUIOVector qiov;
    BlockAcctCookie acct;
    unsigned char *status;
} SCSIDiskReq;

#define SCSI_DISK_F_REMOVABLE             0
#define SCSI_DISK_F_DPOFUA                1
#define SCSI_DISK_F_NO_REMOVABLE_DEVOPS   2

typedef struct SCSIDiskState
{
    SCSIDevice qdev;
    uint32_t features;
    bool media_changed;
    bool media_event;
    bool eject_request;
    uint16_t port_index;
    uint64_t max_unmap_size;
    uint64_t max_io_size;
    QEMUBH *bh;
    char *version;
    char *serial;
    char *vendor;
    char *product;
    bool tray_open;
    bool tray_locked;
    /*
     * 0x0000        - rotation rate not reported
     * 0x0001        - non-rotating medium (SSD)
     * 0x0002-0x0400 - reserved
     * 0x0401-0xfffe - rotations per minute
     * 0xffff        - reserved
     */
    uint16_t rotation_rate;
} SCSIDiskState;

static bool scsi_handle_rw_error(SCSIDiskReq *r, int error, bool acct_failed);

static void scsi_free_request(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    qemu_vfree(r->iov.iov_base);
}

/* Helper function for command completion with sense.
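 * The request is finished with CHECK CONDITION status and the given
 * key/asc/ascq triple is recorded as its sense data.  For example,
 * scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)) is how malformed
 * CDB fields are reported throughout this file.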
*/ 129 static void scsi_check_condition(SCSIDiskReq *r, SCSISense sense) 130 { 131 DPRINTF("Command complete tag=0x%x sense=%d/%d/%d\n", 132 r->req.tag, sense.key, sense.asc, sense.ascq); 133 scsi_req_build_sense(&r->req, sense); 134 scsi_req_complete(&r->req, CHECK_CONDITION); 135 } 136 137 static void scsi_init_iovec(SCSIDiskReq *r, size_t size) 138 { 139 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 140 141 if (!r->iov.iov_base) { 142 r->buflen = size; 143 r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen); 144 } 145 r->iov.iov_len = MIN(r->sector_count * 512, r->buflen); 146 qemu_iovec_init_external(&r->qiov, &r->iov, 1); 147 } 148 149 static void scsi_disk_save_request(QEMUFile *f, SCSIRequest *req) 150 { 151 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 152 153 qemu_put_be64s(f, &r->sector); 154 qemu_put_be32s(f, &r->sector_count); 155 qemu_put_be32s(f, &r->buflen); 156 if (r->buflen) { 157 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { 158 qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len); 159 } else if (!req->retry) { 160 uint32_t len = r->iov.iov_len; 161 qemu_put_be32s(f, &len); 162 qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len); 163 } 164 } 165 } 166 167 static void scsi_disk_load_request(QEMUFile *f, SCSIRequest *req) 168 { 169 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 170 171 qemu_get_be64s(f, &r->sector); 172 qemu_get_be32s(f, &r->sector_count); 173 qemu_get_be32s(f, &r->buflen); 174 if (r->buflen) { 175 scsi_init_iovec(r, r->buflen); 176 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { 177 qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len); 178 } else if (!r->req.retry) { 179 uint32_t len; 180 qemu_get_be32s(f, &len); 181 r->iov.iov_len = len; 182 assert(r->iov.iov_len <= r->buflen); 183 qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len); 184 } 185 } 186 187 qemu_iovec_init_external(&r->qiov, &r->iov, 1); 188 } 189 190 static bool scsi_disk_req_check_error(SCSIDiskReq *r, int ret, bool acct_failed) 191 { 192 if (r->req.io_canceled) { 193 scsi_req_cancel_complete(&r->req); 194 return true; 195 } 196 197 if (ret < 0 || (r->status && *r->status)) { 198 return scsi_handle_rw_error(r, -ret, acct_failed); 199 } 200 201 return false; 202 } 203 204 static void scsi_aio_complete(void *opaque, int ret) 205 { 206 SCSIDiskReq *r = (SCSIDiskReq *)opaque; 207 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 208 209 assert(r->req.aiocb != NULL); 210 r->req.aiocb = NULL; 211 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); 212 if (scsi_disk_req_check_error(r, ret, true)) { 213 goto done; 214 } 215 216 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); 217 scsi_req_complete(&r->req, GOOD); 218 219 done: 220 aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); 221 scsi_req_unref(&r->req); 222 } 223 224 static bool scsi_is_cmd_fua(SCSICommand *cmd) 225 { 226 switch (cmd->buf[0]) { 227 case READ_10: 228 case READ_12: 229 case READ_16: 230 case WRITE_10: 231 case WRITE_12: 232 case WRITE_16: 233 return (cmd->buf[1] & 8) != 0; 234 235 case VERIFY_10: 236 case VERIFY_12: 237 case VERIFY_16: 238 case WRITE_VERIFY_10: 239 case WRITE_VERIFY_12: 240 case WRITE_VERIFY_16: 241 return true; 242 243 case READ_6: 244 case WRITE_6: 245 default: 246 return false; 247 } 248 } 249 250 static void scsi_write_do_fua(SCSIDiskReq *r) 251 { 252 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 253 254 assert(r->req.aiocb == NULL); 255 assert(!r->req.io_canceled); 256 257 if (r->need_fua_emulation) { 258 
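        /*
         * FUA (the Force Unit Access bit, bit 3 of byte 1 in the
         * READ/WRITE(10/12/16) CDBs, see scsi_is_cmd_fua()) is emulated
         * with an explicit flush once the data itself has been written;
         * scsi_aio_complete() then finishes the request.
         */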
block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0, 259 BLOCK_ACCT_FLUSH); 260 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r); 261 return; 262 } 263 264 scsi_req_complete(&r->req, GOOD); 265 scsi_req_unref(&r->req); 266 } 267 268 static void scsi_dma_complete_noio(SCSIDiskReq *r, int ret) 269 { 270 assert(r->req.aiocb == NULL); 271 if (scsi_disk_req_check_error(r, ret, false)) { 272 goto done; 273 } 274 275 r->sector += r->sector_count; 276 r->sector_count = 0; 277 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { 278 scsi_write_do_fua(r); 279 return; 280 } else { 281 scsi_req_complete(&r->req, GOOD); 282 } 283 284 done: 285 scsi_req_unref(&r->req); 286 } 287 288 static void scsi_dma_complete(void *opaque, int ret) 289 { 290 SCSIDiskReq *r = (SCSIDiskReq *)opaque; 291 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 292 293 assert(r->req.aiocb != NULL); 294 r->req.aiocb = NULL; 295 296 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); 297 if (ret < 0) { 298 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct); 299 } else { 300 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); 301 } 302 scsi_dma_complete_noio(r, ret); 303 aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); 304 } 305 306 static void scsi_read_complete(void * opaque, int ret) 307 { 308 SCSIDiskReq *r = (SCSIDiskReq *)opaque; 309 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 310 int n; 311 312 assert(r->req.aiocb != NULL); 313 r->req.aiocb = NULL; 314 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); 315 if (scsi_disk_req_check_error(r, ret, true)) { 316 goto done; 317 } 318 319 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); 320 DPRINTF("Data ready tag=0x%x len=%zd\n", r->req.tag, r->qiov.size); 321 322 n = r->qiov.size / 512; 323 r->sector += n; 324 r->sector_count -= n; 325 scsi_req_data(&r->req, r->qiov.size); 326 327 done: 328 scsi_req_unref(&r->req); 329 aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); 330 } 331 332 /* Actually issue a read to the block device. */ 333 static void scsi_do_read(SCSIDiskReq *r, int ret) 334 { 335 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 336 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s)); 337 338 assert (r->req.aiocb == NULL); 339 if (scsi_disk_req_check_error(r, ret, false)) { 340 goto done; 341 } 342 343 /* The request is used as the AIO opaque value, so add a ref. 
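     * That reference is dropped by the completion callback
     * (scsi_read_complete, or scsi_dma_complete for scatter/gather
     * requests); the unref at the end of this function balances the
     * reference taken by the caller.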
*/ 344 scsi_req_ref(&r->req); 345 346 if (r->req.sg) { 347 dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_READ); 348 r->req.resid -= r->req.sg->size; 349 r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk), 350 r->req.sg, r->sector << BDRV_SECTOR_BITS, 351 BDRV_SECTOR_SIZE, 352 sdc->dma_readv, r, scsi_dma_complete, r, 353 DMA_DIRECTION_FROM_DEVICE); 354 } else { 355 scsi_init_iovec(r, SCSI_DMA_BUF_SIZE); 356 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 357 r->qiov.size, BLOCK_ACCT_READ); 358 r->req.aiocb = sdc->dma_readv(r->sector << BDRV_SECTOR_BITS, &r->qiov, 359 scsi_read_complete, r, r); 360 } 361 362 done: 363 scsi_req_unref(&r->req); 364 } 365 366 static void scsi_do_read_cb(void *opaque, int ret) 367 { 368 SCSIDiskReq *r = (SCSIDiskReq *)opaque; 369 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 370 371 assert (r->req.aiocb != NULL); 372 r->req.aiocb = NULL; 373 374 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); 375 if (ret < 0) { 376 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct); 377 } else { 378 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); 379 } 380 scsi_do_read(opaque, ret); 381 aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); 382 } 383 384 /* Read more data from scsi device into buffer. */ 385 static void scsi_read_data(SCSIRequest *req) 386 { 387 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 388 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 389 bool first; 390 391 DPRINTF("Read sector_count=%d\n", r->sector_count); 392 if (r->sector_count == 0) { 393 /* This also clears the sense buffer for REQUEST SENSE. */ 394 scsi_req_complete(&r->req, GOOD); 395 return; 396 } 397 398 /* No data transfer may already be in progress */ 399 assert(r->req.aiocb == NULL); 400 401 /* The request is used as the AIO opaque value, so add a ref. */ 402 scsi_req_ref(&r->req); 403 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { 404 DPRINTF("Data transfer direction invalid\n"); 405 scsi_read_complete(r, -EINVAL); 406 return; 407 } 408 409 if (!blk_is_available(req->dev->conf.blk)) { 410 scsi_read_complete(r, -ENOMEDIUM); 411 return; 412 } 413 414 first = !r->started; 415 r->started = true; 416 if (first && r->need_fua_emulation) { 417 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0, 418 BLOCK_ACCT_FLUSH); 419 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_do_read_cb, r); 420 } else { 421 scsi_do_read(r, 0); 422 } 423 } 424 425 /* 426 * scsi_handle_rw_error has two return values. False means that the error 427 * must be ignored, true means that the error has been processed and the 428 * caller should not do anything else for this request. Note that 429 * scsi_handle_rw_error always manages its reference counts, independent 430 * of the return value. 431 */ 432 static bool scsi_handle_rw_error(SCSIDiskReq *r, int error, bool acct_failed) 433 { 434 bool is_read = (r->req.cmd.mode == SCSI_XFER_FROM_DEV); 435 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 436 BlockErrorAction action = blk_get_error_action(s->qdev.conf.blk, 437 is_read, error); 438 439 if (action == BLOCK_ERROR_ACTION_REPORT) { 440 if (acct_failed) { 441 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct); 442 } 443 switch (error) { 444 case 0: 445 /* A passthrough command has run and has produced sense data; check 446 * whether the error has to be handled by the guest or should rather 447 * pause the host. 
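             * scsi_sense_buf_to_errno() condenses the passthrough sense data
             * into an errno value; conditions the guest can recover from on
             * its own (ECANCELED, EAGAIN, ENOTCONN, or no error at all) are
             * simply forwarded to it together with the original status byte.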
448 */ 449 assert(r->status && *r->status); 450 error = scsi_sense_buf_to_errno(r->req.sense, sizeof(r->req.sense)); 451 if (error == ECANCELED || error == EAGAIN || error == ENOTCONN || 452 error == 0) { 453 /* These errors are handled by guest. */ 454 scsi_req_complete(&r->req, *r->status); 455 return true; 456 } 457 break; 458 case ENOMEDIUM: 459 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM)); 460 break; 461 case ENOMEM: 462 scsi_check_condition(r, SENSE_CODE(TARGET_FAILURE)); 463 break; 464 case EINVAL: 465 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 466 break; 467 case ENOSPC: 468 scsi_check_condition(r, SENSE_CODE(SPACE_ALLOC_FAILED)); 469 break; 470 default: 471 scsi_check_condition(r, SENSE_CODE(IO_ERROR)); 472 break; 473 } 474 } 475 476 blk_error_action(s->qdev.conf.blk, action, is_read, error); 477 if (action == BLOCK_ERROR_ACTION_IGNORE) { 478 scsi_req_complete(&r->req, 0); 479 return true; 480 } 481 482 if (action == BLOCK_ERROR_ACTION_STOP) { 483 scsi_req_retry(&r->req); 484 } 485 return true; 486 } 487 488 static void scsi_write_complete_noio(SCSIDiskReq *r, int ret) 489 { 490 uint32_t n; 491 492 assert (r->req.aiocb == NULL); 493 if (scsi_disk_req_check_error(r, ret, false)) { 494 goto done; 495 } 496 497 n = r->qiov.size / 512; 498 r->sector += n; 499 r->sector_count -= n; 500 if (r->sector_count == 0) { 501 scsi_write_do_fua(r); 502 return; 503 } else { 504 scsi_init_iovec(r, SCSI_DMA_BUF_SIZE); 505 DPRINTF("Write complete tag=0x%x more=%zd\n", r->req.tag, r->qiov.size); 506 scsi_req_data(&r->req, r->qiov.size); 507 } 508 509 done: 510 scsi_req_unref(&r->req); 511 } 512 513 static void scsi_write_complete(void * opaque, int ret) 514 { 515 SCSIDiskReq *r = (SCSIDiskReq *)opaque; 516 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 517 518 assert (r->req.aiocb != NULL); 519 r->req.aiocb = NULL; 520 521 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); 522 if (ret < 0) { 523 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct); 524 } else { 525 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); 526 } 527 scsi_write_complete_noio(r, ret); 528 aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); 529 } 530 531 static void scsi_write_data(SCSIRequest *req) 532 { 533 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 534 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 535 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s)); 536 537 /* No data transfer may already be in progress */ 538 assert(r->req.aiocb == NULL); 539 540 /* The request is used as the AIO opaque value, so add a ref. */ 541 scsi_req_ref(&r->req); 542 if (r->req.cmd.mode != SCSI_XFER_TO_DEV) { 543 DPRINTF("Data transfer direction invalid\n"); 544 scsi_write_complete_noio(r, -EINVAL); 545 return; 546 } 547 548 if (!r->req.sg && !r->qiov.size) { 549 /* Called for the first time. Ask the driver to send us more data. 
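         * Calling scsi_write_complete_noio() with ret == 0 and an empty qiov
         * sizes the first chunk via scsi_init_iovec() and asks the HBA for
         * the initial payload through scsi_req_data().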
*/ 550 r->started = true; 551 scsi_write_complete_noio(r, 0); 552 return; 553 } 554 if (!blk_is_available(req->dev->conf.blk)) { 555 scsi_write_complete_noio(r, -ENOMEDIUM); 556 return; 557 } 558 559 if (r->req.cmd.buf[0] == VERIFY_10 || r->req.cmd.buf[0] == VERIFY_12 || 560 r->req.cmd.buf[0] == VERIFY_16) { 561 if (r->req.sg) { 562 scsi_dma_complete_noio(r, 0); 563 } else { 564 scsi_write_complete_noio(r, 0); 565 } 566 return; 567 } 568 569 if (r->req.sg) { 570 dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_WRITE); 571 r->req.resid -= r->req.sg->size; 572 r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk), 573 r->req.sg, r->sector << BDRV_SECTOR_BITS, 574 BDRV_SECTOR_SIZE, 575 sdc->dma_writev, r, scsi_dma_complete, r, 576 DMA_DIRECTION_TO_DEVICE); 577 } else { 578 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 579 r->qiov.size, BLOCK_ACCT_WRITE); 580 r->req.aiocb = sdc->dma_writev(r->sector << BDRV_SECTOR_BITS, &r->qiov, 581 scsi_write_complete, r, r); 582 } 583 } 584 585 /* Return a pointer to the data buffer. */ 586 static uint8_t *scsi_get_buf(SCSIRequest *req) 587 { 588 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 589 590 return (uint8_t *)r->iov.iov_base; 591 } 592 593 static int scsi_disk_emulate_vpd_page(SCSIRequest *req, uint8_t *outbuf) 594 { 595 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 596 uint8_t page_code = req->cmd.buf[2]; 597 int start, buflen = 0; 598 599 outbuf[buflen++] = s->qdev.type & 0x1f; 600 outbuf[buflen++] = page_code; 601 outbuf[buflen++] = 0x00; 602 outbuf[buflen++] = 0x00; 603 start = buflen; 604 605 switch (page_code) { 606 case 0x00: /* Supported page codes, mandatory */ 607 { 608 DPRINTF("Inquiry EVPD[Supported pages] " 609 "buffer size %zd\n", req->cmd.xfer); 610 outbuf[buflen++] = 0x00; /* list of supported pages (this page) */ 611 if (s->serial) { 612 outbuf[buflen++] = 0x80; /* unit serial number */ 613 } 614 outbuf[buflen++] = 0x83; /* device identification */ 615 if (s->qdev.type == TYPE_DISK) { 616 outbuf[buflen++] = 0xb0; /* block limits */ 617 outbuf[buflen++] = 0xb1; /* block device characteristics */ 618 outbuf[buflen++] = 0xb2; /* thin provisioning */ 619 } 620 break; 621 } 622 case 0x80: /* Device serial number, optional */ 623 { 624 int l; 625 626 if (!s->serial) { 627 DPRINTF("Inquiry (EVPD[Serial number] not supported\n"); 628 return -1; 629 } 630 631 l = strlen(s->serial); 632 if (l > 36) { 633 l = 36; 634 } 635 636 DPRINTF("Inquiry EVPD[Serial number] " 637 "buffer size %zd\n", req->cmd.xfer); 638 memcpy(outbuf + buflen, s->serial, l); 639 buflen += l; 640 break; 641 } 642 643 case 0x83: /* Device identification page, mandatory */ 644 { 645 const char *str = s->serial ?: blk_name(s->qdev.conf.blk); 646 int max_len = s->serial ? 
20 : 255 - 8; 647 int id_len = strlen(str); 648 649 if (id_len > max_len) { 650 id_len = max_len; 651 } 652 DPRINTF("Inquiry EVPD[Device identification] " 653 "buffer size %zd\n", req->cmd.xfer); 654 655 outbuf[buflen++] = 0x2; /* ASCII */ 656 outbuf[buflen++] = 0; /* not officially assigned */ 657 outbuf[buflen++] = 0; /* reserved */ 658 outbuf[buflen++] = id_len; /* length of data following */ 659 memcpy(outbuf + buflen, str, id_len); 660 buflen += id_len; 661 662 if (s->qdev.wwn) { 663 outbuf[buflen++] = 0x1; /* Binary */ 664 outbuf[buflen++] = 0x3; /* NAA */ 665 outbuf[buflen++] = 0; /* reserved */ 666 outbuf[buflen++] = 8; 667 stq_be_p(&outbuf[buflen], s->qdev.wwn); 668 buflen += 8; 669 } 670 671 if (s->qdev.port_wwn) { 672 outbuf[buflen++] = 0x61; /* SAS / Binary */ 673 outbuf[buflen++] = 0x93; /* PIV / Target port / NAA */ 674 outbuf[buflen++] = 0; /* reserved */ 675 outbuf[buflen++] = 8; 676 stq_be_p(&outbuf[buflen], s->qdev.port_wwn); 677 buflen += 8; 678 } 679 680 if (s->port_index) { 681 outbuf[buflen++] = 0x61; /* SAS / Binary */ 682 683 /* PIV/Target port/relative target port */ 684 outbuf[buflen++] = 0x94; 685 686 outbuf[buflen++] = 0; /* reserved */ 687 outbuf[buflen++] = 4; 688 stw_be_p(&outbuf[buflen + 2], s->port_index); 689 buflen += 4; 690 } 691 break; 692 } 693 case 0xb0: /* block limits */ 694 { 695 SCSIBlockLimits bl = {}; 696 697 if (s->qdev.type == TYPE_ROM) { 698 DPRINTF("Inquiry (EVPD[%02X] not supported for CDROM\n", 699 page_code); 700 return -1; 701 } 702 bl.wsnz = 1; 703 bl.unmap_sectors = 704 s->qdev.conf.discard_granularity / s->qdev.blocksize; 705 bl.min_io_size = 706 s->qdev.conf.min_io_size / s->qdev.blocksize; 707 bl.opt_io_size = 708 s->qdev.conf.opt_io_size / s->qdev.blocksize; 709 bl.max_unmap_sectors = 710 s->max_unmap_size / s->qdev.blocksize; 711 bl.max_io_sectors = 712 s->max_io_size / s->qdev.blocksize; 713 /* 255 descriptors fit in 4 KiB with an 8-byte header */ 714 bl.max_unmap_descr = 255; 715 716 if (s->qdev.type == TYPE_DISK) { 717 int max_transfer_blk = blk_get_max_transfer(s->qdev.conf.blk); 718 int max_io_sectors_blk = 719 max_transfer_blk / s->qdev.blocksize; 720 721 bl.max_io_sectors = 722 MIN_NON_ZERO(max_io_sectors_blk, bl.max_io_sectors); 723 } 724 buflen += scsi_emulate_block_limits(outbuf + buflen, &bl); 725 break; 726 } 727 case 0xb1: /* block device characteristics */ 728 { 729 buflen = 0x40; 730 outbuf[4] = (s->rotation_rate >> 8) & 0xff; 731 outbuf[5] = s->rotation_rate & 0xff; 732 outbuf[6] = 0; /* PRODUCT TYPE */ 733 outbuf[7] = 0; /* WABEREQ | WACEREQ | NOMINAL FORM FACTOR */ 734 outbuf[8] = 0; /* VBULS */ 735 break; 736 } 737 case 0xb2: /* thin provisioning */ 738 { 739 buflen = 8; 740 outbuf[4] = 0; 741 outbuf[5] = 0xe0; /* unmap & write_same 10/16 all supported */ 742 outbuf[6] = s->qdev.conf.discard_granularity ? 
2 : 1; 743 outbuf[7] = 0; 744 break; 745 } 746 default: 747 return -1; 748 } 749 /* done with EVPD */ 750 assert(buflen - start <= 255); 751 outbuf[start - 1] = buflen - start; 752 return buflen; 753 } 754 755 static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf) 756 { 757 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 758 int buflen = 0; 759 760 if (req->cmd.buf[1] & 0x1) { 761 /* Vital product data */ 762 return scsi_disk_emulate_vpd_page(req, outbuf); 763 } 764 765 /* Standard INQUIRY data */ 766 if (req->cmd.buf[2] != 0) { 767 return -1; 768 } 769 770 /* PAGE CODE == 0 */ 771 buflen = req->cmd.xfer; 772 if (buflen > SCSI_MAX_INQUIRY_LEN) { 773 buflen = SCSI_MAX_INQUIRY_LEN; 774 } 775 776 outbuf[0] = s->qdev.type & 0x1f; 777 outbuf[1] = (s->features & (1 << SCSI_DISK_F_REMOVABLE)) ? 0x80 : 0; 778 779 strpadcpy((char *) &outbuf[16], 16, s->product, ' '); 780 strpadcpy((char *) &outbuf[8], 8, s->vendor, ' '); 781 782 memset(&outbuf[32], 0, 4); 783 memcpy(&outbuf[32], s->version, MIN(4, strlen(s->version))); 784 /* 785 * We claim conformance to SPC-3, which is required for guests 786 * to ask for modern features like READ CAPACITY(16) or the 787 * block characteristics VPD page by default. Not all of SPC-3 788 * is actually implemented, but we're good enough. 789 */ 790 outbuf[2] = s->qdev.default_scsi_version; 791 outbuf[3] = 2 | 0x10; /* Format 2, HiSup */ 792 793 if (buflen > 36) { 794 outbuf[4] = buflen - 5; /* Additional Length = (Len - 1) - 4 */ 795 } else { 796 /* If the allocation length of CDB is too small, 797 the additional length is not adjusted */ 798 outbuf[4] = 36 - 5; 799 } 800 801 /* Sync data transfer and TCQ. */ 802 outbuf[7] = 0x10 | (req->bus->info->tcq ? 0x02 : 0); 803 return buflen; 804 } 805 806 static inline bool media_is_dvd(SCSIDiskState *s) 807 { 808 uint64_t nb_sectors; 809 if (s->qdev.type != TYPE_ROM) { 810 return false; 811 } 812 if (!blk_is_available(s->qdev.conf.blk)) { 813 return false; 814 } 815 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 816 return nb_sectors > CD_MAX_SECTORS; 817 } 818 819 static inline bool media_is_cd(SCSIDiskState *s) 820 { 821 uint64_t nb_sectors; 822 if (s->qdev.type != TYPE_ROM) { 823 return false; 824 } 825 if (!blk_is_available(s->qdev.conf.blk)) { 826 return false; 827 } 828 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 829 return nb_sectors <= CD_MAX_SECTORS; 830 } 831 832 static int scsi_read_disc_information(SCSIDiskState *s, SCSIDiskReq *r, 833 uint8_t *outbuf) 834 { 835 uint8_t type = r->req.cmd.buf[1] & 7; 836 837 if (s->qdev.type != TYPE_ROM) { 838 return -1; 839 } 840 841 /* Types 1/2 are only defined for Blu-Ray. 
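     * "type" is the DATA TYPE field in bits 0..2 of CDB byte 1; only the
     * standard disc information block (type 0) is emulated here, anything
     * else is answered with INVALID FIELD IN CDB.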
*/ 842 if (type != 0) { 843 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 844 return -1; 845 } 846 847 memset(outbuf, 0, 34); 848 outbuf[1] = 32; 849 outbuf[2] = 0xe; /* last session complete, disc finalized */ 850 outbuf[3] = 1; /* first track on disc */ 851 outbuf[4] = 1; /* # of sessions */ 852 outbuf[5] = 1; /* first track of last session */ 853 outbuf[6] = 1; /* last track of last session */ 854 outbuf[7] = 0x20; /* unrestricted use */ 855 outbuf[8] = 0x00; /* CD-ROM or DVD-ROM */ 856 /* 9-10-11: most significant byte corresponding bytes 4-5-6 */ 857 /* 12-23: not meaningful for CD-ROM or DVD-ROM */ 858 /* 24-31: disc bar code */ 859 /* 32: disc application code */ 860 /* 33: number of OPC tables */ 861 862 return 34; 863 } 864 865 static int scsi_read_dvd_structure(SCSIDiskState *s, SCSIDiskReq *r, 866 uint8_t *outbuf) 867 { 868 static const int rds_caps_size[5] = { 869 [0] = 2048 + 4, 870 [1] = 4 + 4, 871 [3] = 188 + 4, 872 [4] = 2048 + 4, 873 }; 874 875 uint8_t media = r->req.cmd.buf[1]; 876 uint8_t layer = r->req.cmd.buf[6]; 877 uint8_t format = r->req.cmd.buf[7]; 878 int size = -1; 879 880 if (s->qdev.type != TYPE_ROM) { 881 return -1; 882 } 883 if (media != 0) { 884 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 885 return -1; 886 } 887 888 if (format != 0xff) { 889 if (!blk_is_available(s->qdev.conf.blk)) { 890 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM)); 891 return -1; 892 } 893 if (media_is_cd(s)) { 894 scsi_check_condition(r, SENSE_CODE(INCOMPATIBLE_FORMAT)); 895 return -1; 896 } 897 if (format >= ARRAY_SIZE(rds_caps_size)) { 898 return -1; 899 } 900 size = rds_caps_size[format]; 901 memset(outbuf, 0, size); 902 } 903 904 switch (format) { 905 case 0x00: { 906 /* Physical format information */ 907 uint64_t nb_sectors; 908 if (layer != 0) { 909 goto fail; 910 } 911 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 912 913 outbuf[4] = 1; /* DVD-ROM, part version 1 */ 914 outbuf[5] = 0xf; /* 120mm disc, minimum rate unspecified */ 915 outbuf[6] = 1; /* one layer, read-only (per MMC-2 spec) */ 916 outbuf[7] = 0; /* default densities */ 917 918 stl_be_p(&outbuf[12], (nb_sectors >> 2) - 1); /* end sector */ 919 stl_be_p(&outbuf[16], (nb_sectors >> 2) - 1); /* l0 end sector */ 920 break; 921 } 922 923 case 0x01: /* DVD copyright information, all zeros */ 924 break; 925 926 case 0x03: /* BCA information - invalid field for no BCA info */ 927 return -1; 928 929 case 0x04: /* DVD disc manufacturing information, all zeros */ 930 break; 931 932 case 0xff: { /* List capabilities */ 933 int i; 934 size = 4; 935 for (i = 0; i < ARRAY_SIZE(rds_caps_size); i++) { 936 if (!rds_caps_size[i]) { 937 continue; 938 } 939 outbuf[size] = i; 940 outbuf[size + 1] = 0x40; /* Not writable, readable */ 941 stw_be_p(&outbuf[size + 2], rds_caps_size[i]); 942 size += 4; 943 } 944 break; 945 } 946 947 default: 948 return -1; 949 } 950 951 /* Size of buffer, not including 2 byte size field */ 952 stw_be_p(outbuf, size - 2); 953 return size; 954 955 fail: 956 return -1; 957 } 958 959 static int scsi_event_status_media(SCSIDiskState *s, uint8_t *outbuf) 960 { 961 uint8_t event_code, media_status; 962 963 media_status = 0; 964 if (s->tray_open) { 965 media_status = MS_TRAY_OPEN; 966 } else if (blk_is_inserted(s->qdev.conf.blk)) { 967 media_status = MS_MEDIA_PRESENT; 968 } 969 970 /* Event notification descriptor */ 971 event_code = MEC_NO_CHANGE; 972 if (media_status != MS_TRAY_OPEN) { 973 if (s->media_event) { 974 event_code = MEC_NEW_MEDIA; 975 s->media_event = false; 976 } else if 
(s->eject_request) { 977 event_code = MEC_EJECT_REQUESTED; 978 s->eject_request = false; 979 } 980 } 981 982 outbuf[0] = event_code; 983 outbuf[1] = media_status; 984 985 /* These fields are reserved, just clear them. */ 986 outbuf[2] = 0; 987 outbuf[3] = 0; 988 return 4; 989 } 990 991 static int scsi_get_event_status_notification(SCSIDiskState *s, SCSIDiskReq *r, 992 uint8_t *outbuf) 993 { 994 int size; 995 uint8_t *buf = r->req.cmd.buf; 996 uint8_t notification_class_request = buf[4]; 997 if (s->qdev.type != TYPE_ROM) { 998 return -1; 999 } 1000 if ((buf[1] & 1) == 0) { 1001 /* asynchronous */ 1002 return -1; 1003 } 1004 1005 size = 4; 1006 outbuf[0] = outbuf[1] = 0; 1007 outbuf[3] = 1 << GESN_MEDIA; /* supported events */ 1008 if (notification_class_request & (1 << GESN_MEDIA)) { 1009 outbuf[2] = GESN_MEDIA; 1010 size += scsi_event_status_media(s, &outbuf[size]); 1011 } else { 1012 outbuf[2] = 0x80; 1013 } 1014 stw_be_p(outbuf, size - 4); 1015 return size; 1016 } 1017 1018 static int scsi_get_configuration(SCSIDiskState *s, uint8_t *outbuf) 1019 { 1020 int current; 1021 1022 if (s->qdev.type != TYPE_ROM) { 1023 return -1; 1024 } 1025 1026 if (media_is_dvd(s)) { 1027 current = MMC_PROFILE_DVD_ROM; 1028 } else if (media_is_cd(s)) { 1029 current = MMC_PROFILE_CD_ROM; 1030 } else { 1031 current = MMC_PROFILE_NONE; 1032 } 1033 1034 memset(outbuf, 0, 40); 1035 stl_be_p(&outbuf[0], 36); /* Bytes after the data length field */ 1036 stw_be_p(&outbuf[6], current); 1037 /* outbuf[8] - outbuf[19]: Feature 0 - Profile list */ 1038 outbuf[10] = 0x03; /* persistent, current */ 1039 outbuf[11] = 8; /* two profiles */ 1040 stw_be_p(&outbuf[12], MMC_PROFILE_DVD_ROM); 1041 outbuf[14] = (current == MMC_PROFILE_DVD_ROM); 1042 stw_be_p(&outbuf[16], MMC_PROFILE_CD_ROM); 1043 outbuf[18] = (current == MMC_PROFILE_CD_ROM); 1044 /* outbuf[20] - outbuf[31]: Feature 1 - Core feature */ 1045 stw_be_p(&outbuf[20], 1); 1046 outbuf[22] = 0x08 | 0x03; /* version 2, persistent, current */ 1047 outbuf[23] = 8; 1048 stl_be_p(&outbuf[24], 1); /* SCSI */ 1049 outbuf[28] = 1; /* DBE = 1, mandatory */ 1050 /* outbuf[32] - outbuf[39]: Feature 3 - Removable media feature */ 1051 stw_be_p(&outbuf[32], 3); 1052 outbuf[34] = 0x08 | 0x03; /* version 2, persistent, current */ 1053 outbuf[35] = 4; 1054 outbuf[36] = 0x39; /* tray, load=1, eject=1, unlocked at powerup, lock=1 */ 1055 /* TODO: Random readable, CD read, DVD read, drive serial number, 1056 power management */ 1057 return 40; 1058 } 1059 1060 static int scsi_emulate_mechanism_status(SCSIDiskState *s, uint8_t *outbuf) 1061 { 1062 if (s->qdev.type != TYPE_ROM) { 1063 return -1; 1064 } 1065 memset(outbuf, 0, 8); 1066 outbuf[5] = 1; /* CD-ROM */ 1067 return 8; 1068 } 1069 1070 static int mode_sense_page(SCSIDiskState *s, int page, uint8_t **p_outbuf, 1071 int page_control) 1072 { 1073 static const int mode_sense_valid[0x3f] = { 1074 [MODE_PAGE_HD_GEOMETRY] = (1 << TYPE_DISK), 1075 [MODE_PAGE_FLEXIBLE_DISK_GEOMETRY] = (1 << TYPE_DISK), 1076 [MODE_PAGE_CACHING] = (1 << TYPE_DISK) | (1 << TYPE_ROM), 1077 [MODE_PAGE_R_W_ERROR] = (1 << TYPE_DISK) | (1 << TYPE_ROM), 1078 [MODE_PAGE_AUDIO_CTL] = (1 << TYPE_ROM), 1079 [MODE_PAGE_CAPABILITIES] = (1 << TYPE_ROM), 1080 }; 1081 1082 uint8_t *p = *p_outbuf + 2; 1083 int length; 1084 1085 if ((mode_sense_valid[page] & (1 << s->qdev.type)) == 0) { 1086 return -1; 1087 } 1088 1089 /* 1090 * If Changeable Values are requested, a mask denoting those mode parameters 1091 * that are changeable shall be returned. 
As we currently don't support
     * parameter changes via MODE_SELECT, all bits are returned set to zero.
     * The buffer was already memset to zero by the caller of this function.
     *
     * The offsets here are off by two compared to the descriptions in the
     * SCSI specs, because those include a 2-byte header. This is unfortunate,
     * but it is done so that offsets are consistent within our implementation
     * of MODE SENSE and MODE SELECT. MODE SELECT has to deal with both
     * 2-byte and 4-byte headers.
     */
    switch (page) {
    case MODE_PAGE_HD_GEOMETRY:
        length = 0x16;
        if (page_control == 1) { /* Changeable Values */
            break;
        }
        /* if a geometry hint is available, use it */
        p[0] = (s->qdev.conf.cyls >> 16) & 0xff;
        p[1] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[2] = s->qdev.conf.cyls & 0xff;
        p[3] = s->qdev.conf.heads & 0xff;
        /* Write precomp start cylinder, disabled */
        p[4] = (s->qdev.conf.cyls >> 16) & 0xff;
        p[5] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[6] = s->qdev.conf.cyls & 0xff;
        /* Reduced current start cylinder, disabled */
        p[7] = (s->qdev.conf.cyls >> 16) & 0xff;
        p[8] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[9] = s->qdev.conf.cyls & 0xff;
        /* Device step rate [ns], 200ns */
        p[10] = 0;
        p[11] = 200;
        /* Landing zone cylinder */
        p[12] = 0xff;
        p[13] = 0xff;
        p[14] = 0xff;
        /* Medium rotation rate [rpm], 5400 rpm */
        p[18] = (5400 >> 8) & 0xff;
        p[19] = 5400 & 0xff;
        break;

    case MODE_PAGE_FLEXIBLE_DISK_GEOMETRY:
        length = 0x1e;
        if (page_control == 1) { /* Changeable Values */
            break;
        }
        /* Transfer rate [kbit/s], 5Mbit/s */
        p[0] = 5000 >> 8;
        p[1] = 5000 & 0xff;
        /* if a geometry hint is available, use it */
        p[2] = s->qdev.conf.heads & 0xff;
        p[3] = s->qdev.conf.secs & 0xff;
        p[4] = s->qdev.blocksize >> 8;
        p[6] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[7] = s->qdev.conf.cyls & 0xff;
        /* Write precomp start cylinder, disabled */
        p[8] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[9] = s->qdev.conf.cyls & 0xff;
        /* Reduced current start cylinder, disabled */
        p[10] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[11] = s->qdev.conf.cyls & 0xff;
        /* Device step rate [100us], 100us */
        p[12] = 0;
        p[13] = 1;
        /* Device step pulse width [us], 1us */
        p[14] = 1;
        /* Device head settle delay [100us], 100us */
        p[15] = 0;
        p[16] = 1;
        /* Motor on delay [0.1s], 0.1s */
        p[17] = 1;
        /* Motor off delay [0.1s], 0.1s */
        p[18] = 1;
        /* Medium rotation rate [rpm], 5400 rpm */
        p[26] = (5400 >> 8) & 0xff;
        p[27] = 5400 & 0xff;
        break;

    case MODE_PAGE_CACHING:
        length = 0x12;
        if (page_control == 1 || /* Changeable Values */
            blk_enable_write_cache(s->qdev.conf.blk)) {
            p[0] = 4; /* WCE */
        }
        break;

    case MODE_PAGE_R_W_ERROR:
        length = 10;
        if (page_control == 1) { /* Changeable Values */
            break;
        }
        p[0] = 0x80; /* Automatic Write Reallocation Enabled */
        if (s->qdev.type == TYPE_ROM) {
            p[1] = 0x20; /* Read Retry Count */
        }
        break;

    case MODE_PAGE_AUDIO_CTL:
        length = 14;
        break;

    case MODE_PAGE_CAPABILITIES:
        length = 0x14;
        if (page_control == 1) { /* Changeable Values */
            break;
        }

        p[0] = 0x3b; /* CD-R & CD-RW read */
        p[1] = 0; /* Writing not supported */
        p[2] = 0x7f; /* Audio,
composite, digital out, 1201 mode 2 form 1&2, multi session */ 1202 p[3] = 0xff; /* CD DA, DA accurate, RW supported, 1203 RW corrected, C2 errors, ISRC, 1204 UPC, Bar code */ 1205 p[4] = 0x2d | (s->tray_locked ? 2 : 0); 1206 /* Locking supported, jumper present, eject, tray */ 1207 p[5] = 0; /* no volume & mute control, no 1208 changer */ 1209 p[6] = (50 * 176) >> 8; /* 50x read speed */ 1210 p[7] = (50 * 176) & 0xff; 1211 p[8] = 2 >> 8; /* Two volume levels */ 1212 p[9] = 2 & 0xff; 1213 p[10] = 2048 >> 8; /* 2M buffer */ 1214 p[11] = 2048 & 0xff; 1215 p[12] = (16 * 176) >> 8; /* 16x read speed current */ 1216 p[13] = (16 * 176) & 0xff; 1217 p[16] = (16 * 176) >> 8; /* 16x write speed */ 1218 p[17] = (16 * 176) & 0xff; 1219 p[18] = (16 * 176) >> 8; /* 16x write speed current */ 1220 p[19] = (16 * 176) & 0xff; 1221 break; 1222 1223 default: 1224 return -1; 1225 } 1226 1227 assert(length < 256); 1228 (*p_outbuf)[0] = page; 1229 (*p_outbuf)[1] = length; 1230 *p_outbuf += length + 2; 1231 return length + 2; 1232 } 1233 1234 static int scsi_disk_emulate_mode_sense(SCSIDiskReq *r, uint8_t *outbuf) 1235 { 1236 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1237 uint64_t nb_sectors; 1238 bool dbd; 1239 int page, buflen, ret, page_control; 1240 uint8_t *p; 1241 uint8_t dev_specific_param; 1242 1243 dbd = (r->req.cmd.buf[1] & 0x8) != 0; 1244 page = r->req.cmd.buf[2] & 0x3f; 1245 page_control = (r->req.cmd.buf[2] & 0xc0) >> 6; 1246 DPRINTF("Mode Sense(%d) (page %d, xfer %zd, page_control %d)\n", 1247 (r->req.cmd.buf[0] == MODE_SENSE) ? 6 : 10, page, r->req.cmd.xfer, page_control); 1248 memset(outbuf, 0, r->req.cmd.xfer); 1249 p = outbuf; 1250 1251 if (s->qdev.type == TYPE_DISK) { 1252 dev_specific_param = s->features & (1 << SCSI_DISK_F_DPOFUA) ? 0x10 : 0; 1253 if (blk_is_read_only(s->qdev.conf.blk)) { 1254 dev_specific_param |= 0x80; /* Readonly. */ 1255 } 1256 } else { 1257 /* MMC prescribes that CD/DVD drives have no block descriptors, 1258 * and defines no device-specific parameter. */ 1259 dev_specific_param = 0x00; 1260 dbd = true; 1261 } 1262 1263 if (r->req.cmd.buf[0] == MODE_SENSE) { 1264 p[1] = 0; /* Default media type. */ 1265 p[2] = dev_specific_param; 1266 p[3] = 0; /* Block descriptor length. */ 1267 p += 4; 1268 } else { /* MODE_SENSE_10 */ 1269 p[2] = 0; /* Default media type. */ 1270 p[3] = dev_specific_param; 1271 p[6] = p[7] = 0; /* Block descriptor length. 
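         * The MODE SENSE(10) header is 8 bytes: mode data length in bytes
         * 0-1, medium type in byte 2, device-specific parameter in byte 3
         * and block descriptor length in bytes 6-7 (outbuf[7] is set to 8
         * further down when a block descriptor is actually appended).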
*/ 1272 p += 8; 1273 } 1274 1275 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 1276 if (!dbd && nb_sectors) { 1277 if (r->req.cmd.buf[0] == MODE_SENSE) { 1278 outbuf[3] = 8; /* Block descriptor length */ 1279 } else { /* MODE_SENSE_10 */ 1280 outbuf[7] = 8; /* Block descriptor length */ 1281 } 1282 nb_sectors /= (s->qdev.blocksize / 512); 1283 if (nb_sectors > 0xffffff) { 1284 nb_sectors = 0; 1285 } 1286 p[0] = 0; /* media density code */ 1287 p[1] = (nb_sectors >> 16) & 0xff; 1288 p[2] = (nb_sectors >> 8) & 0xff; 1289 p[3] = nb_sectors & 0xff; 1290 p[4] = 0; /* reserved */ 1291 p[5] = 0; /* bytes 5-7 are the sector size in bytes */ 1292 p[6] = s->qdev.blocksize >> 8; 1293 p[7] = 0; 1294 p += 8; 1295 } 1296 1297 if (page_control == 3) { 1298 /* Saved Values */ 1299 scsi_check_condition(r, SENSE_CODE(SAVING_PARAMS_NOT_SUPPORTED)); 1300 return -1; 1301 } 1302 1303 if (page == 0x3f) { 1304 for (page = 0; page <= 0x3e; page++) { 1305 mode_sense_page(s, page, &p, page_control); 1306 } 1307 } else { 1308 ret = mode_sense_page(s, page, &p, page_control); 1309 if (ret == -1) { 1310 return -1; 1311 } 1312 } 1313 1314 buflen = p - outbuf; 1315 /* 1316 * The mode data length field specifies the length in bytes of the 1317 * following data that is available to be transferred. The mode data 1318 * length does not include itself. 1319 */ 1320 if (r->req.cmd.buf[0] == MODE_SENSE) { 1321 outbuf[0] = buflen - 1; 1322 } else { /* MODE_SENSE_10 */ 1323 outbuf[0] = ((buflen - 2) >> 8) & 0xff; 1324 outbuf[1] = (buflen - 2) & 0xff; 1325 } 1326 return buflen; 1327 } 1328 1329 static int scsi_disk_emulate_read_toc(SCSIRequest *req, uint8_t *outbuf) 1330 { 1331 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 1332 int start_track, format, msf, toclen; 1333 uint64_t nb_sectors; 1334 1335 msf = req->cmd.buf[1] & 2; 1336 format = req->cmd.buf[2] & 0xf; 1337 start_track = req->cmd.buf[6]; 1338 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 1339 DPRINTF("Read TOC (track %d format %d msf %d)\n", start_track, format, msf >> 1); 1340 nb_sectors /= s->qdev.blocksize / 512; 1341 switch (format) { 1342 case 0: 1343 toclen = cdrom_read_toc(nb_sectors, outbuf, msf, start_track); 1344 break; 1345 case 1: 1346 /* multi session : only a single session defined */ 1347 toclen = 12; 1348 memset(outbuf, 0, 12); 1349 outbuf[1] = 0x0a; 1350 outbuf[2] = 0x01; 1351 outbuf[3] = 0x01; 1352 break; 1353 case 2: 1354 toclen = cdrom_read_toc_raw(nb_sectors, outbuf, msf, start_track); 1355 break; 1356 default: 1357 return -1; 1358 } 1359 return toclen; 1360 } 1361 1362 static int scsi_disk_emulate_start_stop(SCSIDiskReq *r) 1363 { 1364 SCSIRequest *req = &r->req; 1365 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 1366 bool start = req->cmd.buf[4] & 1; 1367 bool loej = req->cmd.buf[4] & 2; /* load on start, eject on !start */ 1368 int pwrcnd = req->cmd.buf[4] & 0xf0; 1369 1370 if (pwrcnd) { 1371 /* eject/load only happens for power condition == 0 */ 1372 return 0; 1373 } 1374 1375 if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) && loej) { 1376 if (!start && !s->tray_open && s->tray_locked) { 1377 scsi_check_condition(r, 1378 blk_is_inserted(s->qdev.conf.blk) 1379 ? 
SENSE_CODE(ILLEGAL_REQ_REMOVAL_PREVENTED) 1380 : SENSE_CODE(NOT_READY_REMOVAL_PREVENTED)); 1381 return -1; 1382 } 1383 1384 if (s->tray_open != !start) { 1385 blk_eject(s->qdev.conf.blk, !start); 1386 s->tray_open = !start; 1387 } 1388 } 1389 return 0; 1390 } 1391 1392 static void scsi_disk_emulate_read_data(SCSIRequest *req) 1393 { 1394 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 1395 int buflen = r->iov.iov_len; 1396 1397 if (buflen) { 1398 DPRINTF("Read buf_len=%d\n", buflen); 1399 r->iov.iov_len = 0; 1400 r->started = true; 1401 scsi_req_data(&r->req, buflen); 1402 return; 1403 } 1404 1405 /* This also clears the sense buffer for REQUEST SENSE. */ 1406 scsi_req_complete(&r->req, GOOD); 1407 } 1408 1409 static int scsi_disk_check_mode_select(SCSIDiskState *s, int page, 1410 uint8_t *inbuf, int inlen) 1411 { 1412 uint8_t mode_current[SCSI_MAX_MODE_LEN]; 1413 uint8_t mode_changeable[SCSI_MAX_MODE_LEN]; 1414 uint8_t *p; 1415 int len, expected_len, changeable_len, i; 1416 1417 /* The input buffer does not include the page header, so it is 1418 * off by 2 bytes. 1419 */ 1420 expected_len = inlen + 2; 1421 if (expected_len > SCSI_MAX_MODE_LEN) { 1422 return -1; 1423 } 1424 1425 p = mode_current; 1426 memset(mode_current, 0, inlen + 2); 1427 len = mode_sense_page(s, page, &p, 0); 1428 if (len < 0 || len != expected_len) { 1429 return -1; 1430 } 1431 1432 p = mode_changeable; 1433 memset(mode_changeable, 0, inlen + 2); 1434 changeable_len = mode_sense_page(s, page, &p, 1); 1435 assert(changeable_len == len); 1436 1437 /* Check that unchangeable bits are the same as what MODE SENSE 1438 * would return. 1439 */ 1440 for (i = 2; i < len; i++) { 1441 if (((mode_current[i] ^ inbuf[i - 2]) & ~mode_changeable[i]) != 0) { 1442 return -1; 1443 } 1444 } 1445 return 0; 1446 } 1447 1448 static void scsi_disk_apply_mode_select(SCSIDiskState *s, int page, uint8_t *p) 1449 { 1450 switch (page) { 1451 case MODE_PAGE_CACHING: 1452 blk_set_enable_write_cache(s->qdev.conf.blk, (p[0] & 4) != 0); 1453 break; 1454 1455 default: 1456 break; 1457 } 1458 } 1459 1460 static int mode_select_pages(SCSIDiskReq *r, uint8_t *p, int len, bool change) 1461 { 1462 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1463 1464 while (len > 0) { 1465 int page, subpage, page_len; 1466 1467 /* Parse both possible formats for the mode page headers. 
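         * With the SPF bit (0x40) set the header is the sub-page format:
         * byte 1 holds the subpage code and bytes 2-3 the big-endian page
         * length.  Otherwise byte 1 alone is the page length, e.g. a page-0
         * format CACHING page sent by MODE SELECT starts with the two bytes
         * 0x08 0x12.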
*/ 1468 page = p[0] & 0x3f; 1469 if (p[0] & 0x40) { 1470 if (len < 4) { 1471 goto invalid_param_len; 1472 } 1473 subpage = p[1]; 1474 page_len = lduw_be_p(&p[2]); 1475 p += 4; 1476 len -= 4; 1477 } else { 1478 if (len < 2) { 1479 goto invalid_param_len; 1480 } 1481 subpage = 0; 1482 page_len = p[1]; 1483 p += 2; 1484 len -= 2; 1485 } 1486 1487 if (subpage) { 1488 goto invalid_param; 1489 } 1490 if (page_len > len) { 1491 goto invalid_param_len; 1492 } 1493 1494 if (!change) { 1495 if (scsi_disk_check_mode_select(s, page, p, page_len) < 0) { 1496 goto invalid_param; 1497 } 1498 } else { 1499 scsi_disk_apply_mode_select(s, page, p); 1500 } 1501 1502 p += page_len; 1503 len -= page_len; 1504 } 1505 return 0; 1506 1507 invalid_param: 1508 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM)); 1509 return -1; 1510 1511 invalid_param_len: 1512 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN)); 1513 return -1; 1514 } 1515 1516 static void scsi_disk_emulate_mode_select(SCSIDiskReq *r, uint8_t *inbuf) 1517 { 1518 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1519 uint8_t *p = inbuf; 1520 int cmd = r->req.cmd.buf[0]; 1521 int len = r->req.cmd.xfer; 1522 int hdr_len = (cmd == MODE_SELECT ? 4 : 8); 1523 int bd_len; 1524 int pass; 1525 1526 /* We only support PF=1, SP=0. */ 1527 if ((r->req.cmd.buf[1] & 0x11) != 0x10) { 1528 goto invalid_field; 1529 } 1530 1531 if (len < hdr_len) { 1532 goto invalid_param_len; 1533 } 1534 1535 bd_len = (cmd == MODE_SELECT ? p[3] : lduw_be_p(&p[6])); 1536 len -= hdr_len; 1537 p += hdr_len; 1538 if (len < bd_len) { 1539 goto invalid_param_len; 1540 } 1541 if (bd_len != 0 && bd_len != 8) { 1542 goto invalid_param; 1543 } 1544 1545 len -= bd_len; 1546 p += bd_len; 1547 1548 /* Ensure no change is made if there is an error! */ 1549 for (pass = 0; pass < 2; pass++) { 1550 if (mode_select_pages(r, p, len, pass == 1) < 0) { 1551 assert(pass == 0); 1552 return; 1553 } 1554 } 1555 if (!blk_enable_write_cache(s->qdev.conf.blk)) { 1556 /* The request is used as the AIO opaque value, so add a ref. */ 1557 scsi_req_ref(&r->req); 1558 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0, 1559 BLOCK_ACCT_FLUSH); 1560 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r); 1561 return; 1562 } 1563 1564 scsi_req_complete(&r->req, GOOD); 1565 return; 1566 1567 invalid_param: 1568 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM)); 1569 return; 1570 1571 invalid_param_len: 1572 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN)); 1573 return; 1574 1575 invalid_field: 1576 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 1577 } 1578 1579 static inline bool check_lba_range(SCSIDiskState *s, 1580 uint64_t sector_num, uint32_t nb_sectors) 1581 { 1582 /* 1583 * The first line tests that no overflow happens when computing the last 1584 * sector. The second line tests that the last accessed sector is in 1585 * range. 1586 * 1587 * Careful, the computations should not underflow for nb_sectors == 0, 1588 * and a 0-block read to the first LBA beyond the end of device is 1589 * valid. 
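     * For example, with max_lba == 99 a request for (sector_num 100,
     * nb_sectors 0) is accepted, while (100, 1) is rejected, and a wrapping
     * (UINT64_MAX, 2) fails the first test instead of being let through.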
1590 */ 1591 return (sector_num <= sector_num + nb_sectors && 1592 sector_num + nb_sectors <= s->qdev.max_lba + 1); 1593 } 1594 1595 typedef struct UnmapCBData { 1596 SCSIDiskReq *r; 1597 uint8_t *inbuf; 1598 int count; 1599 } UnmapCBData; 1600 1601 static void scsi_unmap_complete(void *opaque, int ret); 1602 1603 static void scsi_unmap_complete_noio(UnmapCBData *data, int ret) 1604 { 1605 SCSIDiskReq *r = data->r; 1606 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1607 uint64_t sector_num; 1608 uint32_t nb_sectors; 1609 1610 assert(r->req.aiocb == NULL); 1611 if (scsi_disk_req_check_error(r, ret, false)) { 1612 goto done; 1613 } 1614 1615 if (data->count > 0) { 1616 sector_num = ldq_be_p(&data->inbuf[0]); 1617 nb_sectors = ldl_be_p(&data->inbuf[8]) & 0xffffffffULL; 1618 if (!check_lba_range(s, sector_num, nb_sectors)) { 1619 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE)); 1620 goto done; 1621 } 1622 1623 r->req.aiocb = blk_aio_pdiscard(s->qdev.conf.blk, 1624 sector_num * s->qdev.blocksize, 1625 nb_sectors * s->qdev.blocksize, 1626 scsi_unmap_complete, data); 1627 data->count--; 1628 data->inbuf += 16; 1629 return; 1630 } 1631 1632 scsi_req_complete(&r->req, GOOD); 1633 1634 done: 1635 scsi_req_unref(&r->req); 1636 g_free(data); 1637 } 1638 1639 static void scsi_unmap_complete(void *opaque, int ret) 1640 { 1641 UnmapCBData *data = opaque; 1642 SCSIDiskReq *r = data->r; 1643 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1644 1645 assert(r->req.aiocb != NULL); 1646 r->req.aiocb = NULL; 1647 1648 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); 1649 scsi_unmap_complete_noio(data, ret); 1650 aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); 1651 } 1652 1653 static void scsi_disk_emulate_unmap(SCSIDiskReq *r, uint8_t *inbuf) 1654 { 1655 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1656 uint8_t *p = inbuf; 1657 int len = r->req.cmd.xfer; 1658 UnmapCBData *data; 1659 1660 /* Reject ANCHOR=1. */ 1661 if (r->req.cmd.buf[1] & 0x1) { 1662 goto invalid_field; 1663 } 1664 1665 if (len < 8) { 1666 goto invalid_param_len; 1667 } 1668 if (len < lduw_be_p(&p[0]) + 2) { 1669 goto invalid_param_len; 1670 } 1671 if (len < lduw_be_p(&p[2]) + 8) { 1672 goto invalid_param_len; 1673 } 1674 if (lduw_be_p(&p[2]) & 15) { 1675 goto invalid_param_len; 1676 } 1677 1678 if (blk_is_read_only(s->qdev.conf.blk)) { 1679 scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED)); 1680 return; 1681 } 1682 1683 data = g_new0(UnmapCBData, 1); 1684 data->r = r; 1685 data->inbuf = &p[8]; 1686 data->count = lduw_be_p(&p[2]) >> 4; 1687 1688 /* The matching unref is in scsi_unmap_complete, before data is freed. 
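     * Each UNMAP block descriptor is 16 bytes (8-byte LBA, 4-byte block
     * count, 4 reserved bytes), hence the ">> 4" above when computing the
     * descriptor count and the "inbuf += 16" in the completion path.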
*/ 1689 scsi_req_ref(&r->req); 1690 scsi_unmap_complete_noio(data, 0); 1691 return; 1692 1693 invalid_param_len: 1694 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN)); 1695 return; 1696 1697 invalid_field: 1698 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 1699 } 1700 1701 typedef struct WriteSameCBData { 1702 SCSIDiskReq *r; 1703 int64_t sector; 1704 int nb_sectors; 1705 QEMUIOVector qiov; 1706 struct iovec iov; 1707 } WriteSameCBData; 1708 1709 static void scsi_write_same_complete(void *opaque, int ret) 1710 { 1711 WriteSameCBData *data = opaque; 1712 SCSIDiskReq *r = data->r; 1713 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1714 1715 assert(r->req.aiocb != NULL); 1716 r->req.aiocb = NULL; 1717 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); 1718 if (scsi_disk_req_check_error(r, ret, true)) { 1719 goto done; 1720 } 1721 1722 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); 1723 1724 data->nb_sectors -= data->iov.iov_len / 512; 1725 data->sector += data->iov.iov_len / 512; 1726 data->iov.iov_len = MIN(data->nb_sectors * 512, data->iov.iov_len); 1727 if (data->iov.iov_len) { 1728 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 1729 data->iov.iov_len, BLOCK_ACCT_WRITE); 1730 /* Reinitialize qiov, to handle unaligned WRITE SAME request 1731 * where final qiov may need smaller size */ 1732 qemu_iovec_init_external(&data->qiov, &data->iov, 1); 1733 r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk, 1734 data->sector << BDRV_SECTOR_BITS, 1735 &data->qiov, 0, 1736 scsi_write_same_complete, data); 1737 aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); 1738 return; 1739 } 1740 1741 scsi_req_complete(&r->req, GOOD); 1742 1743 done: 1744 scsi_req_unref(&r->req); 1745 qemu_vfree(data->iov.iov_base); 1746 g_free(data); 1747 aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); 1748 } 1749 1750 static void scsi_disk_emulate_write_same(SCSIDiskReq *r, uint8_t *inbuf) 1751 { 1752 SCSIRequest *req = &r->req; 1753 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 1754 uint32_t nb_sectors = scsi_data_cdb_xfer(r->req.cmd.buf); 1755 WriteSameCBData *data; 1756 uint8_t *buf; 1757 int i; 1758 1759 /* Fail if PBDATA=1 or LBDATA=1 or ANCHOR=1. */ 1760 if (nb_sectors == 0 || (req->cmd.buf[1] & 0x16)) { 1761 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 1762 return; 1763 } 1764 1765 if (blk_is_read_only(s->qdev.conf.blk)) { 1766 scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED)); 1767 return; 1768 } 1769 if (!check_lba_range(s, r->req.cmd.lba, nb_sectors)) { 1770 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE)); 1771 return; 1772 } 1773 1774 if ((req->cmd.buf[1] & 0x1) || buffer_is_zero(inbuf, s->qdev.blocksize)) { 1775 int flags = (req->cmd.buf[1] & 0x8) ? BDRV_REQ_MAY_UNMAP : 0; 1776 1777 /* The request is used as the AIO opaque value, so add a ref. 
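         * This branch is taken when the payload is all zeroes or the CDB
         * asked for no data-out buffer (bit 0x1, the NDOB bit of WRITE
         * SAME(16)), so the whole range can be covered by one
         * blk_aio_pwrite_zeroes() call; BDRV_REQ_MAY_UNMAP is added when the
         * UNMAP bit lets the blocks be discarded instead of written.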
*/ 1778 scsi_req_ref(&r->req); 1779 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 1780 nb_sectors * s->qdev.blocksize, 1781 BLOCK_ACCT_WRITE); 1782 r->req.aiocb = blk_aio_pwrite_zeroes(s->qdev.conf.blk, 1783 r->req.cmd.lba * s->qdev.blocksize, 1784 nb_sectors * s->qdev.blocksize, 1785 flags, scsi_aio_complete, r); 1786 return; 1787 } 1788 1789 data = g_new0(WriteSameCBData, 1); 1790 data->r = r; 1791 data->sector = r->req.cmd.lba * (s->qdev.blocksize / 512); 1792 data->nb_sectors = nb_sectors * (s->qdev.blocksize / 512); 1793 data->iov.iov_len = MIN(data->nb_sectors * 512, SCSI_WRITE_SAME_MAX); 1794 data->iov.iov_base = buf = blk_blockalign(s->qdev.conf.blk, 1795 data->iov.iov_len); 1796 qemu_iovec_init_external(&data->qiov, &data->iov, 1); 1797 1798 for (i = 0; i < data->iov.iov_len; i += s->qdev.blocksize) { 1799 memcpy(&buf[i], inbuf, s->qdev.blocksize); 1800 } 1801 1802 scsi_req_ref(&r->req); 1803 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 1804 data->iov.iov_len, BLOCK_ACCT_WRITE); 1805 r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk, 1806 data->sector << BDRV_SECTOR_BITS, 1807 &data->qiov, 0, 1808 scsi_write_same_complete, data); 1809 } 1810 1811 static void scsi_disk_emulate_write_data(SCSIRequest *req) 1812 { 1813 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 1814 1815 if (r->iov.iov_len) { 1816 int buflen = r->iov.iov_len; 1817 DPRINTF("Write buf_len=%d\n", buflen); 1818 r->iov.iov_len = 0; 1819 scsi_req_data(&r->req, buflen); 1820 return; 1821 } 1822 1823 switch (req->cmd.buf[0]) { 1824 case MODE_SELECT: 1825 case MODE_SELECT_10: 1826 /* This also clears the sense buffer for REQUEST SENSE. */ 1827 scsi_disk_emulate_mode_select(r, r->iov.iov_base); 1828 break; 1829 1830 case UNMAP: 1831 scsi_disk_emulate_unmap(r, r->iov.iov_base); 1832 break; 1833 1834 case VERIFY_10: 1835 case VERIFY_12: 1836 case VERIFY_16: 1837 if (r->req.status == -1) { 1838 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 1839 } 1840 break; 1841 1842 case WRITE_SAME_10: 1843 case WRITE_SAME_16: 1844 scsi_disk_emulate_write_same(r, r->iov.iov_base); 1845 break; 1846 1847 default: 1848 abort(); 1849 } 1850 } 1851 1852 static int32_t scsi_disk_emulate_command(SCSIRequest *req, uint8_t *buf) 1853 { 1854 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 1855 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 1856 uint64_t nb_sectors; 1857 uint8_t *outbuf; 1858 int buflen; 1859 1860 switch (req->cmd.buf[0]) { 1861 case INQUIRY: 1862 case MODE_SENSE: 1863 case MODE_SENSE_10: 1864 case RESERVE: 1865 case RESERVE_10: 1866 case RELEASE: 1867 case RELEASE_10: 1868 case START_STOP: 1869 case ALLOW_MEDIUM_REMOVAL: 1870 case GET_CONFIGURATION: 1871 case GET_EVENT_STATUS_NOTIFICATION: 1872 case MECHANISM_STATUS: 1873 case REQUEST_SENSE: 1874 break; 1875 1876 default: 1877 if (!blk_is_available(s->qdev.conf.blk)) { 1878 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM)); 1879 return 0; 1880 } 1881 break; 1882 } 1883 1884 /* 1885 * FIXME: we shouldn't return anything bigger than 4k, but the code 1886 * requires the buffer to be as big as req->cmd.xfer in several 1887 * places. So, do not allow CDBs with a very large ALLOCATION 1888 * LENGTH. The real fix would be to modify scsi_read_data and 1889 * dma_buf_read, so that they return data beyond the buflen 1890 * as all zeros. 
1891 */ 1892 if (req->cmd.xfer > 65536) { 1893 goto illegal_request; 1894 } 1895 r->buflen = MAX(4096, req->cmd.xfer); 1896 1897 if (!r->iov.iov_base) { 1898 r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen); 1899 } 1900 1901 buflen = req->cmd.xfer; 1902 outbuf = r->iov.iov_base; 1903 memset(outbuf, 0, r->buflen); 1904 switch (req->cmd.buf[0]) { 1905 case TEST_UNIT_READY: 1906 assert(blk_is_available(s->qdev.conf.blk)); 1907 break; 1908 case INQUIRY: 1909 buflen = scsi_disk_emulate_inquiry(req, outbuf); 1910 if (buflen < 0) { 1911 goto illegal_request; 1912 } 1913 break; 1914 case MODE_SENSE: 1915 case MODE_SENSE_10: 1916 buflen = scsi_disk_emulate_mode_sense(r, outbuf); 1917 if (buflen < 0) { 1918 goto illegal_request; 1919 } 1920 break; 1921 case READ_TOC: 1922 buflen = scsi_disk_emulate_read_toc(req, outbuf); 1923 if (buflen < 0) { 1924 goto illegal_request; 1925 } 1926 break; 1927 case RESERVE: 1928 if (req->cmd.buf[1] & 1) { 1929 goto illegal_request; 1930 } 1931 break; 1932 case RESERVE_10: 1933 if (req->cmd.buf[1] & 3) { 1934 goto illegal_request; 1935 } 1936 break; 1937 case RELEASE: 1938 if (req->cmd.buf[1] & 1) { 1939 goto illegal_request; 1940 } 1941 break; 1942 case RELEASE_10: 1943 if (req->cmd.buf[1] & 3) { 1944 goto illegal_request; 1945 } 1946 break; 1947 case START_STOP: 1948 if (scsi_disk_emulate_start_stop(r) < 0) { 1949 return 0; 1950 } 1951 break; 1952 case ALLOW_MEDIUM_REMOVAL: 1953 s->tray_locked = req->cmd.buf[4] & 1; 1954 blk_lock_medium(s->qdev.conf.blk, req->cmd.buf[4] & 1); 1955 break; 1956 case READ_CAPACITY_10: 1957 /* The normal LEN field for this command is zero. */ 1958 memset(outbuf, 0, 8); 1959 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 1960 if (!nb_sectors) { 1961 scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY)); 1962 return 0; 1963 } 1964 if ((req->cmd.buf[8] & 1) == 0 && req->cmd.lba) { 1965 goto illegal_request; 1966 } 1967 nb_sectors /= s->qdev.blocksize / 512; 1968 /* Returned value is the address of the last sector. */ 1969 nb_sectors--; 1970 /* Remember the new size for read/write sanity checking. */ 1971 s->qdev.max_lba = nb_sectors; 1972 /* Clip to 2TB, instead of returning capacity modulo 2TB. */ 1973 if (nb_sectors > UINT32_MAX) { 1974 nb_sectors = UINT32_MAX; 1975 } 1976 outbuf[0] = (nb_sectors >> 24) & 0xff; 1977 outbuf[1] = (nb_sectors >> 16) & 0xff; 1978 outbuf[2] = (nb_sectors >> 8) & 0xff; 1979 outbuf[3] = nb_sectors & 0xff; 1980 outbuf[4] = 0; 1981 outbuf[5] = 0; 1982 outbuf[6] = s->qdev.blocksize >> 8; 1983 outbuf[7] = 0; 1984 break; 1985 case REQUEST_SENSE: 1986 /* Just return "NO SENSE". 
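         * Passing a NULL input buffer to scsi_convert_sense() yields an
         * empty (NO SENSE) response, formatted as fixed or descriptor sense
         * data according to the DESC bit in byte 1 of the CDB.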
    case REQUEST_SENSE:
        /* Just return "NO SENSE".  */
        buflen = scsi_convert_sense(NULL, 0, outbuf, r->buflen,
                                    (req->cmd.buf[1] & 1) == 0);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case MECHANISM_STATUS:
        buflen = scsi_emulate_mechanism_status(s, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case GET_CONFIGURATION:
        buflen = scsi_get_configuration(s, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case GET_EVENT_STATUS_NOTIFICATION:
        buflen = scsi_get_event_status_notification(s, r, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case READ_DISC_INFORMATION:
        buflen = scsi_read_disc_information(s, r, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case READ_DVD_STRUCTURE:
        buflen = scsi_read_dvd_structure(s, r, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case SERVICE_ACTION_IN_16:
        /* Service Action In subcommands. */
        if ((req->cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) {
            DPRINTF("SAI READ CAPACITY(16)\n");
            memset(outbuf, 0, req->cmd.xfer);
            blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
            if (!nb_sectors) {
                scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY));
                return 0;
            }
            if ((req->cmd.buf[14] & 1) == 0 && req->cmd.lba) {
                goto illegal_request;
            }
            nb_sectors /= s->qdev.blocksize / 512;
            /* Returned value is the address of the last sector.  */
            nb_sectors--;
            /* Remember the new size for read/write sanity checking. */
            s->qdev.max_lba = nb_sectors;
            outbuf[0] = (nb_sectors >> 56) & 0xff;
            outbuf[1] = (nb_sectors >> 48) & 0xff;
            outbuf[2] = (nb_sectors >> 40) & 0xff;
            outbuf[3] = (nb_sectors >> 32) & 0xff;
            outbuf[4] = (nb_sectors >> 24) & 0xff;
            outbuf[5] = (nb_sectors >> 16) & 0xff;
            outbuf[6] = (nb_sectors >> 8) & 0xff;
            outbuf[7] = nb_sectors & 0xff;
            outbuf[8] = 0;
            outbuf[9] = 0;
            outbuf[10] = s->qdev.blocksize >> 8;
            outbuf[11] = 0;
            outbuf[12] = 0;
            outbuf[13] = get_physical_block_exp(&s->qdev.conf);

            /* set TPE bit if the format supports discard */
            if (s->qdev.conf.discard_granularity) {
                outbuf[14] = 0x80;
            }

            /* Protection, exponent and lowest lba field left blank. */
            break;
        }
        DPRINTF("Unsupported Service Action In\n");
        goto illegal_request;
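
    /*
     * Unlike READ CAPACITY(10) above, the 16-byte service action reports the
     * last LBA as a full 64-bit big-endian value and is therefore not clipped
     * at 2 TB.  Illustrative values: a 3 TiB image with 512-byte blocks has
     * last LBA 0x17FFFFFFF, which READ CAPACITY(10) would clamp to 0xFFFFFFFF
     * while the bytes above encode it exactly.  Byte 13 carries the physical
     * block exponent and bit 7 of byte 14 (the TPE/LBPME flag) advertises
     * unmap support whenever a discard granularity is configured.
     */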
    case SYNCHRONIZE_CACHE:
        /* The request is used as the AIO opaque value, so add a ref.  */
        scsi_req_ref(&r->req);
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
        return 0;
    case SEEK_10:
        DPRINTF("Seek(10) (sector %" PRId64 ")\n", r->req.cmd.lba);
        if (r->req.cmd.lba > s->qdev.max_lba) {
            goto illegal_lba;
        }
        break;
    case MODE_SELECT:
        DPRINTF("Mode Select(6) (len %lu)\n", (unsigned long)r->req.cmd.xfer);
        break;
    case MODE_SELECT_10:
        DPRINTF("Mode Select(10) (len %lu)\n", (unsigned long)r->req.cmd.xfer);
        break;
    case UNMAP:
        DPRINTF("Unmap (len %lu)\n", (unsigned long)r->req.cmd.xfer);
        break;
    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
        DPRINTF("Verify (bytchk %d)\n", (req->cmd.buf[1] >> 1) & 3);
        if (req->cmd.buf[1] & 6) {
            goto illegal_request;
        }
        break;
    case WRITE_SAME_10:
    case WRITE_SAME_16:
        DPRINTF("WRITE SAME %d (len %lu)\n",
                req->cmd.buf[0] == WRITE_SAME_10 ? 10 : 16,
                (unsigned long)r->req.cmd.xfer);
        break;
    default:
        DPRINTF("Unknown SCSI command (%2.2x=%s)\n", buf[0],
                scsi_command_name(buf[0]));
        scsi_check_condition(r, SENSE_CODE(INVALID_OPCODE));
        return 0;
    }
    assert(!r->req.aiocb);
    r->iov.iov_len = MIN(r->buflen, req->cmd.xfer);
    if (r->iov.iov_len == 0) {
        scsi_req_complete(&r->req, GOOD);
    }
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        assert(r->iov.iov_len == req->cmd.xfer);
        return -r->iov.iov_len;
    } else {
        return r->iov.iov_len;
    }

illegal_request:
    if (r->req.status == -1) {
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
    }
    return 0;

illegal_lba:
    scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
    return 0;
}
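
/*
 * For MODE SELECT, UNMAP and WRITE SAME the negative length returned above
 * tells the HBA that a data-out phase follows; once the payload has been
 * transferred into r->iov, scsi_disk_emulate_write_data() dispatches on the
 * opcode to apply it.
 */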

/* Execute a scsi command.  Returns the length of the data expected by the
   command.  This will be positive for data transfers from the device
   (e.g. disk reads), negative for transfers to the device (e.g. disk writes),
   and zero if the command does not transfer any data.  */

static int32_t scsi_disk_dma_command(SCSIRequest *req, uint8_t *buf)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
    uint32_t len;
    uint8_t command;

    command = buf[0];

    if (!blk_is_available(s->qdev.conf.blk)) {
        scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
        return 0;
    }

    len = scsi_data_cdb_xfer(r->req.cmd.buf);
    switch (command) {
    case READ_6:
    case READ_10:
    case READ_12:
    case READ_16:
        DPRINTF("Read (sector %" PRId64 ", count %u)\n", r->req.cmd.lba, len);
        /* Protection information is not supported.  For SCSI versions 2 and
         * older (as determined by snooping the guest's INQUIRY commands),
         * there is no RD/WR/VRPROTECT, so skip this check in these versions.
         */
        if (s->qdev.scsi_version > 2 && (r->req.cmd.buf[1] & 0xe0)) {
            goto illegal_request;
        }
        if (!check_lba_range(s, r->req.cmd.lba, len)) {
            goto illegal_lba;
        }
        r->sector = r->req.cmd.lba * (s->qdev.blocksize / 512);
        r->sector_count = len * (s->qdev.blocksize / 512);
        break;
    case WRITE_6:
    case WRITE_10:
    case WRITE_12:
    case WRITE_16:
    case WRITE_VERIFY_10:
    case WRITE_VERIFY_12:
    case WRITE_VERIFY_16:
        if (blk_is_read_only(s->qdev.conf.blk)) {
            scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
            return 0;
        }
        DPRINTF("Write %s(sector %" PRId64 ", count %u)\n",
                (command & 0xe) == 0xe ? "And Verify " : "",
                r->req.cmd.lba, len);
        /* fall through */
    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
        /* We get here only for BYTCHK == 0x01 and only for scsi-block.
         * As far as DMA is concerned, we can treat it the same as a write;
         * scsi_block_do_sgio will send VERIFY commands.
         */
        if (s->qdev.scsi_version > 2 && (r->req.cmd.buf[1] & 0xe0)) {
            goto illegal_request;
        }
        if (!check_lba_range(s, r->req.cmd.lba, len)) {
            goto illegal_lba;
        }
        r->sector = r->req.cmd.lba * (s->qdev.blocksize / 512);
        r->sector_count = len * (s->qdev.blocksize / 512);
        break;
    default:
        abort();
    illegal_request:
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        return 0;
    illegal_lba:
        scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
        return 0;
    }
    r->need_fua_emulation = sdc->need_fua_emulation(&r->req.cmd);
    if (r->sector_count == 0) {
        scsi_req_complete(&r->req, GOOD);
    }
    assert(r->iov.iov_len == 0);
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        return -r->sector_count * 512;
    } else {
        return r->sector_count * 512;
    }
}

static void scsi_disk_reset(DeviceState *dev)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev.qdev, dev);
    uint64_t nb_sectors;

    scsi_device_purge_requests(&s->qdev, SENSE_CODE(RESET));

    blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
    nb_sectors /= s->qdev.blocksize / 512;
    if (nb_sectors) {
        nb_sectors--;
    }
    s->qdev.max_lba = nb_sectors;
    /* reset tray statuses */
    s->tray_locked = 0;
    s->tray_open = 0;

    s->qdev.scsi_version = s->qdev.default_scsi_version;
}

static void scsi_disk_resize_cb(void *opaque)
{
    SCSIDiskState *s = opaque;

    /* SPC lists this sense code as available only for
     * direct-access devices.
     */
    if (s->qdev.type == TYPE_DISK) {
        scsi_device_report_change(&s->qdev, SENSE_CODE(CAPACITY_CHANGED));
    }
}

static void scsi_cd_change_media_cb(void *opaque, bool load, Error **errp)
{
    SCSIDiskState *s = opaque;

    /*
     * When a CD gets changed, we have to report an ejected state and
     * then a loaded state to guests so that they detect tray
     * open/close and media change events.  Guests that do not use
     * GET_EVENT_STATUS_NOTIFICATION to detect such tray open/close
     * states rely on this behavior.
     *
     * media_changed governs the state machine used for unit attention
     * report.  media_event is used by GET EVENT STATUS NOTIFICATION.
     */
    s->media_changed = load;
    s->tray_open = !load;
    scsi_device_set_ua(&s->qdev, SENSE_CODE(UNIT_ATTENTION_NO_MEDIUM));
    s->media_event = true;
    s->eject_request = false;
}

static void scsi_cd_eject_request_cb(void *opaque, bool force)
{
    SCSIDiskState *s = opaque;

    s->eject_request = true;
    if (force) {
        s->tray_locked = false;
    }
}

static bool scsi_cd_is_tray_open(void *opaque)
{
    return ((SCSIDiskState *)opaque)->tray_open;
}

static bool scsi_cd_is_medium_locked(void *opaque)
{
    return ((SCSIDiskState *)opaque)->tray_locked;
}

static const BlockDevOps scsi_disk_removable_block_ops = {
    .change_media_cb = scsi_cd_change_media_cb,
    .eject_request_cb = scsi_cd_eject_request_cb,
    .is_tray_open = scsi_cd_is_tray_open,
    .is_medium_locked = scsi_cd_is_medium_locked,

    .resize_cb = scsi_disk_resize_cb,
};

static const BlockDevOps scsi_disk_block_ops = {
    .resize_cb = scsi_disk_resize_cb,
};

static void scsi_disk_unit_attention_reported(SCSIDevice *dev)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
    if (s->media_changed) {
        s->media_changed = false;
        scsi_device_set_ua(&s->qdev, SENSE_CODE(MEDIUM_CHANGED));
    }
}

static void scsi_realize(SCSIDevice *dev, Error **errp)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);

    if (!s->qdev.conf.blk) {
        error_setg(errp, "drive property not set");
        return;
    }

    if (!(s->features & (1 << SCSI_DISK_F_REMOVABLE)) &&
        !blk_is_inserted(s->qdev.conf.blk)) {
        error_setg(errp, "Device needs media, but drive is empty");
        return;
    }

    blkconf_blocksizes(&s->qdev.conf);

    if (s->qdev.conf.logical_block_size >
        s->qdev.conf.physical_block_size) {
        error_setg(errp,
                   "logical_block_size > physical_block_size not supported");
        return;
    }

    if (dev->type == TYPE_DISK) {
        if (!blkconf_geometry(&dev->conf, NULL, 65535, 255, 255, errp)) {
            return;
        }
    }
    if (!blkconf_apply_backend_options(&dev->conf,
                                       blk_is_read_only(s->qdev.conf.blk),
                                       dev->type == TYPE_DISK, errp)) {
        return;
    }

    if (s->qdev.conf.discard_granularity == -1) {
        s->qdev.conf.discard_granularity =
            MAX(s->qdev.conf.logical_block_size, DEFAULT_DISCARD_GRANULARITY);
    }

    if (!s->version) {
        s->version = g_strdup(qemu_hw_version());
    }
    if (!s->vendor) {
        s->vendor = g_strdup("QEMU");
    }

    if (blk_is_sg(s->qdev.conf.blk)) {
        error_setg(errp, "unwanted /dev/sg*");
        return;
    }

    if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) &&
        !(s->features & (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS))) {
        blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_removable_block_ops, s);
    } else {
        blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_block_ops, s);
    }
    blk_set_guest_block_size(s->qdev.conf.blk, s->qdev.blocksize);

    blk_iostatus_enable(s->qdev.conf.blk);
}

static void scsi_hd_realize(SCSIDevice *dev, Error **errp)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
    /* This can happen for devices without a drive; the error message for
     * the missing backend will be issued in scsi_realize().
     */
    if (s->qdev.conf.blk) {
        blkconf_blocksizes(&s->qdev.conf);
    }
    s->qdev.blocksize = s->qdev.conf.logical_block_size;
    s->qdev.type = TYPE_DISK;
    if (!s->product) {
        s->product = g_strdup("QEMU HARDDISK");
    }
    scsi_realize(&s->qdev, errp);
}

static void scsi_cd_realize(SCSIDevice *dev, Error **errp)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
    int ret;

    if (!dev->conf.blk) {
        /* Anonymous BlockBackend for an empty drive.  As we put it into
         * dev->conf, qdev takes care of detaching on unplug. */
        dev->conf.blk = blk_new(0, BLK_PERM_ALL);
        ret = blk_attach_dev(dev->conf.blk, &dev->qdev);
        assert(ret == 0);
    }

    s->qdev.blocksize = 2048;
    s->qdev.type = TYPE_ROM;
    s->features |= 1 << SCSI_DISK_F_REMOVABLE;
    if (!s->product) {
        s->product = g_strdup("QEMU CD-ROM");
    }
    scsi_realize(&s->qdev, errp);
}

static void scsi_disk_realize(SCSIDevice *dev, Error **errp)
{
    DriveInfo *dinfo;
    Error *local_err = NULL;

    if (!dev->conf.blk) {
        scsi_realize(dev, &local_err);
        assert(local_err);
        error_propagate(errp, local_err);
        return;
    }

    dinfo = blk_legacy_dinfo(dev->conf.blk);
    if (dinfo && dinfo->media_cd) {
        scsi_cd_realize(dev, errp);
    } else {
        scsi_hd_realize(dev, errp);
    }
}

static const SCSIReqOps scsi_disk_emulate_reqops = {
    .size = sizeof(SCSIDiskReq),
    .free_req = scsi_free_request,
    .send_command = scsi_disk_emulate_command,
    .read_data = scsi_disk_emulate_read_data,
    .write_data = scsi_disk_emulate_write_data,
    .get_buf = scsi_get_buf,
};

static const SCSIReqOps scsi_disk_dma_reqops = {
    .size = sizeof(SCSIDiskReq),
    .free_req = scsi_free_request,
    .send_command = scsi_disk_dma_command,
    .read_data = scsi_read_data,
    .write_data = scsi_write_data,
    .get_buf = scsi_get_buf,
    .load_request = scsi_disk_load_request,
    .save_request = scsi_disk_save_request,
};
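
/*
 * Per-opcode dispatch between the emulation reqops and the DMA reqops.
 * Opcodes missing from this table are not rejected here: scsi_new_request()
 * falls back to scsi_disk_emulate_reqops, and scsi_disk_emulate_command()
 * then answers with INVALID OPCODE if it does not know the command either.
 */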

static const SCSIReqOps *const scsi_disk_reqops_dispatch[256] = {
    [TEST_UNIT_READY]               = &scsi_disk_emulate_reqops,
    [INQUIRY]                       = &scsi_disk_emulate_reqops,
    [MODE_SENSE]                    = &scsi_disk_emulate_reqops,
    [MODE_SENSE_10]                 = &scsi_disk_emulate_reqops,
    [START_STOP]                    = &scsi_disk_emulate_reqops,
    [ALLOW_MEDIUM_REMOVAL]          = &scsi_disk_emulate_reqops,
    [READ_CAPACITY_10]              = &scsi_disk_emulate_reqops,
    [READ_TOC]                      = &scsi_disk_emulate_reqops,
    [READ_DVD_STRUCTURE]            = &scsi_disk_emulate_reqops,
    [READ_DISC_INFORMATION]         = &scsi_disk_emulate_reqops,
    [GET_CONFIGURATION]             = &scsi_disk_emulate_reqops,
    [GET_EVENT_STATUS_NOTIFICATION] = &scsi_disk_emulate_reqops,
    [MECHANISM_STATUS]              = &scsi_disk_emulate_reqops,
    [SERVICE_ACTION_IN_16]          = &scsi_disk_emulate_reqops,
    [REQUEST_SENSE]                 = &scsi_disk_emulate_reqops,
    [SYNCHRONIZE_CACHE]             = &scsi_disk_emulate_reqops,
    [SEEK_10]                       = &scsi_disk_emulate_reqops,
    [MODE_SELECT]                   = &scsi_disk_emulate_reqops,
    [MODE_SELECT_10]                = &scsi_disk_emulate_reqops,
    [UNMAP]                         = &scsi_disk_emulate_reqops,
    [WRITE_SAME_10]                 = &scsi_disk_emulate_reqops,
    [WRITE_SAME_16]                 = &scsi_disk_emulate_reqops,
    [VERIFY_10]                     = &scsi_disk_emulate_reqops,
    [VERIFY_12]                     = &scsi_disk_emulate_reqops,
    [VERIFY_16]                     = &scsi_disk_emulate_reqops,

    [READ_6]                        = &scsi_disk_dma_reqops,
    [READ_10]                       = &scsi_disk_dma_reqops,
    [READ_12]                       = &scsi_disk_dma_reqops,
    [READ_16]                       = &scsi_disk_dma_reqops,
    [WRITE_6]                       = &scsi_disk_dma_reqops,
    [WRITE_10]                      = &scsi_disk_dma_reqops,
    [WRITE_12]                      = &scsi_disk_dma_reqops,
    [WRITE_16]                      = &scsi_disk_dma_reqops,
    [WRITE_VERIFY_10]               = &scsi_disk_dma_reqops,
    [WRITE_VERIFY_12]               = &scsi_disk_dma_reqops,
    [WRITE_VERIFY_16]               = &scsi_disk_dma_reqops,
};

static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun,
                                     uint8_t *buf, void *hba_private)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);
    SCSIRequest *req;
    const SCSIReqOps *ops;
    uint8_t command;

    command = buf[0];
    ops = scsi_disk_reqops_dispatch[command];
    if (!ops) {
        ops = &scsi_disk_emulate_reqops;
    }
    req = scsi_req_alloc(ops, &s->qdev, tag, lun, hba_private);

#ifdef DEBUG_SCSI
    DPRINTF("Command: lun=%d tag=0x%x data=0x%02x", lun, tag, buf[0]);
    {
        int i;
        for (i = 1; i < scsi_cdb_length(buf); i++) {
            printf(" 0x%02x", buf[i]);
        }
        printf("\n");
    }
#endif

    return req;
}

#ifdef __linux__
static int get_device_type(SCSIDiskState *s)
{
    uint8_t cmd[16];
    uint8_t buf[36];
    int ret;

    memset(cmd, 0, sizeof(cmd));
    memset(buf, 0, sizeof(buf));
    cmd[0] = INQUIRY;
    cmd[4] = sizeof(buf);

    ret = scsi_SG_IO_FROM_DEV(s->qdev.conf.blk, cmd, sizeof(cmd),
                              buf, sizeof(buf));
    if (ret < 0) {
        return -1;
    }
    s->qdev.type = buf[0];
    if (buf[1] & 0x80) {
        s->features |= 1 << SCSI_DISK_F_REMOVABLE;
    }
    return 0;
}

static void scsi_block_realize(SCSIDevice *dev, Error **errp)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
    int sg_version;
    int rc;

    if (!s->qdev.conf.blk) {
        error_setg(errp, "drive property not set");
        return;
    }

    if (s->rotation_rate) {
        error_report_once("rotation_rate is specified for scsi-block but is "
                          "not implemented. This option is deprecated and will "
                          "be removed in a future version");
    }

    /* check we are using a driver managing SG_IO (version 3 and after) */
    rc = blk_ioctl(s->qdev.conf.blk, SG_GET_VERSION_NUM, &sg_version);
    if (rc < 0) {
        error_setg_errno(errp, -rc, "cannot get SG_IO version number");
        if (rc != -EPERM) {
            error_append_hint(errp, "Is this a SCSI device?\n");
        }
        return;
    }
    if (sg_version < 30000) {
        error_setg(errp, "scsi generic interface too old");
        return;
    }

    /* get device type from INQUIRY data */
    rc = get_device_type(s);
    if (rc < 0) {
        error_setg(errp, "INQUIRY failed");
        return;
    }

    /* Make a guess for the block size; we'll fix it when the guest sends
     * READ CAPACITY.  If it doesn't, it would likely assume these sizes
     * anyway.  (TODO: check in /sys).
     */
    if (s->qdev.type == TYPE_ROM || s->qdev.type == TYPE_WORM) {
        s->qdev.blocksize = 2048;
    } else {
        s->qdev.blocksize = 512;
    }

    /* Mark the scsi-block device as non-removable so that it cannot be
     * ejected with the HMP and QMP eject commands.
     */
    s->features |= (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS);

    scsi_realize(&s->qdev, errp);
    scsi_generic_read_device_inquiry(&s->qdev);
}

typedef struct SCSIBlockReq {
    SCSIDiskReq req;
    sg_io_hdr_t io_header;

    /* Selected bytes of the original CDB, copied into our own CDB.  */
    uint8_t cmd, cdb1, group_number;

    /* CDB passed to SG_IO.  */
    uint8_t cdb[16];
} SCSIBlockReq;

static BlockAIOCB *scsi_block_do_sgio(SCSIBlockReq *req,
                                      int64_t offset, QEMUIOVector *iov,
                                      int direction,
                                      BlockCompletionFunc *cb, void *opaque)
{
    sg_io_hdr_t *io_header = &req->io_header;
    SCSIDiskReq *r = &req->req;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    int nb_logical_blocks;
    uint64_t lba;
    BlockAIOCB *aiocb;

    /* This is not supported yet.  It can only happen if the guest does
     * reads and writes that are not aligned to the logical block size
     * _and_ cover multiple MemoryRegions.
     */
    assert(offset % s->qdev.blocksize == 0);
    assert(iov->size % s->qdev.blocksize == 0);

    io_header->interface_id = 'S';

    /* The data transfer comes from the QEMUIOVector.  */
    io_header->dxfer_direction = direction;
    io_header->dxfer_len = iov->size;
    io_header->dxferp = (void *)iov->iov;
    io_header->iovec_count = iov->niov;
    assert(io_header->iovec_count == iov->niov); /* no overflow! */

    /* Build a new CDB with the LBA and length patched in, in case
     * DMA helpers split the transfer in multiple segments.  Do not
     * build a CDB smaller than what the guest wanted, and only build
     * a larger one if strictly necessary.
     */
    io_header->cmdp = req->cdb;
    lba = offset / s->qdev.blocksize;
    nb_logical_blocks = io_header->dxfer_len / s->qdev.blocksize;

    if ((req->cmd >> 5) == 0 && lba <= 0x1ffff) {
        /* 6-byte CDB */
        stl_be_p(&req->cdb[0], lba | (req->cmd << 24));
        req->cdb[4] = nb_logical_blocks;
        req->cdb[5] = 0;
        io_header->cmd_len = 6;
    } else if ((req->cmd >> 5) <= 1 && lba <= 0xffffffffULL) {
        /* 10-byte CDB */
        req->cdb[0] = (req->cmd & 0x1f) | 0x20;
        req->cdb[1] = req->cdb1;
        stl_be_p(&req->cdb[2], lba);
        req->cdb[6] = req->group_number;
        stw_be_p(&req->cdb[7], nb_logical_blocks);
        req->cdb[9] = 0;
        io_header->cmd_len = 10;
    } else if ((req->cmd >> 5) != 4 && lba <= 0xffffffffULL) {
        /* 12-byte CDB */
        req->cdb[0] = (req->cmd & 0x1f) | 0xA0;
        req->cdb[1] = req->cdb1;
        stl_be_p(&req->cdb[2], lba);
        stl_be_p(&req->cdb[6], nb_logical_blocks);
        req->cdb[10] = req->group_number;
        req->cdb[11] = 0;
        io_header->cmd_len = 12;
    } else {
        /* 16-byte CDB */
        req->cdb[0] = (req->cmd & 0x1f) | 0x80;
        req->cdb[1] = req->cdb1;
        stq_be_p(&req->cdb[2], lba);
        stl_be_p(&req->cdb[10], nb_logical_blocks);
        req->cdb[14] = req->group_number;
        req->cdb[15] = 0;
        io_header->cmd_len = 16;
    }

    /* The rest is as in scsi-generic.c.  */
    io_header->mx_sb_len = sizeof(r->req.sense);
    io_header->sbp = r->req.sense;
    io_header->timeout = UINT_MAX;
    io_header->usr_ptr = r;
    io_header->flags |= SG_FLAG_DIRECT_IO;

    aiocb = blk_aio_ioctl(s->qdev.conf.blk, SG_IO, io_header, cb, opaque);
    assert(aiocb != NULL);
    return aiocb;
}
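
/*
 * Worked example for the CDB rebuilding above (illustrative values): a guest
 * READ(10) segment at LBA 0x12345678 covering 8 logical blocks is reissued as
 *
 *   28 00 12 34 56 78 00 00 08 00
 *
 * whereas a segment whose LBA no longer fits in 32 bits is promoted to the
 * 16-byte form (opcode 0x88 for a read).  The transfer type chosen by the
 * guest is preserved; only the CDB size may grow, and only when necessary.
 */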

static bool scsi_block_no_fua(SCSICommand *cmd)
{
    return false;
}

static BlockAIOCB *scsi_block_dma_readv(int64_t offset,
                                        QEMUIOVector *iov,
                                        BlockCompletionFunc *cb, void *cb_opaque,
                                        void *opaque)
{
    SCSIBlockReq *r = opaque;
    return scsi_block_do_sgio(r, offset, iov,
                              SG_DXFER_FROM_DEV, cb, cb_opaque);
}

static BlockAIOCB *scsi_block_dma_writev(int64_t offset,
                                         QEMUIOVector *iov,
                                         BlockCompletionFunc *cb, void *cb_opaque,
                                         void *opaque)
{
    SCSIBlockReq *r = opaque;
    return scsi_block_do_sgio(r, offset, iov,
                              SG_DXFER_TO_DEV, cb, cb_opaque);
}

static bool scsi_block_is_passthrough(SCSIDiskState *s, uint8_t *buf)
{
    switch (buf[0]) {
    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
        /* Check if BYTCHK == 0x01 (data-out buffer contains data
         * for the number of logical blocks specified in the length
         * field).  For other modes, do not use scatter/gather operation.
         */
        if ((buf[1] & 6) == 2) {
            return false;
        }
        break;

    case READ_6:
    case READ_10:
    case READ_12:
    case READ_16:
    case WRITE_6:
    case WRITE_10:
    case WRITE_12:
    case WRITE_16:
    case WRITE_VERIFY_10:
    case WRITE_VERIFY_12:
    case WRITE_VERIFY_16:
        /* MMC writing cannot be done via DMA helpers, because it sometimes
         * involves writing beyond the maximum LBA or to negative LBA (lead-in).
         * We might use scsi_block_dma_reqops as long as no writing commands are
         * seen, but performance usually isn't paramount on optical media.  So,
         * just make scsi-block operate the same as scsi-generic for them.
         */
        if (s->qdev.type != TYPE_ROM) {
            return false;
        }
        break;

    default:
        break;
    }

    return true;
}


static int32_t scsi_block_dma_command(SCSIRequest *req, uint8_t *buf)
{
    SCSIBlockReq *r = (SCSIBlockReq *)req;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);

    r->cmd = req->cmd.buf[0];
    switch (r->cmd >> 5) {
    case 0:
        /* 6-byte CDB.  */
        r->cdb1 = r->group_number = 0;
        break;
    case 1:
        /* 10-byte CDB.  */
        r->cdb1 = req->cmd.buf[1];
        r->group_number = req->cmd.buf[6];
        break;
    case 4:
        /* 16-byte CDB.  */
        r->cdb1 = req->cmd.buf[1];
        r->group_number = req->cmd.buf[14];
        break;
    case 5:
        /* 12-byte CDB.  */
        r->cdb1 = req->cmd.buf[1];
        r->group_number = req->cmd.buf[10];
        break;
    default:
        abort();
    }

    /* Protection information is not supported.  For SCSI versions 2 and
     * older (as determined by snooping the guest's INQUIRY commands),
     * there is no RD/WR/VRPROTECT, so skip this check in these versions.
     */
    if (s->qdev.scsi_version > 2 && (req->cmd.buf[1] & 0xe0)) {
        scsi_check_condition(&r->req, SENSE_CODE(INVALID_FIELD));
        return 0;
    }

    r->req.status = &r->io_header.status;
    return scsi_disk_dma_command(req, buf);
}

static const SCSIReqOps scsi_block_dma_reqops = {
    .size = sizeof(SCSIBlockReq),
    .free_req = scsi_free_request,
    .send_command = scsi_block_dma_command,
    .read_data = scsi_read_data,
    .write_data = scsi_write_data,
    .get_buf = scsi_get_buf,
    .load_request = scsi_disk_load_request,
    .save_request = scsi_disk_save_request,
};

static SCSIRequest *scsi_block_new_request(SCSIDevice *d, uint32_t tag,
                                           uint32_t lun, uint8_t *buf,
                                           void *hba_private)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);

    if (scsi_block_is_passthrough(s, buf)) {
        return scsi_req_alloc(&scsi_generic_req_ops, &s->qdev, tag, lun,
                              hba_private);
    } else {
        return scsi_req_alloc(&scsi_block_dma_reqops, &s->qdev, tag, lun,
                              hba_private);
    }
}

static int scsi_block_parse_cdb(SCSIDevice *d, SCSICommand *cmd,
                                uint8_t *buf, void *hba_private)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);

    if (scsi_block_is_passthrough(s, buf)) {
        return scsi_bus_parse_cdb(&s->qdev, cmd, buf, hba_private);
    } else {
        return scsi_req_parse_cdb(&s->qdev, cmd, buf);
    }
}

#endif
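
/*
 * Default DMA helpers for the emulated device types: plain block-layer
 * reads and writes on the backend.  scsi-block overrides these class hooks
 * with the SG_IO based helpers above and disables FUA emulation via
 * scsi_block_no_fua, presumably because the rebuilt pass-through CDB
 * already carries the guest's FUA bit to the real device.
 */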

static
BlockAIOCB *scsi_dma_readv(int64_t offset, QEMUIOVector *iov,
                           BlockCompletionFunc *cb, void *cb_opaque,
                           void *opaque)
{
    SCSIDiskReq *r = opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    return blk_aio_preadv(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque);
}

static
BlockAIOCB *scsi_dma_writev(int64_t offset, QEMUIOVector *iov,
                            BlockCompletionFunc *cb, void *cb_opaque,
                            void *opaque)
{
    SCSIDiskReq *r = opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    return blk_aio_pwritev(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque);
}

static void scsi_disk_base_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass);

    dc->fw_name = "disk";
    dc->reset = scsi_disk_reset;
    sdc->dma_readv = scsi_dma_readv;
    sdc->dma_writev = scsi_dma_writev;
    sdc->need_fua_emulation = scsi_is_cmd_fua;
}

static const TypeInfo scsi_disk_base_info = {
    .name = TYPE_SCSI_DISK_BASE,
    .parent = TYPE_SCSI_DEVICE,
    .class_init = scsi_disk_base_class_initfn,
    .instance_size = sizeof(SCSIDiskState),
    .class_size = sizeof(SCSIDiskClass),
    .abstract = true,
};

#define DEFINE_SCSI_DISK_PROPERTIES()                                \
    DEFINE_BLOCK_PROPERTIES(SCSIDiskState, qdev.conf),               \
    DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf),         \
    DEFINE_PROP_STRING("ver", SCSIDiskState, version),               \
    DEFINE_PROP_STRING("serial", SCSIDiskState, serial),             \
    DEFINE_PROP_STRING("vendor", SCSIDiskState, vendor),             \
    DEFINE_PROP_STRING("product", SCSIDiskState, product)

static Property scsi_hd_properties[] = {
    DEFINE_SCSI_DISK_PROPERTIES(),
    DEFINE_PROP_BIT("removable", SCSIDiskState, features,
                    SCSI_DISK_F_REMOVABLE, false),
    DEFINE_PROP_BIT("dpofua", SCSIDiskState, features,
                    SCSI_DISK_F_DPOFUA, false),
    DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
    DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
    DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
    DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size,
                       DEFAULT_MAX_UNMAP_SIZE),
    DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
                       DEFAULT_MAX_IO_SIZE),
    DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0),
    DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version,
                      5),
    DEFINE_BLOCK_CHS_PROPERTIES(SCSIDiskState, qdev.conf),
    DEFINE_PROP_END_OF_LIST(),
};

static const VMStateDescription vmstate_scsi_disk_state = {
    .name = "scsi-disk",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_SCSI_DEVICE(qdev, SCSIDiskState),
        VMSTATE_BOOL(media_changed, SCSIDiskState),
        VMSTATE_BOOL(media_event, SCSIDiskState),
        VMSTATE_BOOL(eject_request, SCSIDiskState),
        VMSTATE_BOOL(tray_open, SCSIDiskState),
        VMSTATE_BOOL(tray_locked, SCSIDiskState),
        VMSTATE_END_OF_LIST()
    }
};

static void scsi_hd_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);

    sc->realize = scsi_hd_realize;
    sc->alloc_req = scsi_new_request;
    sc->unit_attention_reported = scsi_disk_unit_attention_reported;
    dc->desc = "virtual SCSI disk";
    dc->props = scsi_hd_properties;
    dc->vmsd = &vmstate_scsi_disk_state;
}

static const TypeInfo scsi_hd_info = {
    .name = "scsi-hd",
    .parent = TYPE_SCSI_DISK_BASE,
    .class_init = scsi_hd_class_initfn,
};

static Property scsi_cd_properties[] = {
    DEFINE_SCSI_DISK_PROPERTIES(),
    DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
    DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
    DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
    DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
                       DEFAULT_MAX_IO_SIZE),
    DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version,
                      5),
    DEFINE_PROP_END_OF_LIST(),
};

static void scsi_cd_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);

    sc->realize = scsi_cd_realize;
    sc->alloc_req = scsi_new_request;
    sc->unit_attention_reported = scsi_disk_unit_attention_reported;
    dc->desc = "virtual SCSI CD-ROM";
    dc->props = scsi_cd_properties;
    dc->vmsd = &vmstate_scsi_disk_state;
}

static const TypeInfo scsi_cd_info = {
    .name = "scsi-cd",
    .parent = TYPE_SCSI_DISK_BASE,
    .class_init = scsi_cd_class_initfn,
};

#ifdef __linux__
static Property scsi_block_properties[] = {
    DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf),
    DEFINE_PROP_DRIVE("drive", SCSIDiskState, qdev.conf.blk),
    DEFINE_PROP_BOOL("share-rw", SCSIDiskState, qdev.conf.share_rw, false),
    DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0),
    DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size,
                       DEFAULT_MAX_UNMAP_SIZE),
    DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
                       DEFAULT_MAX_IO_SIZE),
    DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version,
                      -1),
    DEFINE_PROP_END_OF_LIST(),
};
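
/*
 * Illustrative command line (device node and IDs are examples only):
 *
 *   -device virtio-scsi-pci,id=scsi0 \
 *   -blockdev node-name=sda,driver=host_device,filename=/dev/sda \
 *   -device scsi-block,drive=sda
 *
 * The backend must be a whole SCSI device node capable of SG_IO;
 * scsi_block_realize() above rejects sg drivers older than version 3.
 */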

static void scsi_block_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);
    SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass);

    sc->realize = scsi_block_realize;
    sc->alloc_req = scsi_block_new_request;
    sc->parse_cdb = scsi_block_parse_cdb;
    sdc->dma_readv = scsi_block_dma_readv;
    sdc->dma_writev = scsi_block_dma_writev;
    sdc->need_fua_emulation = scsi_block_no_fua;
    dc->desc = "SCSI block device passthrough";
    dc->props = scsi_block_properties;
    dc->vmsd = &vmstate_scsi_disk_state;
}

static const TypeInfo scsi_block_info = {
    .name = "scsi-block",
    .parent = TYPE_SCSI_DISK_BASE,
    .class_init = scsi_block_class_initfn,
};
#endif

static Property scsi_disk_properties[] = {
    DEFINE_SCSI_DISK_PROPERTIES(),
    DEFINE_PROP_BIT("removable", SCSIDiskState, features,
                    SCSI_DISK_F_REMOVABLE, false),
    DEFINE_PROP_BIT("dpofua", SCSIDiskState, features,
                    SCSI_DISK_F_DPOFUA, false),
    DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
    DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
    DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
    DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size,
                       DEFAULT_MAX_UNMAP_SIZE),
    DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
                       DEFAULT_MAX_IO_SIZE),
    DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version,
                      5),
    DEFINE_PROP_END_OF_LIST(),
};

static void scsi_disk_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);

    sc->realize = scsi_disk_realize;
    sc->alloc_req = scsi_new_request;
    sc->unit_attention_reported = scsi_disk_unit_attention_reported;
    dc->fw_name = "disk";
    dc->desc = "virtual SCSI disk or CD-ROM (legacy)";
    dc->reset = scsi_disk_reset;
    dc->props = scsi_disk_properties;
    dc->vmsd = &vmstate_scsi_disk_state;
}

static const TypeInfo scsi_disk_info = {
    .name = "scsi-disk",
    .parent = TYPE_SCSI_DISK_BASE,
    .class_init = scsi_disk_class_initfn,
};

static void scsi_disk_register_types(void)
{
    type_register_static(&scsi_disk_base_info);
    type_register_static(&scsi_hd_info);
    type_register_static(&scsi_cd_info);
#ifdef __linux__
    type_register_static(&scsi_block_info);
#endif
    type_register_static(&scsi_disk_info);
}

type_init(scsi_disk_register_types)
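
/*
 * Illustrative command lines for the emulated types (image names, IDs and
 * property values are examples only):
 *
 *   -device virtio-scsi-pci,id=scsi0 \
 *   -drive if=none,id=hd0,file=disk.qcow2 \
 *   -device scsi-hd,drive=hd0,serial=QM0001,rotation_rate=1
 *
 *   -drive if=none,id=cd0,media=cdrom,file=install.iso \
 *   -device scsi-cd,drive=cd0
 *
 * rotation_rate=1 reports a non-rotating (solid state) medium to the guest;
 * wwn, port_wwn and port_index are surfaced through the INQUIRY VPD pages
 * emulated elsewhere in this file.
 */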