/*
 * SCSI Device emulation
 *
 * Copyright (c) 2006 CodeSourcery.
 * Based on code by Fabrice Bellard
 *
 * Written by Paul Brook
 * Modifications:
 *  2009-Dec-12 Artyom Tarasenko : implemented standard inquiry for the case
 *                                 when the allocation length of CDB is smaller
 *                                 than 36.
 *  2009-Oct-13 Artyom Tarasenko : implemented the block descriptor in the
 *                                 MODE SENSE response.
 *
 * This code is licensed under the LGPL.
 *
 * Note that this file only handles the SCSI architecture model and device
 * commands.  Emulation of interface/link layer protocols is handled by
 * the host adapter emulator.
 */

//#define DEBUG_SCSI

#ifdef DEBUG_SCSI
#define DPRINTF(fmt, ...) \
do { printf("scsi-disk: " fmt , ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) do {} while(0)
#endif

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "hw/scsi/scsi.h"
#include "block/scsi.h"
#include "sysemu/sysemu.h"
#include "sysemu/block-backend.h"
#include "sysemu/blockdev.h"
#include "hw/block/block.h"
#include "sysemu/dma.h"
#include "qemu/cutils.h"

#ifdef __linux
#include <scsi/sg.h>
#endif

#define SCSI_WRITE_SAME_MAX         524288
#define SCSI_DMA_BUF_SIZE           131072
#define SCSI_MAX_INQUIRY_LEN        256
#define SCSI_MAX_MODE_LEN           256

#define DEFAULT_DISCARD_GRANULARITY 4096
#define DEFAULT_MAX_UNMAP_SIZE      (1 << 30)   /* 1 GB */
#define DEFAULT_MAX_IO_SIZE         INT_MAX     /* 2 GB - 1 block */

#define TYPE_SCSI_DISK_BASE         "scsi-disk-base"

#define SCSI_DISK_BASE(obj) \
     OBJECT_CHECK(SCSIDiskState, (obj), TYPE_SCSI_DISK_BASE)
#define SCSI_DISK_BASE_CLASS(klass) \
     OBJECT_CLASS_CHECK(SCSIDiskClass, (klass), TYPE_SCSI_DISK_BASE)
#define SCSI_DISK_BASE_GET_CLASS(obj) \
     OBJECT_GET_CLASS(SCSIDiskClass, (obj), TYPE_SCSI_DISK_BASE)

typedef struct SCSIDiskClass {
    SCSIDeviceClass parent_class;
    DMAIOFunc       *dma_readv;
    DMAIOFunc       *dma_writev;
    bool            (*need_fua_emulation)(SCSICommand *cmd);
} SCSIDiskClass;

typedef struct SCSIDiskReq {
    SCSIRequest req;
    /* Both sector and sector_count are in terms of qemu 512 byte blocks. */
    uint64_t sector;
    uint32_t sector_count;
    uint32_t buflen;
    bool started;
    bool need_fua_emulation;
    struct iovec iov;
    QEMUIOVector qiov;
    BlockAcctCookie acct;
    unsigned char *status;
} SCSIDiskReq;

#define SCSI_DISK_F_REMOVABLE             0
#define SCSI_DISK_F_DPOFUA                1
#define SCSI_DISK_F_NO_REMOVABLE_DEVOPS   2

typedef struct SCSIDiskState
{
    SCSIDevice qdev;
    uint32_t features;
    bool media_changed;
    bool media_event;
    bool eject_request;
    uint16_t port_index;
    uint64_t max_unmap_size;
    uint64_t max_io_size;
    QEMUBH *bh;
    char *version;
    char *serial;
    char *vendor;
    char *product;
    bool tray_open;
    bool tray_locked;
} SCSIDiskState;

static int scsi_handle_rw_error(SCSIDiskReq *r, int error, bool acct_failed);

static void scsi_free_request(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    qemu_vfree(r->iov.iov_base);
}
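
/*
 * r->iov.iov_base is allocated lazily (in scsi_init_iovec() or in
 * scsi_disk_emulate_command()) with blk_blockalign(), so the qemu_vfree()
 * above is the matching release for that bounce buffer.
 */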

/* Helper function for command completion with sense.  */
static void scsi_check_condition(SCSIDiskReq *r, SCSISense sense)
{
    DPRINTF("Command complete tag=0x%x sense=%d/%d/%d\n",
            r->req.tag, sense.key, sense.asc, sense.ascq);
    scsi_req_build_sense(&r->req, sense);
    scsi_req_complete(&r->req, CHECK_CONDITION);
}

static void scsi_init_iovec(SCSIDiskReq *r, size_t size)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    if (!r->iov.iov_base) {
        r->buflen = size;
        r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen);
    }
    r->iov.iov_len = MIN(r->sector_count * 512, r->buflen);
    qemu_iovec_init_external(&r->qiov, &r->iov, 1);
}

static void scsi_disk_save_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    qemu_put_be64s(f, &r->sector);
    qemu_put_be32s(f, &r->sector_count);
    qemu_put_be32s(f, &r->buflen);
    if (r->buflen) {
        if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
            qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len);
        } else if (!req->retry) {
            uint32_t len = r->iov.iov_len;
            qemu_put_be32s(f, &len);
            qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len);
        }
    }
}

static void scsi_disk_load_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    qemu_get_be64s(f, &r->sector);
    qemu_get_be32s(f, &r->sector_count);
    qemu_get_be32s(f, &r->buflen);
    if (r->buflen) {
        scsi_init_iovec(r, r->buflen);
        if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
            qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len);
        } else if (!r->req.retry) {
            uint32_t len;
            qemu_get_be32s(f, &len);
            r->iov.iov_len = len;
            assert(r->iov.iov_len <= r->buflen);
            qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len);
        }
    }

    qemu_iovec_init_external(&r->qiov, &r->iov, 1);
}

static bool scsi_disk_req_check_error(SCSIDiskReq *r, int ret, bool acct_failed)
{
    if (r->req.io_canceled) {
        scsi_req_cancel_complete(&r->req);
        return true;
    }

    if (ret < 0) {
        return scsi_handle_rw_error(r, -ret, acct_failed);
    }

    if (r->status && *r->status) {
        if (acct_failed) {
            SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
            block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
        }
        scsi_req_complete(&r->req, *r->status);
        return true;
    }

    return false;
}

static void scsi_aio_complete(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;
    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (scsi_disk_req_check_error(r, ret, true)) {
        goto done;
    }

    block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    scsi_req_complete(&r->req, GOOD);

done:
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
    scsi_req_unref(&r->req);
}

static bool scsi_is_cmd_fua(SCSICommand *cmd)
{
    switch (cmd->buf[0]) {
    case READ_10:
    case READ_12:
    case READ_16:
    case WRITE_10:
    case WRITE_12:
    case WRITE_16:
        return (cmd->buf[1] & 8) != 0;

    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
    case WRITE_VERIFY_10:
    case WRITE_VERIFY_12:
    case WRITE_VERIFY_16:
        return true;

    case READ_6:
    case WRITE_6:
    default:
        return false;
    }
}
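
/*
 * A worked example of the FUA check above: the WRITE(10) CDB is laid out as
 * { opcode 0x2a, flags, LBA[4], group number, transfer length[2], control },
 * so cmd->buf[1] & 8 tests the FUA bit of the flags byte.  VERIFY and
 * WRITE AND VERIFY are reported as FUA unconditionally, presumably so the
 * data reaches the medium before any read-back comparison takes place.
 */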
static void scsi_write_do_fua(SCSIDiskReq *r)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb == NULL);
    assert(!r->req.io_canceled);

    if (r->need_fua_emulation) {
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
        return;
    }

    scsi_req_complete(&r->req, GOOD);
    scsi_req_unref(&r->req);
}

static void scsi_dma_complete_noio(SCSIDiskReq *r, int ret)
{
    assert(r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    r->sector += r->sector_count;
    r->sector_count = 0;
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        scsi_write_do_fua(r);
        return;
    } else {
        scsi_req_complete(&r->req, GOOD);
    }

done:
    scsi_req_unref(&r->req);
}

static void scsi_dma_complete(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    }
    scsi_dma_complete_noio(r, ret);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}

static void scsi_read_complete(void * opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    int n;

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;
    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (scsi_disk_req_check_error(r, ret, true)) {
        goto done;
    }

    block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    DPRINTF("Data ready tag=0x%x len=%zd\n", r->req.tag, r->qiov.size);

    n = r->qiov.size / 512;
    r->sector += n;
    r->sector_count -= n;
    scsi_req_data(&r->req, r->qiov.size);

done:
    scsi_req_unref(&r->req);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}
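
/*
 * Reads take one of two paths below: if the HBA supplied a scatter/gather
 * list (r->req.sg), the transfer goes straight to guest memory through
 * dma_blk_io() and completes in scsi_dma_complete(); otherwise the data is
 * staged through a bounce buffer of at most SCSI_DMA_BUF_SIZE bytes and
 * handed to the HBA in chunks via scsi_req_data()/scsi_read_complete().
 */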
/* Actually issue a read to the block device.  */
static void scsi_do_read(SCSIDiskReq *r, int ret)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));

    assert (r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    /* The request is used as the AIO opaque value, so add a ref.  */
    scsi_req_ref(&r->req);

    if (r->req.sg) {
        dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_READ);
        r->req.resid -= r->req.sg->size;
        r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
                                  r->req.sg, r->sector << BDRV_SECTOR_BITS,
                                  BDRV_SECTOR_SIZE,
                                  sdc->dma_readv, r, scsi_dma_complete, r,
                                  DMA_DIRECTION_FROM_DEVICE);
    } else {
        scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         r->qiov.size, BLOCK_ACCT_READ);
        r->req.aiocb = sdc->dma_readv(r->sector << BDRV_SECTOR_BITS, &r->qiov,
                                      scsi_read_complete, r, r);
    }

done:
    scsi_req_unref(&r->req);
}

static void scsi_do_read_cb(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert (r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    }
    scsi_do_read(opaque, ret);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}

/* Read more data from scsi device into buffer.  */
static void scsi_read_data(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    bool first;

    DPRINTF("Read sector_count=%d\n", r->sector_count);
    if (r->sector_count == 0) {
        /* This also clears the sense buffer for REQUEST SENSE.  */
        scsi_req_complete(&r->req, GOOD);
        return;
    }

    /* No data transfer may already be in progress */
    assert(r->req.aiocb == NULL);

    /* The request is used as the AIO opaque value, so add a ref.  */
    scsi_req_ref(&r->req);
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        DPRINTF("Data transfer direction invalid\n");
        scsi_read_complete(r, -EINVAL);
        return;
    }

    if (!blk_is_available(req->dev->conf.blk)) {
        scsi_read_complete(r, -ENOMEDIUM);
        return;
    }

    first = !r->started;
    r->started = true;
    if (first && r->need_fua_emulation) {
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_do_read_cb, r);
    } else {
        scsi_do_read(r, 0);
    }
}
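
/*
 * Error policy: the action returned by blk_get_error_action() below follows
 * the drive's configured rerror=/werror= settings, so a host I/O error can
 * either be reported to the guest as a CHECK CONDITION, ignored, or turned
 * into a retry after the VM is stopped (BLOCK_ERROR_ACTION_STOP).
 */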
/*
 * scsi_handle_rw_error has two return values.  0 means that the error
 * must be ignored, 1 means that the error has been processed and the
 * caller should not do anything else for this request.  Note that
 * scsi_handle_rw_error always manages its reference counts, independent
 * of the return value.
 */
static int scsi_handle_rw_error(SCSIDiskReq *r, int error, bool acct_failed)
{
    bool is_read = (r->req.cmd.mode == SCSI_XFER_FROM_DEV);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    BlockErrorAction action = blk_get_error_action(s->qdev.conf.blk,
                                                   is_read, error);

    if (action == BLOCK_ERROR_ACTION_REPORT) {
        if (acct_failed) {
            block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
        }
        switch (error) {
        case ENOMEDIUM:
            scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
            break;
        case ENOMEM:
            scsi_check_condition(r, SENSE_CODE(TARGET_FAILURE));
            break;
        case EINVAL:
            scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
            break;
        case ENOSPC:
            scsi_check_condition(r, SENSE_CODE(SPACE_ALLOC_FAILED));
            break;
        default:
            scsi_check_condition(r, SENSE_CODE(IO_ERROR));
            break;
        }
    }
    blk_error_action(s->qdev.conf.blk, action, is_read, error);
    if (action == BLOCK_ERROR_ACTION_STOP) {
        scsi_req_retry(&r->req);
    }
    return action != BLOCK_ERROR_ACTION_IGNORE;
}

static void scsi_write_complete_noio(SCSIDiskReq *r, int ret)
{
    uint32_t n;

    assert (r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    n = r->qiov.size / 512;
    r->sector += n;
    r->sector_count -= n;
    if (r->sector_count == 0) {
        scsi_write_do_fua(r);
        return;
    } else {
        scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
        DPRINTF("Write complete tag=0x%x more=%zd\n", r->req.tag, r->qiov.size);
        scsi_req_data(&r->req, r->qiov.size);
    }

done:
    scsi_req_unref(&r->req);
}

static void scsi_write_complete(void * opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert (r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    }
    scsi_write_complete_noio(r, ret);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}
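
/*
 * Write requests arrive here in two steps: the first scsi_write_data() call
 * finds an empty qiov and merely asks the HBA for data (through
 * scsi_write_complete_noio() -> scsi_req_data()); once the HBA has filled
 * the buffer it calls scsi_write_data() again and the actual block-layer
 * write is submitted.
 */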
static void scsi_write_data(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));

    /* No data transfer may already be in progress */
    assert(r->req.aiocb == NULL);

    /* The request is used as the AIO opaque value, so add a ref.  */
    scsi_req_ref(&r->req);
    if (r->req.cmd.mode != SCSI_XFER_TO_DEV) {
        DPRINTF("Data transfer direction invalid\n");
        scsi_write_complete_noio(r, -EINVAL);
        return;
    }

    if (!r->req.sg && !r->qiov.size) {
        /* Called for the first time.  Ask the driver to send us more data.  */
        r->started = true;
        scsi_write_complete_noio(r, 0);
        return;
    }
    if (!blk_is_available(req->dev->conf.blk)) {
        scsi_write_complete_noio(r, -ENOMEDIUM);
        return;
    }

    if (r->req.cmd.buf[0] == VERIFY_10 || r->req.cmd.buf[0] == VERIFY_12 ||
        r->req.cmd.buf[0] == VERIFY_16) {
        if (r->req.sg) {
            scsi_dma_complete_noio(r, 0);
        } else {
            scsi_write_complete_noio(r, 0);
        }
        return;
    }

    if (r->req.sg) {
        dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_WRITE);
        r->req.resid -= r->req.sg->size;
        r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
                                  r->req.sg, r->sector << BDRV_SECTOR_BITS,
                                  BDRV_SECTOR_SIZE,
                                  sdc->dma_writev, r, scsi_dma_complete, r,
                                  DMA_DIRECTION_TO_DEVICE);
    } else {
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         r->qiov.size, BLOCK_ACCT_WRITE);
        r->req.aiocb = sdc->dma_writev(r->sector << BDRV_SECTOR_BITS, &r->qiov,
                                       scsi_write_complete, r, r);
    }
}

/* Return a pointer to the data buffer.  */
static uint8_t *scsi_get_buf(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    return (uint8_t *)r->iov.iov_base;
}

static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    int buflen = 0;
    int start;

    if (req->cmd.buf[1] & 0x1) {
        /* Vital product data */
        uint8_t page_code = req->cmd.buf[2];

        outbuf[buflen++] = s->qdev.type & 0x1f;
        outbuf[buflen++] = page_code ; // this page
        outbuf[buflen++] = 0x00;
        outbuf[buflen++] = 0x00;
        start = buflen;

        switch (page_code) {
        case 0x00: /* Supported page codes, mandatory */
        {
            DPRINTF("Inquiry EVPD[Supported pages] "
                    "buffer size %zd\n", req->cmd.xfer);
            outbuf[buflen++] = 0x00; // list of supported pages (this page)
            if (s->serial) {
                outbuf[buflen++] = 0x80; // unit serial number
            }
            outbuf[buflen++] = 0x83; // device identification
            if (s->qdev.type == TYPE_DISK) {
                outbuf[buflen++] = 0xb0; // block limits
                outbuf[buflen++] = 0xb2; // thin provisioning
            }
            break;
        }
        case 0x80: /* Device serial number, optional */
        {
            int l;

            if (!s->serial) {
                DPRINTF("Inquiry (EVPD[Serial number] not supported\n");
                return -1;
            }

            l = strlen(s->serial);
            if (l > 36) {
                l = 36;
            }

            DPRINTF("Inquiry EVPD[Serial number] "
                    "buffer size %zd\n", req->cmd.xfer);
            memcpy(outbuf+buflen, s->serial, l);
            buflen += l;
            break;
        }

        case 0x83: /* Device identification page, mandatory */
        {
            const char *str = s->serial ?: blk_name(s->qdev.conf.blk);
            int max_len = s->serial ? 20 : 255 - 8;
            int id_len = strlen(str);

            if (id_len > max_len) {
                id_len = max_len;
            }
            DPRINTF("Inquiry EVPD[Device identification] "
                    "buffer size %zd\n", req->cmd.xfer);

            outbuf[buflen++] = 0x2; // ASCII
            outbuf[buflen++] = 0;   // not officially assigned
            outbuf[buflen++] = 0;   // reserved
            outbuf[buflen++] = id_len; // length of data following
            memcpy(outbuf+buflen, str, id_len);
            buflen += id_len;

            if (s->qdev.wwn) {
                outbuf[buflen++] = 0x1; // Binary
                outbuf[buflen++] = 0x3; // NAA
                outbuf[buflen++] = 0;   // reserved
                outbuf[buflen++] = 8;
                stq_be_p(&outbuf[buflen], s->qdev.wwn);
                buflen += 8;
            }

            if (s->qdev.port_wwn) {
                outbuf[buflen++] = 0x61; // SAS / Binary
                outbuf[buflen++] = 0x93; // PIV / Target port / NAA
                outbuf[buflen++] = 0;    // reserved
                outbuf[buflen++] = 8;
                stq_be_p(&outbuf[buflen], s->qdev.port_wwn);
                buflen += 8;
            }

            if (s->port_index) {
                outbuf[buflen++] = 0x61; // SAS / Binary
                outbuf[buflen++] = 0x94; // PIV / Target port / relative target port
                outbuf[buflen++] = 0;    // reserved
                outbuf[buflen++] = 4;
                stw_be_p(&outbuf[buflen + 2], s->port_index);
                buflen += 4;
            }
            break;
        }
        case 0xb0: /* block limits */
        {
            unsigned int unmap_sectors =
                s->qdev.conf.discard_granularity / s->qdev.blocksize;
            unsigned int min_io_size =
                s->qdev.conf.min_io_size / s->qdev.blocksize;
            unsigned int opt_io_size =
                s->qdev.conf.opt_io_size / s->qdev.blocksize;
            unsigned int max_unmap_sectors =
                s->max_unmap_size / s->qdev.blocksize;
            unsigned int max_io_sectors =
                s->max_io_size / s->qdev.blocksize;

            if (s->qdev.type == TYPE_ROM) {
                DPRINTF("Inquiry (EVPD[%02X] not supported for CDROM\n",
                        page_code);
                return -1;
            }
            /* required VPD size with unmap support */
            buflen = 0x40;
            memset(outbuf + 4, 0, buflen - 4);

            outbuf[4] = 0x1; /* wsnz */

            /* optimal transfer length granularity */
            outbuf[6] = (min_io_size >> 8) & 0xff;
            outbuf[7] = min_io_size & 0xff;

            /* maximum transfer length */
            outbuf[8] = (max_io_sectors >> 24) & 0xff;
            outbuf[9] = (max_io_sectors >> 16) & 0xff;
            outbuf[10] = (max_io_sectors >> 8) & 0xff;
            outbuf[11] = max_io_sectors & 0xff;

            /* optimal transfer length */
            outbuf[12] = (opt_io_size >> 24) & 0xff;
            outbuf[13] = (opt_io_size >> 16) & 0xff;
            outbuf[14] = (opt_io_size >> 8) & 0xff;
            outbuf[15] = opt_io_size & 0xff;

            /* max unmap LBA count, default is 1GB */
            outbuf[20] = (max_unmap_sectors >> 24) & 0xff;
            outbuf[21] = (max_unmap_sectors >> 16) & 0xff;
            outbuf[22] = (max_unmap_sectors >> 8) & 0xff;
            outbuf[23] = max_unmap_sectors & 0xff;

            /* max unmap descriptors, 255 fit in 4 kb with an 8-byte header.  */
            outbuf[24] = 0;
            outbuf[25] = 0;
            outbuf[26] = 0;
            outbuf[27] = 255;

            /* optimal unmap granularity */
            outbuf[28] = (unmap_sectors >> 24) & 0xff;
            outbuf[29] = (unmap_sectors >> 16) & 0xff;
            outbuf[30] = (unmap_sectors >> 8) & 0xff;
            outbuf[31] = unmap_sectors & 0xff;

            /* max write same size */
            outbuf[36] = 0;
            outbuf[37] = 0;
            outbuf[38] = 0;
            outbuf[39] = 0;

            outbuf[40] = (max_io_sectors >> 24) & 0xff;
            outbuf[41] = (max_io_sectors >> 16) & 0xff;
            outbuf[42] = (max_io_sectors >> 8) & 0xff;
            outbuf[43] = max_io_sectors & 0xff;
            break;
        }
        case 0xb2: /* thin provisioning */
        {
            buflen = 8;
            outbuf[4] = 0;
            outbuf[5] = 0xe0; /* unmap & write_same 10/16 all supported */
            outbuf[6] = s->qdev.conf.discard_granularity ? 2 : 1;
            outbuf[7] = 0;
            break;
        }
        default:
            return -1;
        }
        /* done with EVPD */
        assert(buflen - start <= 255);
        outbuf[start - 1] = buflen - start;
        return buflen;
    }

    /* Standard INQUIRY data */
    if (req->cmd.buf[2] != 0) {
        return -1;
    }

    /* PAGE CODE == 0 */
    buflen = req->cmd.xfer;
    if (buflen > SCSI_MAX_INQUIRY_LEN) {
        buflen = SCSI_MAX_INQUIRY_LEN;
    }

    outbuf[0] = s->qdev.type & 0x1f;
    outbuf[1] = (s->features & (1 << SCSI_DISK_F_REMOVABLE)) ? 0x80 : 0;

    strpadcpy((char *) &outbuf[16], 16, s->product, ' ');
    strpadcpy((char *) &outbuf[8], 8, s->vendor, ' ');

    memset(&outbuf[32], 0, 4);
    memcpy(&outbuf[32], s->version, MIN(4, strlen(s->version)));
    /*
     * We claim conformance to SPC-3, which is required for guests
     * to ask for modern features like READ CAPACITY(16) or the
     * block characteristics VPD page by default.  Not all of SPC-3
     * is actually implemented, but we're good enough.
     */
    outbuf[2] = 5;
    outbuf[3] = 2 | 0x10; /* Format 2, HiSup */

    if (buflen > 36) {
        outbuf[4] = buflen - 5; /* Additional Length = (Len - 1) - 4 */
    } else {
        /* If the allocation length of CDB is too small,
           the additional length is not adjusted */
        outbuf[4] = 36 - 5;
    }

    /* Sync data transfer and TCQ.  */
    outbuf[7] = 0x10 | (req->bus->info->tcq ? 0x02 : 0);
    return buflen;
}

static inline bool media_is_dvd(SCSIDiskState *s)
{
    uint64_t nb_sectors;
    if (s->qdev.type != TYPE_ROM) {
        return false;
    }
    if (!blk_is_available(s->qdev.conf.blk)) {
        return false;
    }
    blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
    return nb_sectors > CD_MAX_SECTORS;
}

static inline bool media_is_cd(SCSIDiskState *s)
{
    uint64_t nb_sectors;
    if (s->qdev.type != TYPE_ROM) {
        return false;
    }
    if (!blk_is_available(s->qdev.conf.blk)) {
        return false;
    }
    blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
    return nb_sectors <= CD_MAX_SECTORS;
}
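
/*
 * The CD vs. DVD distinction above is made purely on capacity: anything
 * larger than CD_MAX_SECTORS is reported with a DVD profile, anything else
 * as a CD.  The MMC emulation below relies on this heuristic.
 */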
static int scsi_read_disc_information(SCSIDiskState *s, SCSIDiskReq *r,
                                      uint8_t *outbuf)
{
    uint8_t type = r->req.cmd.buf[1] & 7;

    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }

    /* Types 1/2 are only defined for Blu-Ray.  */
    if (type != 0) {
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        return -1;
    }

    memset(outbuf, 0, 34);
    outbuf[1] = 32;
    outbuf[2] = 0xe; /* last session complete, disc finalized */
    outbuf[3] = 1;   /* first track on disc */
    outbuf[4] = 1;   /* # of sessions */
    outbuf[5] = 1;   /* first track of last session */
    outbuf[6] = 1;   /* last track of last session */
    outbuf[7] = 0x20; /* unrestricted use */
    outbuf[8] = 0x00; /* CD-ROM or DVD-ROM */
    /* 9-10-11: most significant byte corresponding bytes 4-5-6 */
    /* 12-23: not meaningful for CD-ROM or DVD-ROM */
    /* 24-31: disc bar code */
    /* 32: disc application code */
    /* 33: number of OPC tables */

    return 34;
}

static int scsi_read_dvd_structure(SCSIDiskState *s, SCSIDiskReq *r,
                                   uint8_t *outbuf)
{
    static const int rds_caps_size[5] = {
        [0] = 2048 + 4,
        [1] = 4 + 4,
        [3] = 188 + 4,
        [4] = 2048 + 4,
    };

    uint8_t media = r->req.cmd.buf[1];
    uint8_t layer = r->req.cmd.buf[6];
    uint8_t format = r->req.cmd.buf[7];
    int size = -1;

    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }
    if (media != 0) {
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        return -1;
    }

    if (format != 0xff) {
        if (!blk_is_available(s->qdev.conf.blk)) {
            scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
            return -1;
        }
        if (media_is_cd(s)) {
            scsi_check_condition(r, SENSE_CODE(INCOMPATIBLE_FORMAT));
            return -1;
        }
        if (format >= ARRAY_SIZE(rds_caps_size)) {
            return -1;
        }
        size = rds_caps_size[format];
        memset(outbuf, 0, size);
    }

    switch (format) {
    case 0x00: {
        /* Physical format information */
        uint64_t nb_sectors;
        if (layer != 0) {
            goto fail;
        }
        blk_get_geometry(s->qdev.conf.blk, &nb_sectors);

        outbuf[4] = 1;   /* DVD-ROM, part version 1 */
        outbuf[5] = 0xf; /* 120mm disc, minimum rate unspecified */
        outbuf[6] = 1;   /* one layer, read-only (per MMC-2 spec) */
        outbuf[7] = 0;   /* default densities */

        stl_be_p(&outbuf[12], (nb_sectors >> 2) - 1); /* end sector */
        stl_be_p(&outbuf[16], (nb_sectors >> 2) - 1); /* l0 end sector */
        break;
    }

    case 0x01: /* DVD copyright information, all zeros */
        break;

    case 0x03: /* BCA information - invalid field for no BCA info */
        return -1;

    case 0x04: /* DVD disc manufacturing information, all zeros */
        break;

    case 0xff: { /* List capabilities */
        int i;
        size = 4;
        for (i = 0; i < ARRAY_SIZE(rds_caps_size); i++) {
            if (!rds_caps_size[i]) {
                continue;
            }
            outbuf[size] = i;
            outbuf[size + 1] = 0x40; /* Not writable, readable */
            stw_be_p(&outbuf[size + 2], rds_caps_size[i]);
            size += 4;
        }
        break;
    }

    default:
        return -1;
    }

    /* Size of buffer, not including 2 byte size field */
    stw_be_p(outbuf, size - 2);
    return size;

fail:
    return -1;
}
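
/*
 * Media event state: s->media_event and s->eject_request are set by the
 * medium-change and eject-request callbacks elsewhere in this device model
 * and are consumed exactly once by the descriptor built below, matching the
 * "report an event, then clear it" model of GET EVENT STATUS NOTIFICATION.
 */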
static int scsi_event_status_media(SCSIDiskState *s, uint8_t *outbuf)
{
    uint8_t event_code, media_status;

    media_status = 0;
    if (s->tray_open) {
        media_status = MS_TRAY_OPEN;
    } else if (blk_is_inserted(s->qdev.conf.blk)) {
        media_status = MS_MEDIA_PRESENT;
    }

    /* Event notification descriptor */
    event_code = MEC_NO_CHANGE;
    if (media_status != MS_TRAY_OPEN) {
        if (s->media_event) {
            event_code = MEC_NEW_MEDIA;
            s->media_event = false;
        } else if (s->eject_request) {
            event_code = MEC_EJECT_REQUESTED;
            s->eject_request = false;
        }
    }

    outbuf[0] = event_code;
    outbuf[1] = media_status;

    /* These fields are reserved, just clear them. */
    outbuf[2] = 0;
    outbuf[3] = 0;
    return 4;
}

static int scsi_get_event_status_notification(SCSIDiskState *s, SCSIDiskReq *r,
                                              uint8_t *outbuf)
{
    int size;
    uint8_t *buf = r->req.cmd.buf;
    uint8_t notification_class_request = buf[4];
    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }
    if ((buf[1] & 1) == 0) {
        /* asynchronous */
        return -1;
    }

    size = 4;
    outbuf[0] = outbuf[1] = 0;
    outbuf[3] = 1 << GESN_MEDIA; /* supported events */
    if (notification_class_request & (1 << GESN_MEDIA)) {
        outbuf[2] = GESN_MEDIA;
        size += scsi_event_status_media(s, &outbuf[size]);
    } else {
        outbuf[2] = 0x80;
    }
    stw_be_p(outbuf, size - 4);
    return size;
}

static int scsi_get_configuration(SCSIDiskState *s, uint8_t *outbuf)
{
    int current;

    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }

    if (media_is_dvd(s)) {
        current = MMC_PROFILE_DVD_ROM;
    } else if (media_is_cd(s)) {
        current = MMC_PROFILE_CD_ROM;
    } else {
        current = MMC_PROFILE_NONE;
    }

    memset(outbuf, 0, 40);
    stl_be_p(&outbuf[0], 36); /* Bytes after the data length field */
    stw_be_p(&outbuf[6], current);
    /* outbuf[8] - outbuf[19]: Feature 0 - Profile list */
    outbuf[10] = 0x03; /* persistent, current */
    outbuf[11] = 8; /* two profiles */
    stw_be_p(&outbuf[12], MMC_PROFILE_DVD_ROM);
    outbuf[14] = (current == MMC_PROFILE_DVD_ROM);
    stw_be_p(&outbuf[16], MMC_PROFILE_CD_ROM);
    outbuf[18] = (current == MMC_PROFILE_CD_ROM);
    /* outbuf[20] - outbuf[31]: Feature 1 - Core feature */
    stw_be_p(&outbuf[20], 1);
    outbuf[22] = 0x08 | 0x03; /* version 2, persistent, current */
    outbuf[23] = 8;
    stl_be_p(&outbuf[24], 1); /* SCSI */
    outbuf[28] = 1; /* DBE = 1, mandatory */
    /* outbuf[32] - outbuf[39]: Feature 3 - Removable media feature */
    stw_be_p(&outbuf[32], 3);
    outbuf[34] = 0x08 | 0x03; /* version 2, persistent, current */
    outbuf[35] = 4;
    outbuf[36] = 0x39; /* tray, load=1, eject=1, unlocked at powerup, lock=1 */
    /* TODO: Random readable, CD read, DVD read, drive serial number,
       power management */
    return 40;
}

static int scsi_emulate_mechanism_status(SCSIDiskState *s, uint8_t *outbuf)
{
    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }
    memset(outbuf, 0, 8);
    outbuf[5] = 1; /* CD-ROM */
    return 8;
}
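
/*
 * mode_sense_page() below writes the page payload at *p_outbuf + 2 and fills
 * in the page code and page length afterwards, so a caller only has to
 * supply a zeroed buffer and a running output pointer.  For example,
 * MODE_PAGE_CACHING produces a 0x12-byte payload, i.e. 0x14 bytes including
 * its 2-byte page header.
 */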
static int mode_sense_page(SCSIDiskState *s, int page, uint8_t **p_outbuf,
                           int page_control)
{
    static const int mode_sense_valid[0x3f] = {
        [MODE_PAGE_HD_GEOMETRY]            = (1 << TYPE_DISK),
        [MODE_PAGE_FLEXIBLE_DISK_GEOMETRY] = (1 << TYPE_DISK),
        [MODE_PAGE_CACHING]                = (1 << TYPE_DISK) | (1 << TYPE_ROM),
        [MODE_PAGE_R_W_ERROR]              = (1 << TYPE_DISK) | (1 << TYPE_ROM),
        [MODE_PAGE_AUDIO_CTL]              = (1 << TYPE_ROM),
        [MODE_PAGE_CAPABILITIES]           = (1 << TYPE_ROM),
    };

    uint8_t *p = *p_outbuf + 2;
    int length;

    if ((mode_sense_valid[page] & (1 << s->qdev.type)) == 0) {
        return -1;
    }

    /*
     * If Changeable Values are requested, a mask denoting those mode parameters
     * that are changeable shall be returned. As we currently don't support
     * parameter changes via MODE_SELECT all bits are returned set to zero.
     * The buffer was already memset to zero by the caller of this function.
     *
     * The offsets here are off by two compared to the descriptions in the
     * SCSI specs, because those include a 2-byte header.  This is unfortunate,
     * but it is done so that offsets are consistent within our implementation
     * of MODE SENSE and MODE SELECT.  MODE SELECT has to deal with both
     * 2-byte and 4-byte headers.
     */
    switch (page) {
    case MODE_PAGE_HD_GEOMETRY:
        length = 0x16;
        if (page_control == 1) { /* Changeable Values */
            break;
        }
        /* if a geometry hint is available, use it */
        p[0] = (s->qdev.conf.cyls >> 16) & 0xff;
        p[1] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[2] = s->qdev.conf.cyls & 0xff;
        p[3] = s->qdev.conf.heads & 0xff;
        /* Write precomp start cylinder, disabled */
        p[4] = (s->qdev.conf.cyls >> 16) & 0xff;
        p[5] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[6] = s->qdev.conf.cyls & 0xff;
        /* Reduced current start cylinder, disabled */
        p[7] = (s->qdev.conf.cyls >> 16) & 0xff;
        p[8] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[9] = s->qdev.conf.cyls & 0xff;
        /* Device step rate [ns], 200ns */
        p[10] = 0;
        p[11] = 200;
        /* Landing zone cylinder */
        p[12] = 0xff;
        p[13] = 0xff;
        p[14] = 0xff;
        /* Medium rotation rate [rpm], 5400 rpm */
        p[18] = (5400 >> 8) & 0xff;
        p[19] = 5400 & 0xff;
        break;

    case MODE_PAGE_FLEXIBLE_DISK_GEOMETRY:
        length = 0x1e;
        if (page_control == 1) { /* Changeable Values */
            break;
        }
        /* Transfer rate [kbit/s], 5Mbit/s */
        p[0] = 5000 >> 8;
        p[1] = 5000 & 0xff;
        /* if a geometry hint is available, use it */
        p[2] = s->qdev.conf.heads & 0xff;
        p[3] = s->qdev.conf.secs & 0xff;
        p[4] = s->qdev.blocksize >> 8;
        p[6] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[7] = s->qdev.conf.cyls & 0xff;
        /* Write precomp start cylinder, disabled */
        p[8] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[9] = s->qdev.conf.cyls & 0xff;
        /* Reduced current start cylinder, disabled */
        p[10] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[11] = s->qdev.conf.cyls & 0xff;
        /* Device step rate [100us], 100us */
        p[12] = 0;
        p[13] = 1;
        /* Device step pulse width [us], 1us */
        p[14] = 1;
        /* Device head settle delay [100us], 100us */
        p[15] = 0;
        p[16] = 1;
        /* Motor on delay [0.1s], 0.1s */
        p[17] = 1;
        /* Motor off delay [0.1s], 0.1s */
        p[18] = 1;
        /* Medium rotation rate [rpm], 5400 rpm */
        p[26] = (5400 >> 8) & 0xff;
        p[27] = 5400 & 0xff;
        break;

    case MODE_PAGE_CACHING:
        length = 0x12;
        if (page_control == 1 || /* Changeable Values */
            blk_enable_write_cache(s->qdev.conf.blk)) {
            p[0] = 4; /* WCE */
        }
        break;

    case MODE_PAGE_R_W_ERROR:
        length = 10;
        if (page_control == 1) { /* Changeable Values */
            break;
        }
        p[0] = 0x80; /* Automatic Write Reallocation Enabled */
        if (s->qdev.type == TYPE_ROM) {
            p[1] = 0x20; /* Read Retry Count */
        }
        break;

    case MODE_PAGE_AUDIO_CTL:
        length = 14;
        break;

    case MODE_PAGE_CAPABILITIES:
        length = 0x14;
        if (page_control == 1) { /* Changeable Values */
            break;
        }

        p[0] = 0x3b; /* CD-R & CD-RW read */
        p[1] = 0; /* Writing not supported */
        p[2] = 0x7f; /* Audio, composite, digital out,
                        mode 2 form 1&2, multi session */
        p[3] = 0xff; /* CD DA, DA accurate, RW supported,
                        RW corrected, C2 errors, ISRC,
                        UPC, Bar code */
        p[4] = 0x2d | (s->tray_locked ? 2 : 0);
        /* Locking supported, jumper present, eject, tray */
        p[5] = 0; /* no volume & mute control, no
                     changer */
        p[6] = (50 * 176) >> 8; /* 50x read speed */
        p[7] = (50 * 176) & 0xff;
        p[8] = 2 >> 8; /* Two volume levels */
        p[9] = 2 & 0xff;
        p[10] = 2048 >> 8; /* 2M buffer */
        p[11] = 2048 & 0xff;
        p[12] = (16 * 176) >> 8; /* 16x read speed current */
        p[13] = (16 * 176) & 0xff;
        p[16] = (16 * 176) >> 8; /* 16x write speed */
        p[17] = (16 * 176) & 0xff;
        p[18] = (16 * 176) >> 8; /* 16x write speed current */
        p[19] = (16 * 176) & 0xff;
        break;

    default:
        return -1;
    }

    assert(length < 256);
    (*p_outbuf)[0] = page;
    (*p_outbuf)[1] = length;
    *p_outbuf += length + 2;
    return length + 2;
}
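
/*
 * MODE SENSE(6) replies start with a 4-byte header (data length, medium
 * type, device-specific parameter, block descriptor length), while MODE
 * SENSE(10) uses an 8-byte header with 2-byte length fields.  The code
 * below builds either header, then appends the optional 8-byte block
 * descriptor and the requested mode pages.
 */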
static int scsi_disk_emulate_mode_sense(SCSIDiskReq *r, uint8_t *outbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    uint64_t nb_sectors;
    bool dbd;
    int page, buflen, ret, page_control;
    uint8_t *p;
    uint8_t dev_specific_param;

    dbd = (r->req.cmd.buf[1] & 0x8) != 0;
    page = r->req.cmd.buf[2] & 0x3f;
    page_control = (r->req.cmd.buf[2] & 0xc0) >> 6;
    DPRINTF("Mode Sense(%d) (page %d, xfer %zd, page_control %d)\n",
        (r->req.cmd.buf[0] == MODE_SENSE) ? 6 : 10, page, r->req.cmd.xfer, page_control);
    memset(outbuf, 0, r->req.cmd.xfer);
    p = outbuf;

    if (s->qdev.type == TYPE_DISK) {
        dev_specific_param = s->features & (1 << SCSI_DISK_F_DPOFUA) ? 0x10 : 0;
        if (blk_is_read_only(s->qdev.conf.blk)) {
            dev_specific_param |= 0x80; /* Readonly.  */
        }
    } else {
        /* MMC prescribes that CD/DVD drives have no block descriptors,
         * and defines no device-specific parameter.  */
        dev_specific_param = 0x00;
        dbd = true;
    }

    if (r->req.cmd.buf[0] == MODE_SENSE) {
        p[1] = 0; /* Default media type.  */
        p[2] = dev_specific_param;
        p[3] = 0; /* Block descriptor length.  */
        p += 4;
    } else { /* MODE_SENSE_10 */
        p[2] = 0; /* Default media type.  */
        p[3] = dev_specific_param;
        p[6] = p[7] = 0; /* Block descriptor length.  */
        p += 8;
    }

    blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
    if (!dbd && nb_sectors) {
        if (r->req.cmd.buf[0] == MODE_SENSE) {
            outbuf[3] = 8; /* Block descriptor length  */
        } else { /* MODE_SENSE_10 */
            outbuf[7] = 8; /* Block descriptor length  */
        }
        nb_sectors /= (s->qdev.blocksize / 512);
        if (nb_sectors > 0xffffff) {
            nb_sectors = 0;
        }
        p[0] = 0; /* media density code */
        p[1] = (nb_sectors >> 16) & 0xff;
        p[2] = (nb_sectors >> 8) & 0xff;
        p[3] = nb_sectors & 0xff;
        p[4] = 0; /* reserved */
        p[5] = 0; /* bytes 5-7 are the sector size in bytes */
        p[6] = s->qdev.blocksize >> 8;
        p[7] = 0;
        p += 8;
    }

    if (page_control == 3) {
        /* Saved Values */
        scsi_check_condition(r, SENSE_CODE(SAVING_PARAMS_NOT_SUPPORTED));
        return -1;
    }

    if (page == 0x3f) {
        for (page = 0; page <= 0x3e; page++) {
            mode_sense_page(s, page, &p, page_control);
        }
    } else {
        ret = mode_sense_page(s, page, &p, page_control);
        if (ret == -1) {
            return -1;
        }
    }

    buflen = p - outbuf;
    /*
     * The mode data length field specifies the length in bytes of the
     * following data that is available to be transferred. The mode data
     * length does not include itself.
     */
    if (r->req.cmd.buf[0] == MODE_SENSE) {
        outbuf[0] = buflen - 1;
    } else { /* MODE_SENSE_10 */
        outbuf[0] = ((buflen - 2) >> 8) & 0xff;
        outbuf[1] = (buflen - 2) & 0xff;
    }
    return buflen;
}

static int scsi_disk_emulate_read_toc(SCSIRequest *req, uint8_t *outbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    int start_track, format, msf, toclen;
    uint64_t nb_sectors;

    msf = req->cmd.buf[1] & 2;
    format = req->cmd.buf[2] & 0xf;
    start_track = req->cmd.buf[6];
    blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
    DPRINTF("Read TOC (track %d format %d msf %d)\n", start_track, format, msf >> 1);
    nb_sectors /= s->qdev.blocksize / 512;
    switch (format) {
    case 0:
        toclen = cdrom_read_toc(nb_sectors, outbuf, msf, start_track);
        break;
    case 1:
        /* multi session : only a single session defined */
        toclen = 12;
        memset(outbuf, 0, 12);
        outbuf[1] = 0x0a;
        outbuf[2] = 0x01;
        outbuf[3] = 0x01;
        break;
    case 2:
        toclen = cdrom_read_toc_raw(nb_sectors, outbuf, msf, start_track);
        break;
    default:
        return -1;
    }
    return toclen;
}

static int scsi_disk_emulate_start_stop(SCSIDiskReq *r)
{
    SCSIRequest *req = &r->req;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    bool start = req->cmd.buf[4] & 1;
    bool loej = req->cmd.buf[4] & 2; /* load on start, eject on !start */
    int pwrcnd = req->cmd.buf[4] & 0xf0;

    if (pwrcnd) {
        /* eject/load only happens for power condition == 0 */
        return 0;
    }

    if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) && loej) {
        if (!start && !s->tray_open && s->tray_locked) {
            scsi_check_condition(r,
                                 blk_is_inserted(s->qdev.conf.blk)
                                 ? SENSE_CODE(ILLEGAL_REQ_REMOVAL_PREVENTED)
                                 : SENSE_CODE(NOT_READY_REMOVAL_PREVENTED));
            return -1;
        }

        if (s->tray_open != !start) {
            blk_eject(s->qdev.conf.blk, !start);
            s->tray_open = !start;
        }
    }
    return 0;
}

static void scsi_disk_emulate_read_data(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    int buflen = r->iov.iov_len;

    if (buflen) {
        DPRINTF("Read buf_len=%d\n", buflen);
        r->iov.iov_len = 0;
        r->started = true;
        scsi_req_data(&r->req, buflen);
        return;
    }

    /* This also clears the sense buffer for REQUEST SENSE.  */
    scsi_req_complete(&r->req, GOOD);
}

static int scsi_disk_check_mode_select(SCSIDiskState *s, int page,
                                       uint8_t *inbuf, int inlen)
{
    uint8_t mode_current[SCSI_MAX_MODE_LEN];
    uint8_t mode_changeable[SCSI_MAX_MODE_LEN];
    uint8_t *p;
    int len, expected_len, changeable_len, i;

    /* The input buffer does not include the page header, so it is
     * off by 2 bytes.
     */
    expected_len = inlen + 2;
    if (expected_len > SCSI_MAX_MODE_LEN) {
        return -1;
    }

    p = mode_current;
    memset(mode_current, 0, inlen + 2);
    len = mode_sense_page(s, page, &p, 0);
    if (len < 0 || len != expected_len) {
        return -1;
    }

    p = mode_changeable;
    memset(mode_changeable, 0, inlen + 2);
    changeable_len = mode_sense_page(s, page, &p, 1);
    assert(changeable_len == len);

    /* Check that unchangeable bits are the same as what MODE SENSE
     * would return.
     */
    for (i = 2; i < len; i++) {
        if (((mode_current[i] ^ inbuf[i - 2]) & ~mode_changeable[i]) != 0) {
            return -1;
        }
    }
    return 0;
}

static void scsi_disk_apply_mode_select(SCSIDiskState *s, int page, uint8_t *p)
{
    switch (page) {
    case MODE_PAGE_CACHING:
        blk_set_enable_write_cache(s->qdev.conf.blk, (p[0] & 4) != 0);
        break;

    default:
        break;
    }
}
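
/*
 * MODE SELECT is applied in two passes (see scsi_disk_emulate_mode_select):
 * pass 0 only validates each page against the changeable-values mask that
 * mode_sense_page() reports, and pass 1 applies the change.  This keeps the
 * operation all-or-nothing: nothing is modified if any page is rejected.
 */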
static int mode_select_pages(SCSIDiskReq *r, uint8_t *p, int len, bool change)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    while (len > 0) {
        int page, subpage, page_len;

        /* Parse both possible formats for the mode page headers.  */
        page = p[0] & 0x3f;
        if (p[0] & 0x40) {
            if (len < 4) {
                goto invalid_param_len;
            }
            subpage = p[1];
            page_len = lduw_be_p(&p[2]);
            p += 4;
            len -= 4;
        } else {
            if (len < 2) {
                goto invalid_param_len;
            }
            subpage = 0;
            page_len = p[1];
            p += 2;
            len -= 2;
        }

        if (subpage) {
            goto invalid_param;
        }
        if (page_len > len) {
            goto invalid_param_len;
        }

        if (!change) {
            if (scsi_disk_check_mode_select(s, page, p, page_len) < 0) {
                goto invalid_param;
            }
        } else {
            scsi_disk_apply_mode_select(s, page, p);
        }

        p += page_len;
        len -= page_len;
    }
    return 0;

invalid_param:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM));
    return -1;

invalid_param_len:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
    return -1;
}

static void scsi_disk_emulate_mode_select(SCSIDiskReq *r, uint8_t *inbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    uint8_t *p = inbuf;
    int cmd = r->req.cmd.buf[0];
    int len = r->req.cmd.xfer;
    int hdr_len = (cmd == MODE_SELECT ? 4 : 8);
    int bd_len;
    int pass;

    /* We only support PF=1, SP=0.  */
    if ((r->req.cmd.buf[1] & 0x11) != 0x10) {
        goto invalid_field;
    }

    if (len < hdr_len) {
        goto invalid_param_len;
    }

    bd_len = (cmd == MODE_SELECT ? p[3] : lduw_be_p(&p[6]));
    len -= hdr_len;
    p += hdr_len;
    if (len < bd_len) {
        goto invalid_param_len;
    }
    if (bd_len != 0 && bd_len != 8) {
        goto invalid_param;
    }

    len -= bd_len;
    p += bd_len;

    /* Ensure no change is made if there is an error!  */
    for (pass = 0; pass < 2; pass++) {
        if (mode_select_pages(r, p, len, pass == 1) < 0) {
            assert(pass == 0);
            return;
        }
    }
    if (!blk_enable_write_cache(s->qdev.conf.blk)) {
        /* The request is used as the AIO opaque value, so add a ref.  */
        scsi_req_ref(&r->req);
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
        return;
    }

    scsi_req_complete(&r->req, GOOD);
    return;

invalid_param:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM));
    return;

invalid_param_len:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
    return;

invalid_field:
    scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
}
static inline bool check_lba_range(SCSIDiskState *s,
                                   uint64_t sector_num, uint32_t nb_sectors)
{
    /*
     * The first line tests that no overflow happens when computing the last
     * sector.  The second line tests that the last accessed sector is in
     * range.
     *
     * Careful, the computations should not underflow for nb_sectors == 0,
     * and a 0-block read to the first LBA beyond the end of device is
     * valid.
     */
    return (sector_num <= sector_num + nb_sectors &&
            sector_num + nb_sectors <= s->qdev.max_lba + 1);
}

typedef struct UnmapCBData {
    SCSIDiskReq *r;
    uint8_t *inbuf;
    int count;
} UnmapCBData;

static void scsi_unmap_complete(void *opaque, int ret);

static void scsi_unmap_complete_noio(UnmapCBData *data, int ret)
{
    SCSIDiskReq *r = data->r;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    uint64_t sector_num;
    uint32_t nb_sectors;

    assert(r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    if (data->count > 0) {
        sector_num = ldq_be_p(&data->inbuf[0]);
        nb_sectors = ldl_be_p(&data->inbuf[8]) & 0xffffffffULL;
        if (!check_lba_range(s, sector_num, nb_sectors)) {
            scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
            goto done;
        }

        r->req.aiocb = blk_aio_pdiscard(s->qdev.conf.blk,
                                        sector_num * s->qdev.blocksize,
                                        nb_sectors * s->qdev.blocksize,
                                        scsi_unmap_complete, data);
        data->count--;
        data->inbuf += 16;
        return;
    }

    scsi_req_complete(&r->req, GOOD);

done:
    scsi_req_unref(&r->req);
    g_free(data);
}

static void scsi_unmap_complete(void *opaque, int ret)
{
    UnmapCBData *data = opaque;
    SCSIDiskReq *r = data->r;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    scsi_unmap_complete_noio(data, ret);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}
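
/*
 * UNMAP parameter list layout, as parsed below: bytes 0-1 hold the data
 * length, bytes 2-3 the block descriptor data length (a multiple of 16),
 * and bytes 8 onwards the descriptors themselves, each 16 bytes long: an
 * 8-byte starting LBA, a 4-byte block count and 4 reserved bytes.  The
 * descriptors are discarded one at a time through blk_aio_pdiscard().
 */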
static void scsi_disk_emulate_unmap(SCSIDiskReq *r, uint8_t *inbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    uint8_t *p = inbuf;
    int len = r->req.cmd.xfer;
    UnmapCBData *data;

    /* Reject ANCHOR=1.  */
    if (r->req.cmd.buf[1] & 0x1) {
        goto invalid_field;
    }

    if (len < 8) {
        goto invalid_param_len;
    }
    if (len < lduw_be_p(&p[0]) + 2) {
        goto invalid_param_len;
    }
    if (len < lduw_be_p(&p[2]) + 8) {
        goto invalid_param_len;
    }
    if (lduw_be_p(&p[2]) & 15) {
        goto invalid_param_len;
    }

    if (blk_is_read_only(s->qdev.conf.blk)) {
        scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
        return;
    }

    data = g_new0(UnmapCBData, 1);
    data->r = r;
    data->inbuf = &p[8];
    data->count = lduw_be_p(&p[2]) >> 4;

    /* The matching unref is in scsi_unmap_complete, before data is freed.  */
    scsi_req_ref(&r->req);
    scsi_unmap_complete_noio(data, 0);
    return;

invalid_param_len:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
    return;

invalid_field:
    scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
}

typedef struct WriteSameCBData {
    SCSIDiskReq *r;
    int64_t sector;
    int nb_sectors;
    QEMUIOVector qiov;
    struct iovec iov;
} WriteSameCBData;

static void scsi_write_same_complete(void *opaque, int ret)
{
    WriteSameCBData *data = opaque;
    SCSIDiskReq *r = data->r;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;
    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (scsi_disk_req_check_error(r, ret, true)) {
        goto done;
    }

    block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);

    data->nb_sectors -= data->iov.iov_len / 512;
    data->sector += data->iov.iov_len / 512;
    data->iov.iov_len = MIN(data->nb_sectors * 512, data->iov.iov_len);
    if (data->iov.iov_len) {
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         data->iov.iov_len, BLOCK_ACCT_WRITE);
        /* Reinitialize qiov, to handle unaligned WRITE SAME request
         * where final qiov may need smaller size */
        qemu_iovec_init_external(&data->qiov, &data->iov, 1);
        r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk,
                                       data->sector << BDRV_SECTOR_BITS,
                                       &data->qiov, 0,
                                       scsi_write_same_complete, data);
        return;
    }

    scsi_req_complete(&r->req, GOOD);

done:
    scsi_req_unref(&r->req);
    qemu_vfree(data->iov.iov_base);
    g_free(data);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}
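
/*
 * WRITE SAME strategy, as implemented below: if the guest-provided block is
 * all zeroes the request becomes blk_aio_pwrite_zeroes() (with
 * BDRV_REQ_MAY_UNMAP when the UNMAP bit is set); otherwise the single block
 * is replicated into a bounce buffer of at most SCSI_WRITE_SAME_MAX bytes
 * and written out in a loop by scsi_write_same_complete().
 */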
static void scsi_disk_emulate_write_same(SCSIDiskReq *r, uint8_t *inbuf)
{
    SCSIRequest *req = &r->req;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    uint32_t nb_sectors = scsi_data_cdb_xfer(r->req.cmd.buf);
    WriteSameCBData *data;
    uint8_t *buf;
    int i;

    /* Fail if PBDATA=1 or LBDATA=1 or ANCHOR=1.  */
    if (nb_sectors == 0 || (req->cmd.buf[1] & 0x16)) {
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        return;
    }

    if (blk_is_read_only(s->qdev.conf.blk)) {
        scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
        return;
    }
    if (!check_lba_range(s, r->req.cmd.lba, nb_sectors)) {
        scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
        return;
    }

    if (buffer_is_zero(inbuf, s->qdev.blocksize)) {
        int flags = (req->cmd.buf[1] & 0x8) ? BDRV_REQ_MAY_UNMAP : 0;

        /* The request is used as the AIO opaque value, so add a ref.  */
        scsi_req_ref(&r->req);
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         nb_sectors * s->qdev.blocksize,
                         BLOCK_ACCT_WRITE);
        r->req.aiocb = blk_aio_pwrite_zeroes(s->qdev.conf.blk,
                                             r->req.cmd.lba * s->qdev.blocksize,
                                             nb_sectors * s->qdev.blocksize,
                                             flags, scsi_aio_complete, r);
        return;
    }

    data = g_new0(WriteSameCBData, 1);
    data->r = r;
    data->sector = r->req.cmd.lba * (s->qdev.blocksize / 512);
    data->nb_sectors = nb_sectors * (s->qdev.blocksize / 512);
    data->iov.iov_len = MIN(data->nb_sectors * 512, SCSI_WRITE_SAME_MAX);
    data->iov.iov_base = buf = blk_blockalign(s->qdev.conf.blk,
                                              data->iov.iov_len);
    qemu_iovec_init_external(&data->qiov, &data->iov, 1);

    for (i = 0; i < data->iov.iov_len; i += s->qdev.blocksize) {
        memcpy(&buf[i], inbuf, s->qdev.blocksize);
    }

    scsi_req_ref(&r->req);
    block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                     data->iov.iov_len, BLOCK_ACCT_WRITE);
    r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk,
                                   data->sector << BDRV_SECTOR_BITS,
                                   &data->qiov, 0,
                                   scsi_write_same_complete, data);
}

static void scsi_disk_emulate_write_data(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    if (r->iov.iov_len) {
        int buflen = r->iov.iov_len;
        DPRINTF("Write buf_len=%d\n", buflen);
        r->iov.iov_len = 0;
        scsi_req_data(&r->req, buflen);
        return;
    }

    switch (req->cmd.buf[0]) {
    case MODE_SELECT:
    case MODE_SELECT_10:
        /* This also clears the sense buffer for REQUEST SENSE.  */
        scsi_disk_emulate_mode_select(r, r->iov.iov_base);
        break;

    case UNMAP:
        scsi_disk_emulate_unmap(r, r->iov.iov_base);
        break;

    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
        if (r->req.status == -1) {
            scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        }
        break;

    case WRITE_SAME_10:
    case WRITE_SAME_16:
        scsi_disk_emulate_write_same(r, r->iov.iov_base);
        break;

    default:
        abort();
    }
}
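
/*
 * scsi_disk_emulate_command() below handles the non-READ/WRITE CDBs.  Like
 * the DMA path, it returns the expected transfer length: positive for data
 * returned to the initiator, negative when the command carries a parameter
 * list (MODE SELECT, UNMAP, WRITE SAME), and 0 when the request has already
 * been completed or failed with a check condition.
 */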
static int32_t scsi_disk_emulate_command(SCSIRequest *req, uint8_t *buf)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    uint64_t nb_sectors;
    uint8_t *outbuf;
    int buflen;

    switch (req->cmd.buf[0]) {
    case INQUIRY:
    case MODE_SENSE:
    case MODE_SENSE_10:
    case RESERVE:
    case RESERVE_10:
    case RELEASE:
    case RELEASE_10:
    case START_STOP:
    case ALLOW_MEDIUM_REMOVAL:
    case GET_CONFIGURATION:
    case GET_EVENT_STATUS_NOTIFICATION:
    case MECHANISM_STATUS:
    case REQUEST_SENSE:
        break;

    default:
        if (!blk_is_available(s->qdev.conf.blk)) {
            scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
            return 0;
        }
        break;
    }

    /*
     * FIXME: we shouldn't return anything bigger than 4k, but the code
     * requires the buffer to be as big as req->cmd.xfer in several
     * places.  So, do not allow CDBs with a very large ALLOCATION
     * LENGTH.  The real fix would be to modify scsi_read_data and
     * dma_buf_read, so that they return data beyond the buflen
     * as all zeros.
     */
    if (req->cmd.xfer > 65536) {
        goto illegal_request;
    }
    r->buflen = MAX(4096, req->cmd.xfer);

    if (!r->iov.iov_base) {
        r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen);
    }

    buflen = req->cmd.xfer;
    outbuf = r->iov.iov_base;
    memset(outbuf, 0, r->buflen);
    switch (req->cmd.buf[0]) {
    case TEST_UNIT_READY:
        assert(blk_is_available(s->qdev.conf.blk));
        break;
    case INQUIRY:
        buflen = scsi_disk_emulate_inquiry(req, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case MODE_SENSE:
    case MODE_SENSE_10:
        buflen = scsi_disk_emulate_mode_sense(r, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case READ_TOC:
        buflen = scsi_disk_emulate_read_toc(req, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case RESERVE:
        if (req->cmd.buf[1] & 1) {
            goto illegal_request;
        }
        break;
    case RESERVE_10:
        if (req->cmd.buf[1] & 3) {
            goto illegal_request;
        }
        break;
    case RELEASE:
        if (req->cmd.buf[1] & 1) {
            goto illegal_request;
        }
        break;
    case RELEASE_10:
        if (req->cmd.buf[1] & 3) {
            goto illegal_request;
        }
        break;
    case START_STOP:
        if (scsi_disk_emulate_start_stop(r) < 0) {
            return 0;
        }
        break;
    case ALLOW_MEDIUM_REMOVAL:
        s->tray_locked = req->cmd.buf[4] & 1;
        blk_lock_medium(s->qdev.conf.blk, req->cmd.buf[4] & 1);
        break;
    case READ_CAPACITY_10:
        /* The normal LEN field for this command is zero.  */
        memset(outbuf, 0, 8);
        blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
        if (!nb_sectors) {
            scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY));
            return 0;
        }
        if ((req->cmd.buf[8] & 1) == 0 && req->cmd.lba) {
            goto illegal_request;
        }
        nb_sectors /= s->qdev.blocksize / 512;
        /* Returned value is the address of the last sector.  */
        nb_sectors--;
        /* Remember the new size for read/write sanity checking. */
        s->qdev.max_lba = nb_sectors;
        /* Clip to 2TB, instead of returning capacity modulo 2TB. */
        if (nb_sectors > UINT32_MAX) {
            nb_sectors = UINT32_MAX;
        }
        outbuf[0] = (nb_sectors >> 24) & 0xff;
        outbuf[1] = (nb_sectors >> 16) & 0xff;
        outbuf[2] = (nb_sectors >> 8) & 0xff;
        outbuf[3] = nb_sectors & 0xff;
        outbuf[4] = 0;
        outbuf[5] = 0;
        outbuf[6] = s->qdev.blocksize >> 8;
        outbuf[7] = 0;
        break;
    case REQUEST_SENSE:
        /* Just return "NO SENSE".  */
    case REQUEST_SENSE:
        /* Just return "NO SENSE".  */
        buflen = scsi_build_sense(NULL, 0, outbuf, r->buflen,
                                  (req->cmd.buf[1] & 1) == 0);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case MECHANISM_STATUS:
        buflen = scsi_emulate_mechanism_status(s, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case GET_CONFIGURATION:
        buflen = scsi_get_configuration(s, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case GET_EVENT_STATUS_NOTIFICATION:
        buflen = scsi_get_event_status_notification(s, r, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case READ_DISC_INFORMATION:
        buflen = scsi_read_disc_information(s, r, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case READ_DVD_STRUCTURE:
        buflen = scsi_read_dvd_structure(s, r, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case SERVICE_ACTION_IN_16:
        /* Service Action In subcommands. */
        if ((req->cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) {
            DPRINTF("SAI READ CAPACITY(16)\n");
            memset(outbuf, 0, req->cmd.xfer);
            blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
            if (!nb_sectors) {
                scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY));
                return 0;
            }
            if ((req->cmd.buf[14] & 1) == 0 && req->cmd.lba) {
                goto illegal_request;
            }
            nb_sectors /= s->qdev.blocksize / 512;
            /* Returned value is the address of the last sector.  */
            nb_sectors--;
            /* Remember the new size for read/write sanity checking. */
            s->qdev.max_lba = nb_sectors;
            outbuf[0] = (nb_sectors >> 56) & 0xff;
            outbuf[1] = (nb_sectors >> 48) & 0xff;
            outbuf[2] = (nb_sectors >> 40) & 0xff;
            outbuf[3] = (nb_sectors >> 32) & 0xff;
            outbuf[4] = (nb_sectors >> 24) & 0xff;
            outbuf[5] = (nb_sectors >> 16) & 0xff;
            outbuf[6] = (nb_sectors >> 8) & 0xff;
            outbuf[7] = nb_sectors & 0xff;
            outbuf[8] = 0;
            outbuf[9] = 0;
            outbuf[10] = s->qdev.blocksize >> 8;
            outbuf[11] = 0;
            outbuf[12] = 0;
            outbuf[13] = get_physical_block_exp(&s->qdev.conf);

            /* set TPE bit if the format supports discard */
            if (s->qdev.conf.discard_granularity) {
                outbuf[14] = 0x80;
            }

            /* Protection, exponent and lowest lba field left blank. */
            break;
        }
        DPRINTF("Unsupported Service Action In\n");
        goto illegal_request;
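        /*
         * In the READ CAPACITY(16) payload above, byte 13 is the
         * logical-blocks-per-physical-block exponent (e.g. 512-byte logical
         * blocks on 4096-byte physical blocks give an exponent of 3), and
         * bit 7 of byte 14 (the TPE/LBPME flag) advertises UNMAP support.
         */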
    case SYNCHRONIZE_CACHE:
        /* The request is used as the AIO opaque value, so add a ref.  */
        scsi_req_ref(&r->req);
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
        return 0;
    case SEEK_10:
        DPRINTF("Seek(10) (sector %" PRId64 ")\n", r->req.cmd.lba);
        if (r->req.cmd.lba > s->qdev.max_lba) {
            goto illegal_lba;
        }
        break;
    case MODE_SELECT:
        DPRINTF("Mode Select(6) (len %lu)\n", (unsigned long)r->req.cmd.xfer);
        break;
    case MODE_SELECT_10:
        DPRINTF("Mode Select(10) (len %lu)\n", (unsigned long)r->req.cmd.xfer);
        break;
    case UNMAP:
        DPRINTF("Unmap (len %lu)\n", (unsigned long)r->req.cmd.xfer);
        break;
    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
        DPRINTF("Verify (bytchk %d)\n", (req->cmd.buf[1] >> 1) & 3);
        if (req->cmd.buf[1] & 6) {
            goto illegal_request;
        }
        break;
    case WRITE_SAME_10:
    case WRITE_SAME_16:
        DPRINTF("WRITE SAME %d (len %lu)\n",
                req->cmd.buf[0] == WRITE_SAME_10 ? 10 : 16,
                (unsigned long)r->req.cmd.xfer);
        break;
    default:
        DPRINTF("Unknown SCSI command (%2.2x=%s)\n", buf[0],
                scsi_command_name(buf[0]));
        scsi_check_condition(r, SENSE_CODE(INVALID_OPCODE));
        return 0;
    }
    assert(!r->req.aiocb);
    r->iov.iov_len = MIN(r->buflen, req->cmd.xfer);
    if (r->iov.iov_len == 0) {
        scsi_req_complete(&r->req, GOOD);
    }
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        assert(r->iov.iov_len == req->cmd.xfer);
        return -r->iov.iov_len;
    } else {
        return r->iov.iov_len;
    }

illegal_request:
    if (r->req.status == -1) {
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
    }
    return 0;

illegal_lba:
    scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
    return 0;
}

/* Execute a scsi command.  Returns the length of the data expected by the
   command.  This will be positive for data transfers from the device
   (eg. disk reads), negative for transfers to the device (eg. disk writes),
   and zero if the command does not transfer any data.  */

static int32_t scsi_disk_dma_command(SCSIRequest *req, uint8_t *buf)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
    uint32_t len;
    uint8_t command;

    command = buf[0];

    if (!blk_is_available(s->qdev.conf.blk)) {
        scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
        return 0;
    }

    len = scsi_data_cdb_xfer(r->req.cmd.buf);
    switch (command) {
    case READ_6:
    case READ_10:
    case READ_12:
    case READ_16:
        DPRINTF("Read (sector %" PRId64 ", count %u)\n", r->req.cmd.lba, len);
        if (r->req.cmd.buf[1] & 0xe0) {
            goto illegal_request;
        }
        if (!check_lba_range(s, r->req.cmd.lba, len)) {
            goto illegal_lba;
        }
        r->sector = r->req.cmd.lba * (s->qdev.blocksize / 512);
        r->sector_count = len * (s->qdev.blocksize / 512);
        break;
    case WRITE_6:
    case WRITE_10:
    case WRITE_12:
    case WRITE_16:
    case WRITE_VERIFY_10:
    case WRITE_VERIFY_12:
    case WRITE_VERIFY_16:
        if (blk_is_read_only(s->qdev.conf.blk)) {
            scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
            return 0;
        }
        DPRINTF("Write %s(sector %" PRId64 ", count %u)\n",
                (command & 0xe) == 0xe ? "And Verify " : "",
                r->req.cmd.lba, len);
        /* fall through */
    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
        /* We get here only for BYTCHK == 0x01 and only for scsi-block.
         * As far as DMA is concerned, we can treat it the same as a write;
         * scsi_block_do_sgio will send VERIFY commands.
         */
        if (r->req.cmd.buf[1] & 0xe0) {
            goto illegal_request;
        }
        if (!check_lba_range(s, r->req.cmd.lba, len)) {
            goto illegal_lba;
        }
        r->sector = r->req.cmd.lba * (s->qdev.blocksize / 512);
        r->sector_count = len * (s->qdev.blocksize / 512);
        break;
    default:
        abort();
    illegal_request:
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        return 0;
    illegal_lba:
        scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
        return 0;
    }
    r->need_fua_emulation = sdc->need_fua_emulation(&r->req.cmd);
    if (r->sector_count == 0) {
        scsi_req_complete(&r->req, GOOD);
    }
    assert(r->iov.iov_len == 0);
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        return -r->sector_count * 512;
    } else {
        return r->sector_count * 512;
    }
}

static void scsi_disk_reset(DeviceState *dev)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev.qdev, dev);
    uint64_t nb_sectors;

    scsi_device_purge_requests(&s->qdev, SENSE_CODE(RESET));

    blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
    nb_sectors /= s->qdev.blocksize / 512;
    if (nb_sectors) {
        nb_sectors--;
    }
    s->qdev.max_lba = nb_sectors;
    /* reset tray statuses */
    s->tray_locked = 0;
    s->tray_open = 0;
}

static void scsi_disk_resize_cb(void *opaque)
{
    SCSIDiskState *s = opaque;

    /* SPC lists this sense code as available only for
     * direct-access devices.
     */
    if (s->qdev.type == TYPE_DISK) {
        scsi_device_report_change(&s->qdev, SENSE_CODE(CAPACITY_CHANGED));
    }
}

static void scsi_cd_change_media_cb(void *opaque, bool load, Error **errp)
{
    SCSIDiskState *s = opaque;

    /*
     * When a CD gets changed, we have to report an ejected state and
     * then a loaded state to guests so that they detect tray
     * open/close and media change events.  Guests that do not use
     * GET_EVENT_STATUS_NOTIFICATION to detect such tray open/close
     * states rely on this behavior.
     *
     * media_changed governs the state machine used for unit attention
     * report.  media_event is used by GET EVENT STATUS NOTIFICATION.
     */
    s->media_changed = load;
    s->tray_open = !load;
    scsi_device_set_ua(&s->qdev, SENSE_CODE(UNIT_ATTENTION_NO_MEDIUM));
    s->media_event = true;
    s->eject_request = false;
}
static void scsi_cd_eject_request_cb(void *opaque, bool force)
{
    SCSIDiskState *s = opaque;

    s->eject_request = true;
    if (force) {
        s->tray_locked = false;
    }
}

static bool scsi_cd_is_tray_open(void *opaque)
{
    return ((SCSIDiskState *)opaque)->tray_open;
}

static bool scsi_cd_is_medium_locked(void *opaque)
{
    return ((SCSIDiskState *)opaque)->tray_locked;
}

static const BlockDevOps scsi_disk_removable_block_ops = {
    .change_media_cb = scsi_cd_change_media_cb,
    .eject_request_cb = scsi_cd_eject_request_cb,
    .is_tray_open = scsi_cd_is_tray_open,
    .is_medium_locked = scsi_cd_is_medium_locked,

    .resize_cb = scsi_disk_resize_cb,
};

static const BlockDevOps scsi_disk_block_ops = {
    .resize_cb = scsi_disk_resize_cb,
};

static void scsi_disk_unit_attention_reported(SCSIDevice *dev)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
    if (s->media_changed) {
        s->media_changed = false;
        scsi_device_set_ua(&s->qdev, SENSE_CODE(MEDIUM_CHANGED));
    }
}

static void scsi_realize(SCSIDevice *dev, Error **errp)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
    Error *err = NULL;

    if (!s->qdev.conf.blk) {
        error_setg(errp, "drive property not set");
        return;
    }

    if (!(s->features & (1 << SCSI_DISK_F_REMOVABLE)) &&
        !blk_is_inserted(s->qdev.conf.blk)) {
        error_setg(errp, "Device needs media, but drive is empty");
        return;
    }

    blkconf_serial(&s->qdev.conf, &s->serial);
    blkconf_blocksizes(&s->qdev.conf);
    if (dev->type == TYPE_DISK) {
        blkconf_geometry(&dev->conf, NULL, 65535, 255, 255, &err);
        if (err) {
            error_propagate(errp, err);
            return;
        }
    }
    blkconf_apply_backend_options(&dev->conf,
                                  blk_is_read_only(s->qdev.conf.blk),
                                  dev->type == TYPE_DISK, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }

    if (s->qdev.conf.discard_granularity == -1) {
        s->qdev.conf.discard_granularity =
            MAX(s->qdev.conf.logical_block_size, DEFAULT_DISCARD_GRANULARITY);
    }

    if (!s->version) {
        s->version = g_strdup(qemu_hw_version());
    }
    if (!s->vendor) {
        s->vendor = g_strdup("QEMU");
    }

    if (blk_is_sg(s->qdev.conf.blk)) {
        error_setg(errp, "unwanted /dev/sg*");
        return;
    }

    if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) &&
        !(s->features & (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS))) {
        blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_removable_block_ops, s);
    } else {
        blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_block_ops, s);
    }
    blk_set_guest_block_size(s->qdev.conf.blk, s->qdev.blocksize);

    blk_iostatus_enable(s->qdev.conf.blk);
}
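/*
 * scsi_realize() is the common tail of the scsi-hd, scsi-cd, scsi-block and
 * legacy scsi-disk realize paths that follow.  Note the defaulting above:
 * with 512-byte logical blocks an unset discard_granularity becomes 4096
 * (DEFAULT_DISCARD_GRANULARITY), while larger logical blocks raise it to
 * the block size.
 */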
static void scsi_hd_realize(SCSIDevice *dev, Error **errp)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
    /* Can happen for devices without a drive.  The error message for the
     * missing backend will be issued in scsi_realize.
     */
    if (s->qdev.conf.blk) {
        blkconf_blocksizes(&s->qdev.conf);
    }
    s->qdev.blocksize = s->qdev.conf.logical_block_size;
    s->qdev.type = TYPE_DISK;
    if (!s->product) {
        s->product = g_strdup("QEMU HARDDISK");
    }
    scsi_realize(&s->qdev, errp);
}

static void scsi_cd_realize(SCSIDevice *dev, Error **errp)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
    int ret;

    if (!dev->conf.blk) {
        /* Anonymous BlockBackend for an empty drive.  As we put it into
         * dev->conf, qdev takes care of detaching on unplug. */
        dev->conf.blk = blk_new(0, BLK_PERM_ALL);
        ret = blk_attach_dev(dev->conf.blk, &dev->qdev);
        assert(ret == 0);
    }

    s->qdev.blocksize = 2048;
    s->qdev.type = TYPE_ROM;
    s->features |= 1 << SCSI_DISK_F_REMOVABLE;
    if (!s->product) {
        s->product = g_strdup("QEMU CD-ROM");
    }
    scsi_realize(&s->qdev, errp);
}

static void scsi_disk_realize(SCSIDevice *dev, Error **errp)
{
    DriveInfo *dinfo;
    Error *local_err = NULL;

    if (!dev->conf.blk) {
        scsi_realize(dev, &local_err);
        assert(local_err);
        error_propagate(errp, local_err);
        return;
    }

    dinfo = blk_legacy_dinfo(dev->conf.blk);
    if (dinfo && dinfo->media_cd) {
        scsi_cd_realize(dev, errp);
    } else {
        scsi_hd_realize(dev, errp);
    }
}

static const SCSIReqOps scsi_disk_emulate_reqops = {
    .size         = sizeof(SCSIDiskReq),
    .free_req     = scsi_free_request,
    .send_command = scsi_disk_emulate_command,
    .read_data    = scsi_disk_emulate_read_data,
    .write_data   = scsi_disk_emulate_write_data,
    .get_buf      = scsi_get_buf,
};

static const SCSIReqOps scsi_disk_dma_reqops = {
    .size         = sizeof(SCSIDiskReq),
    .free_req     = scsi_free_request,
    .send_command = scsi_disk_dma_command,
    .read_data    = scsi_read_data,
    .write_data   = scsi_write_data,
    .get_buf      = scsi_get_buf,
    .load_request = scsi_disk_load_request,
    .save_request = scsi_disk_save_request,
};

static const SCSIReqOps *const scsi_disk_reqops_dispatch[256] = {
    [TEST_UNIT_READY] = &scsi_disk_emulate_reqops,
    [INQUIRY] = &scsi_disk_emulate_reqops,
    [MODE_SENSE] = &scsi_disk_emulate_reqops,
    [MODE_SENSE_10] = &scsi_disk_emulate_reqops,
    [START_STOP] = &scsi_disk_emulate_reqops,
    [ALLOW_MEDIUM_REMOVAL] = &scsi_disk_emulate_reqops,
    [READ_CAPACITY_10] = &scsi_disk_emulate_reqops,
    [READ_TOC] = &scsi_disk_emulate_reqops,
    [READ_DVD_STRUCTURE] = &scsi_disk_emulate_reqops,
    [READ_DISC_INFORMATION] = &scsi_disk_emulate_reqops,
    [GET_CONFIGURATION] = &scsi_disk_emulate_reqops,
    [GET_EVENT_STATUS_NOTIFICATION] = &scsi_disk_emulate_reqops,
    [MECHANISM_STATUS] = &scsi_disk_emulate_reqops,
    [SERVICE_ACTION_IN_16] = &scsi_disk_emulate_reqops,
    [REQUEST_SENSE] = &scsi_disk_emulate_reqops,
    [SYNCHRONIZE_CACHE] = &scsi_disk_emulate_reqops,
    [SEEK_10] = &scsi_disk_emulate_reqops,
    [MODE_SELECT] = &scsi_disk_emulate_reqops,
    [MODE_SELECT_10] = &scsi_disk_emulate_reqops,
    [UNMAP] = &scsi_disk_emulate_reqops,
    [WRITE_SAME_10] = &scsi_disk_emulate_reqops,
    [WRITE_SAME_16] = &scsi_disk_emulate_reqops,
    [VERIFY_10] = &scsi_disk_emulate_reqops,
    [VERIFY_12] = &scsi_disk_emulate_reqops,
    [VERIFY_16] = &scsi_disk_emulate_reqops,

    [READ_6] = &scsi_disk_dma_reqops,
    [READ_10] = &scsi_disk_dma_reqops,
    [READ_12] = &scsi_disk_dma_reqops,
    [READ_16] = &scsi_disk_dma_reqops,
    [WRITE_6] = &scsi_disk_dma_reqops,
    [WRITE_10] = &scsi_disk_dma_reqops,
    [WRITE_12] = &scsi_disk_dma_reqops,
    [WRITE_16] = &scsi_disk_dma_reqops,
    [WRITE_VERIFY_10] = &scsi_disk_dma_reqops,
    [WRITE_VERIFY_12] = &scsi_disk_dma_reqops,
    [WRITE_VERIFY_16] = &scsi_disk_dma_reqops,
};

static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun,
                                     uint8_t *buf, void *hba_private)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);
    SCSIRequest *req;
    const SCSIReqOps *ops;
    uint8_t command;

    command = buf[0];
    ops = scsi_disk_reqops_dispatch[command];
    if (!ops) {
        ops = &scsi_disk_emulate_reqops;
    }
    req = scsi_req_alloc(ops, &s->qdev, tag, lun, hba_private);

#ifdef DEBUG_SCSI
    DPRINTF("Command: lun=%d tag=0x%x data=0x%02x", lun, tag, buf[0]);
    {
        int i;
        for (i = 1; i < scsi_cdb_length(buf); i++) {
            printf(" 0x%02x", buf[i]);
        }
        printf("\n");
    }
#endif

    return req;
}

#ifdef __linux__
static int get_device_type(SCSIDiskState *s)
{
    uint8_t cmd[16];
    uint8_t buf[36];
    uint8_t sensebuf[8];
    sg_io_hdr_t io_header;
    int ret;

    memset(cmd, 0, sizeof(cmd));
    memset(buf, 0, sizeof(buf));
    cmd[0] = INQUIRY;
    cmd[4] = sizeof(buf);

    memset(&io_header, 0, sizeof(io_header));
    io_header.interface_id = 'S';
    io_header.dxfer_direction = SG_DXFER_FROM_DEV;
    io_header.dxfer_len = sizeof(buf);
    io_header.dxferp = buf;
    io_header.cmdp = cmd;
    io_header.cmd_len = sizeof(cmd);
    io_header.mx_sb_len = sizeof(sensebuf);
    io_header.sbp = sensebuf;
    io_header.timeout = 6000; /* XXX */

    ret = blk_ioctl(s->qdev.conf.blk, SG_IO, &io_header);
    if (ret < 0 || io_header.driver_status || io_header.host_status) {
        return -1;
    }
    s->qdev.type = buf[0];
    if (buf[1] & 0x80) {
        s->features |= 1 << SCSI_DISK_F_REMOVABLE;
    }
    return 0;
}

static void scsi_block_realize(SCSIDevice *dev, Error **errp)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
    int sg_version;
    int rc;

    if (!s->qdev.conf.blk) {
        error_setg(errp, "drive property not set");
        return;
    }

    /* check we are using a driver managing SG_IO (version 3 and after) */
    rc = blk_ioctl(s->qdev.conf.blk, SG_GET_VERSION_NUM, &sg_version);
    if (rc < 0) {
        error_setg(errp, "cannot get SG_IO version number: %s.  "
                         "Is this a SCSI device?",
                   strerror(-rc));
        return;
    }
    if (sg_version < 30000) {
        error_setg(errp, "scsi generic interface too old");
        return;
    }

    /* get device type from INQUIRY data */
    rc = get_device_type(s);
    if (rc < 0) {
        error_setg(errp, "INQUIRY failed");
        return;
    }

    /* Make a guess for the block size; we'll fix it when the guest sends
     * READ CAPACITY.  If it doesn't, it would likely assume these sizes
     * anyway. (TODO: check in /sys).
     */
    if (s->qdev.type == TYPE_ROM || s->qdev.type == TYPE_WORM) {
        s->qdev.blocksize = 2048;
    } else {
        s->qdev.blocksize = 512;
    }

    /* Make the scsi-block device not removable via the HMP and QMP eject
     * commands.
     */
    s->features |= (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS);

    scsi_realize(&s->qdev, errp);
    scsi_generic_read_device_identification(&s->qdev);
}

typedef struct SCSIBlockReq {
    SCSIDiskReq req;
    sg_io_hdr_t io_header;

    /* Selected bytes of the original CDB, copied into our own CDB.  */
    uint8_t cmd, cdb1, group_number;

    /* CDB passed to SG_IO.  */
    uint8_t cdb[16];
} SCSIBlockReq;
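/*
 * scsi_block_do_sgio() regenerates a CDB for every SG_IO request it issues,
 * because the DMA helpers may split one guest transfer into several host
 * transfers.  The opcode is rebuilt from its low five bits plus a group
 * prefix; for instance a guest READ(10) (0x28) whose fragment needs a
 * 64-bit LBA is reissued as READ(16) (0x88 == (0x28 & 0x1f) | 0x80).
 */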
static BlockAIOCB *scsi_block_do_sgio(SCSIBlockReq *req,
                                      int64_t offset, QEMUIOVector *iov,
                                      int direction,
                                      BlockCompletionFunc *cb, void *opaque)
{
    sg_io_hdr_t *io_header = &req->io_header;
    SCSIDiskReq *r = &req->req;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    int nb_logical_blocks;
    uint64_t lba;
    BlockAIOCB *aiocb;

    /* This is not supported yet.  It can only happen if the guest does
     * reads and writes that are not aligned to the logical block size
     * _and_ cover multiple MemoryRegions.
     */
    assert(offset % s->qdev.blocksize == 0);
    assert(iov->size % s->qdev.blocksize == 0);

    io_header->interface_id = 'S';

    /* The data transfer comes from the QEMUIOVector.  */
    io_header->dxfer_direction = direction;
    io_header->dxfer_len = iov->size;
    io_header->dxferp = (void *)iov->iov;
    io_header->iovec_count = iov->niov;
    assert(io_header->iovec_count == iov->niov); /* no overflow! */

    /* Build a new CDB with the LBA and length patched in, in case
     * DMA helpers split the transfer in multiple segments.  Do not
     * build a CDB smaller than what the guest wanted, and only build
     * a larger one if strictly necessary.
     */
    io_header->cmdp = req->cdb;
    lba = offset / s->qdev.blocksize;
    nb_logical_blocks = io_header->dxfer_len / s->qdev.blocksize;

    if ((req->cmd >> 5) == 0 && lba <= 0x1ffff) {
        /* 6-byte CDB */
        stl_be_p(&req->cdb[0], lba | (req->cmd << 24));
        req->cdb[4] = nb_logical_blocks;
        req->cdb[5] = 0;
        io_header->cmd_len = 6;
    } else if ((req->cmd >> 5) <= 1 && lba <= 0xffffffffULL) {
        /* 10-byte CDB */
        req->cdb[0] = (req->cmd & 0x1f) | 0x20;
        req->cdb[1] = req->cdb1;
        stl_be_p(&req->cdb[2], lba);
        req->cdb[6] = req->group_number;
        stw_be_p(&req->cdb[7], nb_logical_blocks);
        req->cdb[9] = 0;
        io_header->cmd_len = 10;
    } else if ((req->cmd >> 5) != 4 && lba <= 0xffffffffULL) {
        /* 12-byte CDB */
        req->cdb[0] = (req->cmd & 0x1f) | 0xA0;
        req->cdb[1] = req->cdb1;
        stl_be_p(&req->cdb[2], lba);
        stl_be_p(&req->cdb[6], nb_logical_blocks);
        req->cdb[10] = req->group_number;
        req->cdb[11] = 0;
        io_header->cmd_len = 12;
    } else {
        /* 16-byte CDB */
        req->cdb[0] = (req->cmd & 0x1f) | 0x80;
        req->cdb[1] = req->cdb1;
        stq_be_p(&req->cdb[2], lba);
        stl_be_p(&req->cdb[10], nb_logical_blocks);
        req->cdb[14] = req->group_number;
        req->cdb[15] = 0;
        io_header->cmd_len = 16;
    }

    /* The rest is as in scsi-generic.c.  */
    io_header->mx_sb_len = sizeof(r->req.sense);
    io_header->sbp = r->req.sense;
    io_header->timeout = UINT_MAX;
    io_header->usr_ptr = r;
    io_header->flags |= SG_FLAG_DIRECT_IO;

    aiocb = blk_aio_ioctl(s->qdev.conf.blk, SG_IO, io_header, cb, opaque);
    assert(aiocb != NULL);
    return aiocb;
}
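/*
 * scsi-block never asks for FUA emulation: the rebuilt CDB carries the
 * guest's original byte 1 (req->cdb1 above), so a FUA bit set by the guest
 * already reaches the real device.
 */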
static bool scsi_block_no_fua(SCSICommand *cmd)
{
    return false;
}

static BlockAIOCB *scsi_block_dma_readv(int64_t offset,
                                        QEMUIOVector *iov,
                                        BlockCompletionFunc *cb, void *cb_opaque,
                                        void *opaque)
{
    SCSIBlockReq *r = opaque;
    return scsi_block_do_sgio(r, offset, iov,
                              SG_DXFER_FROM_DEV, cb, cb_opaque);
}

static BlockAIOCB *scsi_block_dma_writev(int64_t offset,
                                         QEMUIOVector *iov,
                                         BlockCompletionFunc *cb, void *cb_opaque,
                                         void *opaque)
{
    SCSIBlockReq *r = opaque;
    return scsi_block_do_sgio(r, offset, iov,
                              SG_DXFER_TO_DEV, cb, cb_opaque);
}

static bool scsi_block_is_passthrough(SCSIDiskState *s, uint8_t *buf)
{
    switch (buf[0]) {
    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
        /* Check if BYTCHK == 0x01 (data-out buffer contains data
         * for the number of logical blocks specified in the length
         * field).  For other modes, do not use scatter/gather operation.
         */
        if ((buf[1] & 6) == 2) {
            return false;
        }
        break;

    case READ_6:
    case READ_10:
    case READ_12:
    case READ_16:
    case WRITE_6:
    case WRITE_10:
    case WRITE_12:
    case WRITE_16:
    case WRITE_VERIFY_10:
    case WRITE_VERIFY_12:
    case WRITE_VERIFY_16:
        /* MMC writing cannot be done via DMA helpers, because it sometimes
         * involves writing beyond the maximum LBA or to negative LBA (lead-in).
         * We might use scsi_block_dma_reqops as long as no writing commands are
         * seen, but performance usually isn't paramount on optical media.  So,
         * just make scsi-block operate the same as scsi-generic for them.
         */
        if (s->qdev.type != TYPE_ROM) {
            return false;
        }
        break;

    default:
        break;
    }

    return true;
}

static int32_t scsi_block_dma_command(SCSIRequest *req, uint8_t *buf)
{
    SCSIBlockReq *r = (SCSIBlockReq *)req;
    r->cmd = req->cmd.buf[0];
    switch (r->cmd >> 5) {
    case 0:
        /* 6-byte CDB.  */
        r->cdb1 = r->group_number = 0;
        break;
    case 1:
        /* 10-byte CDB.  */
        r->cdb1 = req->cmd.buf[1];
        r->group_number = req->cmd.buf[6];
        break;
    case 4:
        /* 16-byte CDB.  */
        r->cdb1 = req->cmd.buf[1];
        r->group_number = req->cmd.buf[14];
        break;
    case 5:
        /* 12-byte CDB.  */
        r->cdb1 = req->cmd.buf[1];
        r->group_number = req->cmd.buf[10];
        break;
    default:
        abort();
    }

    if (r->cdb1 & 0xe0) {
        /* Protection information is not supported.  */
        scsi_check_condition(&r->req, SENSE_CODE(INVALID_FIELD));
        return 0;
    }

    r->req.status = &r->io_header.status;
    return scsi_disk_dma_command(req, buf);
}
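/*
 * The group code (opcode >> 5) identifies the original CDB size: e.g.
 * READ(10) = 0x28 is group 1 (10 bytes), READ(16) = 0x88 is group 4
 * (16 bytes) and READ(12) = 0xa8 is group 5 (12 bytes), which is what the
 * switch above keys on when saving byte 1 and the GROUP NUMBER field.
 */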
static const SCSIReqOps scsi_block_dma_reqops = {
    .size         = sizeof(SCSIBlockReq),
    .free_req     = scsi_free_request,
    .send_command = scsi_block_dma_command,
    .read_data    = scsi_read_data,
    .write_data   = scsi_write_data,
    .get_buf      = scsi_get_buf,
    .load_request = scsi_disk_load_request,
    .save_request = scsi_disk_save_request,
};

static SCSIRequest *scsi_block_new_request(SCSIDevice *d, uint32_t tag,
                                           uint32_t lun, uint8_t *buf,
                                           void *hba_private)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);

    if (scsi_block_is_passthrough(s, buf)) {
        return scsi_req_alloc(&scsi_generic_req_ops, &s->qdev, tag, lun,
                              hba_private);
    } else {
        return scsi_req_alloc(&scsi_block_dma_reqops, &s->qdev, tag, lun,
                              hba_private);
    }
}

static int scsi_block_parse_cdb(SCSIDevice *d, SCSICommand *cmd,
                                uint8_t *buf, void *hba_private)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);

    if (scsi_block_is_passthrough(s, buf)) {
        return scsi_bus_parse_cdb(&s->qdev, cmd, buf, hba_private);
    } else {
        return scsi_req_parse_cdb(&s->qdev, cmd, buf);
    }
}

#endif

static
BlockAIOCB *scsi_dma_readv(int64_t offset, QEMUIOVector *iov,
                           BlockCompletionFunc *cb, void *cb_opaque,
                           void *opaque)
{
    SCSIDiskReq *r = opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    return blk_aio_preadv(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque);
}

static
BlockAIOCB *scsi_dma_writev(int64_t offset, QEMUIOVector *iov,
                            BlockCompletionFunc *cb, void *cb_opaque,
                            void *opaque)
{
    SCSIDiskReq *r = opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    return blk_aio_pwritev(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque);
}

static void scsi_disk_base_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass);

    dc->fw_name = "disk";
    dc->reset = scsi_disk_reset;
    sdc->dma_readv = scsi_dma_readv;
    sdc->dma_writev = scsi_dma_writev;
    sdc->need_fua_emulation = scsi_is_cmd_fua;
}

static const TypeInfo scsi_disk_base_info = {
    .name          = TYPE_SCSI_DISK_BASE,
    .parent        = TYPE_SCSI_DEVICE,
    .class_init    = scsi_disk_base_class_initfn,
    .instance_size = sizeof(SCSIDiskState),
    .class_size    = sizeof(SCSIDiskClass),
    .abstract      = true,
};

#define DEFINE_SCSI_DISK_PROPERTIES()                                \
    DEFINE_BLOCK_PROPERTIES(SCSIDiskState, qdev.conf),               \
    DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf),         \
    DEFINE_PROP_STRING("ver", SCSIDiskState, version),               \
    DEFINE_PROP_STRING("serial", SCSIDiskState, serial),             \
    DEFINE_PROP_STRING("vendor", SCSIDiskState, vendor),             \
    DEFINE_PROP_STRING("product", SCSIDiskState, product)

static Property scsi_hd_properties[] = {
    DEFINE_SCSI_DISK_PROPERTIES(),
    DEFINE_PROP_BIT("removable", SCSIDiskState, features,
                    SCSI_DISK_F_REMOVABLE, false),
    DEFINE_PROP_BIT("dpofua", SCSIDiskState, features,
                    SCSI_DISK_F_DPOFUA, false),
    DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
    DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
    DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
    DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size,
                       DEFAULT_MAX_UNMAP_SIZE),
    DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
                       DEFAULT_MAX_IO_SIZE),
    DEFINE_BLOCK_CHS_PROPERTIES(SCSIDiskState, qdev.conf),
    DEFINE_PROP_END_OF_LIST(),
};
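/*
 * As an illustration of how these properties are used, a typical command
 * line for a virtio-scsi disk might look like:
 *
 *   -device virtio-scsi-pci,id=scsi0
 *   -drive if=none,id=d0,file=disk.qcow2
 *   -device scsi-hd,drive=d0,serial=SN0001,wwn=0x5000c50015ea71ac
 *
 * serial, wwn, vendor and product all end up in the emulated INQUIRY data
 * and VPD pages.
 */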
static const VMStateDescription vmstate_scsi_disk_state = {
    .name = "scsi-disk",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_SCSI_DEVICE(qdev, SCSIDiskState),
        VMSTATE_BOOL(media_changed, SCSIDiskState),
        VMSTATE_BOOL(media_event, SCSIDiskState),
        VMSTATE_BOOL(eject_request, SCSIDiskState),
        VMSTATE_BOOL(tray_open, SCSIDiskState),
        VMSTATE_BOOL(tray_locked, SCSIDiskState),
        VMSTATE_END_OF_LIST()
    }
};

static void scsi_hd_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);

    sc->realize      = scsi_hd_realize;
    sc->alloc_req    = scsi_new_request;
    sc->unit_attention_reported = scsi_disk_unit_attention_reported;
    dc->desc = "virtual SCSI disk";
    dc->props = scsi_hd_properties;
    dc->vmsd  = &vmstate_scsi_disk_state;
}

static const TypeInfo scsi_hd_info = {
    .name          = "scsi-hd",
    .parent        = TYPE_SCSI_DISK_BASE,
    .class_init    = scsi_hd_class_initfn,
};

static Property scsi_cd_properties[] = {
    DEFINE_SCSI_DISK_PROPERTIES(),
    DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
    DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
    DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
    DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
                       DEFAULT_MAX_IO_SIZE),
    DEFINE_PROP_END_OF_LIST(),
};

static void scsi_cd_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);

    sc->realize      = scsi_cd_realize;
    sc->alloc_req    = scsi_new_request;
    sc->unit_attention_reported = scsi_disk_unit_attention_reported;
    dc->desc = "virtual SCSI CD-ROM";
    dc->props = scsi_cd_properties;
    dc->vmsd  = &vmstate_scsi_disk_state;
}

static const TypeInfo scsi_cd_info = {
    .name          = "scsi-cd",
    .parent        = TYPE_SCSI_DISK_BASE,
    .class_init    = scsi_cd_class_initfn,
};

#ifdef __linux__
static Property scsi_block_properties[] = {
    DEFINE_PROP_DRIVE("drive", SCSIDiskState, qdev.conf.blk),
    DEFINE_PROP_END_OF_LIST(),
};

static void scsi_block_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);
    SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass);

    sc->realize      = scsi_block_realize;
    sc->alloc_req    = scsi_block_new_request;
    sc->parse_cdb    = scsi_block_parse_cdb;
    sdc->dma_readv   = scsi_block_dma_readv;
    sdc->dma_writev  = scsi_block_dma_writev;
    sdc->need_fua_emulation = scsi_block_no_fua;
    dc->desc = "SCSI block device passthrough";
    dc->props = scsi_block_properties;
    dc->vmsd  = &vmstate_scsi_disk_state;
}

static const TypeInfo scsi_block_info = {
    .name          = "scsi-block",
    .parent        = TYPE_SCSI_DISK_BASE,
    .class_init    = scsi_block_class_initfn,
};
#endif
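/*
 * The legacy "scsi-disk" type defined below picks disk or CD-ROM behaviour
 * at realize time from the legacy -drive media= setting (see
 * scsi_disk_realize above); new configurations are expected to use scsi-hd
 * or scsi-cd directly.
 */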
static Property scsi_disk_properties[] = {
    DEFINE_SCSI_DISK_PROPERTIES(),
    DEFINE_PROP_BIT("removable", SCSIDiskState, features,
                    SCSI_DISK_F_REMOVABLE, false),
    DEFINE_PROP_BIT("dpofua", SCSIDiskState, features,
                    SCSI_DISK_F_DPOFUA, false),
    DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
    DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
    DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
    DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size,
                       DEFAULT_MAX_UNMAP_SIZE),
    DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
                       DEFAULT_MAX_IO_SIZE),
    DEFINE_PROP_END_OF_LIST(),
};

static void scsi_disk_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);

    sc->realize      = scsi_disk_realize;
    sc->alloc_req    = scsi_new_request;
    sc->unit_attention_reported = scsi_disk_unit_attention_reported;
    dc->fw_name = "disk";
    dc->desc = "virtual SCSI disk or CD-ROM (legacy)";
    dc->reset = scsi_disk_reset;
    dc->props = scsi_disk_properties;
    dc->vmsd  = &vmstate_scsi_disk_state;
}

static const TypeInfo scsi_disk_info = {
    .name          = "scsi-disk",
    .parent        = TYPE_SCSI_DISK_BASE,
    .class_init    = scsi_disk_class_initfn,
};

static void scsi_disk_register_types(void)
{
    type_register_static(&scsi_disk_base_info);
    type_register_static(&scsi_hd_info);
    type_register_static(&scsi_cd_info);
#ifdef __linux__
    type_register_static(&scsi_block_info);
#endif
    type_register_static(&scsi_disk_info);
}

type_init(scsi_disk_register_types)