/*
 * SCSI Device emulation
 *
 * Copyright (c) 2006 CodeSourcery.
 * Based on code by Fabrice Bellard
 *
 * Written by Paul Brook
 * Modifications:
 *  2009-Dec-12 Artyom Tarasenko : implemented standard inquiry for the case
 *                                 when the allocation length of CDB is smaller
 *                                 than 36.
 *  2009-Oct-13 Artyom Tarasenko : implemented the block descriptor in the
 *                                 MODE SENSE response.
 *
 * This code is licensed under the LGPL.
 *
 * Note that this file only handles the SCSI architecture model and device
 * commands.  Emulation of interface/link layer protocols is handled by
 * the host adapter emulator.
 */

//#define DEBUG_SCSI

#ifdef DEBUG_SCSI
#define DPRINTF(fmt, ...) \
do { printf("scsi-disk: " fmt , ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) do {} while(0)
#endif

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "hw/scsi/scsi.h"
#include "scsi/constants.h"
#include "sysemu/sysemu.h"
#include "sysemu/block-backend.h"
#include "sysemu/blockdev.h"
#include "hw/block/block.h"
#include "sysemu/dma.h"
#include "qemu/cutils.h"

#ifdef __linux__
#include <scsi/sg.h>
#endif

#define SCSI_WRITE_SAME_MAX         524288
#define SCSI_DMA_BUF_SIZE           131072
#define SCSI_MAX_INQUIRY_LEN        256
#define SCSI_MAX_MODE_LEN           256

#define DEFAULT_DISCARD_GRANULARITY 4096
#define DEFAULT_MAX_UNMAP_SIZE      (1 << 30)   /* 1 GB */
#define DEFAULT_MAX_IO_SIZE         INT_MAX     /* 2 GB - 1 block */

#define TYPE_SCSI_DISK_BASE         "scsi-disk-base"

#define SCSI_DISK_BASE(obj) \
     OBJECT_CHECK(SCSIDiskState, (obj), TYPE_SCSI_DISK_BASE)
#define SCSI_DISK_BASE_CLASS(klass) \
     OBJECT_CLASS_CHECK(SCSIDiskClass, (klass), TYPE_SCSI_DISK_BASE)
#define SCSI_DISK_BASE_GET_CLASS(obj) \
     OBJECT_GET_CLASS(SCSIDiskClass, (obj), TYPE_SCSI_DISK_BASE)

typedef struct SCSIDiskClass {
    SCSIDeviceClass parent_class;
    DMAIOFunc       *dma_readv;
    DMAIOFunc       *dma_writev;
    bool            (*need_fua_emulation)(SCSICommand *cmd);
} SCSIDiskClass;

typedef struct SCSIDiskReq {
    SCSIRequest req;
    /* Both sector and sector_count are in terms of qemu 512 byte blocks. */
    uint64_t sector;
    uint32_t sector_count;
    uint32_t buflen;
    bool started;
    bool need_fua_emulation;
    struct iovec iov;
    QEMUIOVector qiov;
    BlockAcctCookie acct;
    unsigned char *status;
} SCSIDiskReq;

#define SCSI_DISK_F_REMOVABLE             0
#define SCSI_DISK_F_DPOFUA                1
#define SCSI_DISK_F_NO_REMOVABLE_DEVOPS   2

typedef struct SCSIDiskState
{
    SCSIDevice qdev;
    uint32_t features;
    bool media_changed;
    bool media_event;
    bool eject_request;
    uint16_t port_index;
    uint64_t max_unmap_size;
    uint64_t max_io_size;
    QEMUBH *bh;
    char *version;
    char *serial;
    char *vendor;
    char *product;
    bool tray_open;
    bool tray_locked;
    /*
     * 0x0000        - rotation rate not reported
     * 0x0001        - non-rotating medium (SSD)
     * 0x0002-0x0400 - reserved
     * 0x0401-0xfffe - rotations per minute
     * 0xffff        - reserved
     */
    uint16_t rotation_rate;
} SCSIDiskState;

static bool scsi_handle_rw_error(SCSIDiskReq *r, int error, bool acct_failed);

static void scsi_free_request(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    qemu_vfree(r->iov.iov_base);
}

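/*
 * Example usage of the helper below:
 *     scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
 * builds the sense data and completes the request with CHECK CONDITION,
 * as is done throughout the emulation paths in this file.
 */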
/* Helper function for command completion with sense.  */
static void scsi_check_condition(SCSIDiskReq *r, SCSISense sense)
{
    DPRINTF("Command complete tag=0x%x sense=%d/%d/%d\n",
            r->req.tag, sense.key, sense.asc, sense.ascq);
    scsi_req_build_sense(&r->req, sense);
    scsi_req_complete(&r->req, CHECK_CONDITION);
}

static void scsi_init_iovec(SCSIDiskReq *r, size_t size)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    if (!r->iov.iov_base) {
        r->buflen = size;
        r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen);
    }
    r->iov.iov_len = MIN(r->sector_count * 512, r->buflen);
    qemu_iovec_init_external(&r->qiov, &r->iov, 1);
}

static void scsi_disk_save_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    qemu_put_be64s(f, &r->sector);
    qemu_put_be32s(f, &r->sector_count);
    qemu_put_be32s(f, &r->buflen);
    if (r->buflen) {
        if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
            qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len);
        } else if (!req->retry) {
            uint32_t len = r->iov.iov_len;
            qemu_put_be32s(f, &len);
            qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len);
        }
    }
}

static void scsi_disk_load_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    qemu_get_be64s(f, &r->sector);
    qemu_get_be32s(f, &r->sector_count);
    qemu_get_be32s(f, &r->buflen);
    if (r->buflen) {
        scsi_init_iovec(r, r->buflen);
        if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
            qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len);
        } else if (!r->req.retry) {
            uint32_t len;
            qemu_get_be32s(f, &len);
            r->iov.iov_len = len;
            assert(r->iov.iov_len <= r->buflen);
            qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len);
        }
    }

    qemu_iovec_init_external(&r->qiov, &r->iov, 1);
}

static bool scsi_disk_req_check_error(SCSIDiskReq *r, int ret, bool acct_failed)
{
    if (r->req.io_canceled) {
        scsi_req_cancel_complete(&r->req);
        return true;
    }

    if (ret < 0 || (r->status && *r->status)) {
        return scsi_handle_rw_error(r, -ret, acct_failed);
    }

    return false;
}

static void scsi_aio_complete(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;
    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (scsi_disk_req_check_error(r, ret, true)) {
        goto done;
    }

    block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    scsi_req_complete(&r->req, GOOD);

done:
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
    scsi_req_unref(&r->req);
}

static bool scsi_is_cmd_fua(SCSICommand *cmd)
{
    switch (cmd->buf[0]) {
    case READ_10:
    case READ_12:
    case READ_16:
    case WRITE_10:
    case WRITE_12:
    case WRITE_16:
        return (cmd->buf[1] & 8) != 0;

    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
    case WRITE_VERIFY_10:
    case WRITE_VERIFY_12:
    case WRITE_VERIFY_16:
        return true;

    case READ_6:
    case WRITE_6:
    default:
        return false;
    }
}

static void scsi_write_do_fua(SCSIDiskReq *r)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb == NULL);
    assert(!r->req.io_canceled);

    if (r->need_fua_emulation) {
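        /*
         * The write data has already reached the backend; emulate FUA
         * (force unit access) by flushing the backend's cache before
         * signalling completion.
         */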
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
        return;
    }

    scsi_req_complete(&r->req, GOOD);
    scsi_req_unref(&r->req);
}

static void scsi_dma_complete_noio(SCSIDiskReq *r, int ret)
{
    assert(r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    r->sector += r->sector_count;
    r->sector_count = 0;
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        scsi_write_do_fua(r);
        return;
    } else {
        scsi_req_complete(&r->req, GOOD);
    }

done:
    scsi_req_unref(&r->req);
}

static void scsi_dma_complete(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    }
    scsi_dma_complete_noio(r, ret);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}

static void scsi_read_complete(void * opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    int n;

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;
    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (scsi_disk_req_check_error(r, ret, true)) {
        goto done;
    }

    block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    DPRINTF("Data ready tag=0x%x len=%zd\n", r->req.tag, r->qiov.size);

    n = r->qiov.size / 512;
    r->sector += n;
    r->sector_count -= n;
    scsi_req_data(&r->req, r->qiov.size);

done:
    scsi_req_unref(&r->req);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}

/* Actually issue a read to the block device.  */
static void scsi_do_read(SCSIDiskReq *r, int ret)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));

    assert (r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

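    /*
     * Two submission paths follow: with a scatter/gather list from the HBA,
     * DMA directly via dma_blk_io(); otherwise bounce the data through the
     * request's own iovec in SCSI_DMA_BUF_SIZE chunks.
     */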
    /* The request is used as the AIO opaque value, so add a ref.  */
    scsi_req_ref(&r->req);

    if (r->req.sg) {
        dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_READ);
        r->req.resid -= r->req.sg->size;
        r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
                                  r->req.sg, r->sector << BDRV_SECTOR_BITS,
                                  BDRV_SECTOR_SIZE,
                                  sdc->dma_readv, r, scsi_dma_complete, r,
                                  DMA_DIRECTION_FROM_DEVICE);
    } else {
        scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         r->qiov.size, BLOCK_ACCT_READ);
        r->req.aiocb = sdc->dma_readv(r->sector << BDRV_SECTOR_BITS, &r->qiov,
                                      scsi_read_complete, r, r);
    }

done:
    scsi_req_unref(&r->req);
}

static void scsi_do_read_cb(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert (r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    }
    scsi_do_read(opaque, ret);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}

/* Read more data from scsi device into buffer.  */
static void scsi_read_data(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    bool first;

    DPRINTF("Read sector_count=%d\n", r->sector_count);
    if (r->sector_count == 0) {
        /* This also clears the sense buffer for REQUEST SENSE.  */
        scsi_req_complete(&r->req, GOOD);
        return;
    }

    /* No data transfer may already be in progress */
    assert(r->req.aiocb == NULL);

    /* The request is used as the AIO opaque value, so add a ref.  */
    scsi_req_ref(&r->req);
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        DPRINTF("Data transfer direction invalid\n");
        scsi_read_complete(r, -EINVAL);
        return;
    }

    if (!blk_is_available(req->dev->conf.blk)) {
        scsi_read_complete(r, -ENOMEDIUM);
        return;
    }

    first = !r->started;
    r->started = true;
    if (first && r->need_fua_emulation) {
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_do_read_cb, r);
    } else {
        scsi_do_read(r, 0);
    }
}

/*
 * scsi_handle_rw_error has two return values.  False means that the error
 * must be ignored, true means that the error has been processed and the
 * caller should not do anything else for this request.  Note that
 * scsi_handle_rw_error always manages its reference counts, independent
 * of the return value.
 */
static bool scsi_handle_rw_error(SCSIDiskReq *r, int error, bool acct_failed)
{
    bool is_read = (r->req.cmd.mode == SCSI_XFER_FROM_DEV);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    BlockErrorAction action = blk_get_error_action(s->qdev.conf.blk,
                                                   is_read, error);

    if (action == BLOCK_ERROR_ACTION_REPORT) {
        if (acct_failed) {
            block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
        }
        switch (error) {
        case 0:
            /* The command has run, no need to fake sense.  */
            assert(r->status && *r->status);
            scsi_req_complete(&r->req, *r->status);
            break;
        case ENOMEDIUM:
            scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
            break;
        case ENOMEM:
            scsi_check_condition(r, SENSE_CODE(TARGET_FAILURE));
            break;
        case EINVAL:
            scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
            break;
        case ENOSPC:
            scsi_check_condition(r, SENSE_CODE(SPACE_ALLOC_FAILED));
            break;
        default:
            scsi_check_condition(r, SENSE_CODE(IO_ERROR));
            break;
        }
    }
    if (!error) {
        assert(r->status && *r->status);
        error = scsi_sense_buf_to_errno(r->req.sense, sizeof(r->req.sense));

        if (error == ECANCELED || error == EAGAIN || error == ENOTCONN ||
            error == 0) {
            /* These errors are handled by guest. */
            scsi_req_complete(&r->req, *r->status);
            return true;
        }
    }

    blk_error_action(s->qdev.conf.blk, action, is_read, error);
    if (action == BLOCK_ERROR_ACTION_STOP) {
        scsi_req_retry(&r->req);
    }
    return action != BLOCK_ERROR_ACTION_IGNORE;
}

static void scsi_write_complete_noio(SCSIDiskReq *r, int ret)
{
    uint32_t n;

    assert (r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    n = r->qiov.size / 512;
    r->sector += n;
    r->sector_count -= n;
    if (r->sector_count == 0) {
        scsi_write_do_fua(r);
        return;
    } else {
        scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
        DPRINTF("Write complete tag=0x%x more=%zd\n", r->req.tag, r->qiov.size);
        scsi_req_data(&r->req, r->qiov.size);
    }

done:
    scsi_req_unref(&r->req);
}

static void scsi_write_complete(void * opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert (r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    }
    scsi_write_complete_noio(r, ret);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}

static void scsi_write_data(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));

    /* No data transfer may already be in progress */
    assert(r->req.aiocb == NULL);

    /* The request is used as the AIO opaque value, so add a ref.  */
    scsi_req_ref(&r->req);
    if (r->req.cmd.mode != SCSI_XFER_TO_DEV) {
        DPRINTF("Data transfer direction invalid\n");
        scsi_write_complete_noio(r, -EINVAL);
        return;
    }

    if (!r->req.sg && !r->qiov.size) {
        /* Called for the first time.  Ask the driver to send us more data.  */
        r->started = true;
        scsi_write_complete_noio(r, 0);
        return;
    }
    if (!blk_is_available(req->dev->conf.blk)) {
        scsi_write_complete_noio(r, -ENOMEDIUM);
        return;
    }

    if (r->req.cmd.buf[0] == VERIFY_10 || r->req.cmd.buf[0] == VERIFY_12 ||
        r->req.cmd.buf[0] == VERIFY_16) {
        if (r->req.sg) {
            scsi_dma_complete_noio(r, 0);
        } else {
            scsi_write_complete_noio(r, 0);
        }
        return;
    }

    if (r->req.sg) {
        dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_WRITE);
        r->req.resid -= r->req.sg->size;
        r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
                                  r->req.sg, r->sector << BDRV_SECTOR_BITS,
                                  BDRV_SECTOR_SIZE,
                                  sdc->dma_writev, r, scsi_dma_complete, r,
                                  DMA_DIRECTION_TO_DEVICE);
    } else {
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         r->qiov.size, BLOCK_ACCT_WRITE);
        r->req.aiocb = sdc->dma_writev(r->sector << BDRV_SECTOR_BITS, &r->qiov,
                                       scsi_write_complete, r, r);
    }
}

/* Return a pointer to the data buffer.  */
static uint8_t *scsi_get_buf(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    return (uint8_t *)r->iov.iov_base;
}

int scsi_disk_emulate_vpd_page(SCSIRequest *req, uint8_t *outbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    uint8_t page_code = req->cmd.buf[2];
    int start, buflen = 0;

    outbuf[buflen++] = s->qdev.type & 0x1f;
    outbuf[buflen++] = page_code;
    outbuf[buflen++] = 0x00;
    outbuf[buflen++] = 0x00;
    start = buflen;

    switch (page_code) {
    case 0x00: /* Supported page codes, mandatory */
    {
        DPRINTF("Inquiry EVPD[Supported pages] "
                "buffer size %zd\n", req->cmd.xfer);
        outbuf[buflen++] = 0x00; /* list of supported pages (this page) */
        if (s->serial) {
            outbuf[buflen++] = 0x80; /* unit serial number */
        }
        outbuf[buflen++] = 0x83; /* device identification */
        if (s->qdev.type == TYPE_DISK) {
            outbuf[buflen++] = 0xb0; /* block limits */
            outbuf[buflen++] = 0xb1; /* block device characteristics */
            outbuf[buflen++] = 0xb2; /* thin provisioning */
        }
        break;
    }
    case 0x80: /* Device serial number, optional */
    {
        int l;

        if (!s->serial) {
            DPRINTF("Inquiry (EVPD[Serial number] not supported\n");
            return -1;
        }

        l = strlen(s->serial);
        if (l > 36) {
            l = 36;
        }

        DPRINTF("Inquiry EVPD[Serial number] "
                "buffer size %zd\n", req->cmd.xfer);
        memcpy(outbuf + buflen, s->serial, l);
        buflen += l;
        break;
    }

    case 0x83: /* Device identification page, mandatory */
    {
        const char *str = s->serial ?: blk_name(s->qdev.conf.blk);
        int max_len = s->serial ? 20 : 255 - 8;
        int id_len = strlen(str);

        if (id_len > max_len) {
            id_len = max_len;
        }
        DPRINTF("Inquiry EVPD[Device identification] "
                "buffer size %zd\n", req->cmd.xfer);

        outbuf[buflen++] = 0x2; /* ASCII */
        outbuf[buflen++] = 0;   /* not officially assigned */
        outbuf[buflen++] = 0;   /* reserved */
        outbuf[buflen++] = id_len; /* length of data following */
        memcpy(outbuf + buflen, str, id_len);
        buflen += id_len;

        if (s->qdev.wwn) {
            outbuf[buflen++] = 0x1; /* Binary */
            outbuf[buflen++] = 0x3; /* NAA */
            outbuf[buflen++] = 0;   /* reserved */
            outbuf[buflen++] = 8;
            stq_be_p(&outbuf[buflen], s->qdev.wwn);
            buflen += 8;
        }

        if (s->qdev.port_wwn) {
            outbuf[buflen++] = 0x61; /* SAS / Binary */
            outbuf[buflen++] = 0x93; /* PIV / Target port / NAA */
            outbuf[buflen++] = 0;    /* reserved */
            outbuf[buflen++] = 8;
            stq_be_p(&outbuf[buflen], s->qdev.port_wwn);
            buflen += 8;
        }

        if (s->port_index) {
            outbuf[buflen++] = 0x61; /* SAS / Binary */

            /* PIV/Target port/relative target port */
            outbuf[buflen++] = 0x94;

            outbuf[buflen++] = 0;    /* reserved */
            outbuf[buflen++] = 4;
            stw_be_p(&outbuf[buflen + 2], s->port_index);
            buflen += 4;
        }
        break;
    }
    case 0xb0: /* block limits */
    {
        unsigned int unmap_sectors =
            s->qdev.conf.discard_granularity / s->qdev.blocksize;
        unsigned int min_io_size =
            s->qdev.conf.min_io_size / s->qdev.blocksize;
        unsigned int opt_io_size =
            s->qdev.conf.opt_io_size / s->qdev.blocksize;
        unsigned int max_unmap_sectors =
            s->max_unmap_size / s->qdev.blocksize;
        unsigned int max_io_sectors =
            s->max_io_size / s->qdev.blocksize;

        if (s->qdev.type == TYPE_ROM) {
            DPRINTF("Inquiry (EVPD[%02X] not supported for CDROM\n",
                    page_code);
            return -1;
        }
        if (s->qdev.type == TYPE_DISK) {
            int max_transfer_blk = blk_get_max_transfer(s->qdev.conf.blk);
            int max_io_sectors_blk =
                max_transfer_blk / s->qdev.blocksize;

            max_io_sectors =
                MIN_NON_ZERO(max_io_sectors_blk, max_io_sectors);

            /* min_io_size and opt_io_size can't be greater than
             * max_io_sectors */
            if (min_io_size) {
                min_io_size = MIN(min_io_size, max_io_sectors);
            }
            if (opt_io_size) {
                opt_io_size = MIN(opt_io_size, max_io_sectors);
            }
        }
        /* required VPD size with unmap support */
        buflen = 0x40;
        memset(outbuf + 4, 0, buflen - 4);

        outbuf[4] = 0x1; /* wsnz */

        /* optimal transfer length granularity */
        outbuf[6] = (min_io_size >> 8) & 0xff;
        outbuf[7] = min_io_size & 0xff;

        /* maximum transfer length */
        outbuf[8] = (max_io_sectors >> 24) & 0xff;
        outbuf[9] = (max_io_sectors >> 16) & 0xff;
        outbuf[10] = (max_io_sectors >> 8) & 0xff;
        outbuf[11] = max_io_sectors & 0xff;

        /* optimal transfer length */
        outbuf[12] = (opt_io_size >> 24) & 0xff;
        outbuf[13] = (opt_io_size >> 16) & 0xff;
        outbuf[14] = (opt_io_size >> 8) & 0xff;
        outbuf[15] = opt_io_size & 0xff;

        /* max unmap LBA count, default is 1GB */
        outbuf[20] = (max_unmap_sectors >> 24) & 0xff;
        outbuf[21] = (max_unmap_sectors >> 16) & 0xff;
        outbuf[22] = (max_unmap_sectors >> 8) & 0xff;
        outbuf[23] = max_unmap_sectors & 0xff;

        /* max unmap descriptors, 255 fit in 4 kb with an 8-byte header */
        outbuf[24] = 0;
        outbuf[25] = 0;
        outbuf[26] = 0;
        outbuf[27] = 255;

        /* optimal unmap granularity */
        outbuf[28] = (unmap_sectors >> 24) & 0xff;
        outbuf[29] = (unmap_sectors >> 16) & 0xff;
        outbuf[30] = (unmap_sectors >> 8) & 0xff;
        outbuf[31] = unmap_sectors & 0xff;

        /* max write same size */
        outbuf[36] = 0;
        outbuf[37] = 0;
        outbuf[38] = 0;
        outbuf[39] = 0;

        outbuf[40] = (max_io_sectors >> 24) & 0xff;
        outbuf[41] = (max_io_sectors >> 16) & 0xff;
        outbuf[42] = (max_io_sectors >> 8) & 0xff;
        outbuf[43] = max_io_sectors & 0xff;
        break;
    }
    case 0xb1: /* block device characteristics */
    {
        buflen = 8;
        outbuf[4] = (s->rotation_rate >> 8) & 0xff;
        outbuf[5] = s->rotation_rate & 0xff;
        outbuf[6] = 0;
        outbuf[7] = 0;
        break;
    }
    case 0xb2: /* thin provisioning */
    {
        buflen = 8;
        outbuf[4] = 0;
        outbuf[5] = 0xe0; /* unmap & write_same 10/16 all supported */
        outbuf[6] = s->qdev.conf.discard_granularity ? 2 : 1;
        outbuf[7] = 0;
        break;
    }
    default:
        return -1;
    }
    /* done with EVPD */
    assert(buflen - start <= 255);
    outbuf[start - 1] = buflen - start;
    return buflen;
}

static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    int buflen = 0;

    if (req->cmd.buf[1] & 0x1) {
        /* Vital product data */
        return scsi_disk_emulate_vpd_page(req, outbuf);
    }

    /* Standard INQUIRY data */
    if (req->cmd.buf[2] != 0) {
        return -1;
    }

    /* PAGE CODE == 0 */
    buflen = req->cmd.xfer;
    if (buflen > SCSI_MAX_INQUIRY_LEN) {
        buflen = SCSI_MAX_INQUIRY_LEN;
    }

    outbuf[0] = s->qdev.type & 0x1f;
    outbuf[1] = (s->features & (1 << SCSI_DISK_F_REMOVABLE)) ? 0x80 : 0;

    strpadcpy((char *) &outbuf[16], 16, s->product, ' ');
    strpadcpy((char *) &outbuf[8], 8, s->vendor, ' ');

    memset(&outbuf[32], 0, 4);
    memcpy(&outbuf[32], s->version, MIN(4, strlen(s->version)));
    /*
     * We claim conformance to SPC-3, which is required for guests
     * to ask for modern features like READ CAPACITY(16) or the
     * block characteristics VPD page by default.  Not all of SPC-3
     * is actually implemented, but we're good enough.
     */
    outbuf[2] = s->qdev.default_scsi_version;
    outbuf[3] = 2 | 0x10; /* Format 2, HiSup */

    if (buflen > 36) {
        outbuf[4] = buflen - 5; /* Additional Length = (Len - 1) - 4 */
    } else {
        /* If the allocation length of CDB is too small,
           the additional length is not adjusted */
        outbuf[4] = 36 - 5;
    }

    /* Sync data transfer and TCQ.  */
    outbuf[7] = 0x10 | (req->bus->info->tcq ? 0x02 : 0);
    return buflen;
}

static inline bool media_is_dvd(SCSIDiskState *s)
{
    uint64_t nb_sectors;
    if (s->qdev.type != TYPE_ROM) {
        return false;
    }
    if (!blk_is_available(s->qdev.conf.blk)) {
        return false;
    }
    blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
    return nb_sectors > CD_MAX_SECTORS;
}

static inline bool media_is_cd(SCSIDiskState *s)
{
    uint64_t nb_sectors;
    if (s->qdev.type != TYPE_ROM) {
        return false;
    }
    if (!blk_is_available(s->qdev.conf.blk)) {
        return false;
    }
    blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
    return nb_sectors <= CD_MAX_SECTORS;
}

static int scsi_read_disc_information(SCSIDiskState *s, SCSIDiskReq *r,
                                      uint8_t *outbuf)
{
    uint8_t type = r->req.cmd.buf[1] & 7;

    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }

    /* Types 1/2 are only defined for Blu-Ray.  */
    if (type != 0) {
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        return -1;
    }

    memset(outbuf, 0, 34);
    outbuf[1] = 32;
    outbuf[2] = 0xe; /* last session complete, disc finalized */
    outbuf[3] = 1;   /* first track on disc */
    outbuf[4] = 1;   /* # of sessions */
    outbuf[5] = 1;   /* first track of last session */
    outbuf[6] = 1;   /* last track of last session */
    outbuf[7] = 0x20; /* unrestricted use */
    outbuf[8] = 0x00; /* CD-ROM or DVD-ROM */
    /* 9-10-11: most significant byte corresponding bytes 4-5-6 */
    /* 12-23: not meaningful for CD-ROM or DVD-ROM */
    /* 24-31: disc bar code */
    /* 32: disc application code */
    /* 33: number of OPC tables */

    return 34;
}

static int scsi_read_dvd_structure(SCSIDiskState *s, SCSIDiskReq *r,
                                   uint8_t *outbuf)
{
    static const int rds_caps_size[5] = {
        [0] = 2048 + 4,
        [1] = 4 + 4,
        [3] = 188 + 4,
        [4] = 2048 + 4,
    };

    uint8_t media = r->req.cmd.buf[1];
    uint8_t layer = r->req.cmd.buf[6];
    uint8_t format = r->req.cmd.buf[7];
    int size = -1;

    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }
    if (media != 0) {
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        return -1;
    }

    if (format != 0xff) {
        if (!blk_is_available(s->qdev.conf.blk)) {
            scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
            return -1;
        }
        if (media_is_cd(s)) {
            scsi_check_condition(r, SENSE_CODE(INCOMPATIBLE_FORMAT));
            return -1;
        }
        if (format >= ARRAY_SIZE(rds_caps_size)) {
            return -1;
        }
        size = rds_caps_size[format];
        memset(outbuf, 0, size);
    }

    switch (format) {
    case 0x00: {
        /* Physical format information */
        uint64_t nb_sectors;
        if (layer != 0) {
            goto fail;
        }
        blk_get_geometry(s->qdev.conf.blk, &nb_sectors);

        outbuf[4] = 1;   /* DVD-ROM, part version 1 */
        outbuf[5] = 0xf; /* 120mm disc, minimum rate unspecified */
        outbuf[6] = 1;   /* one layer, read-only (per MMC-2 spec) */
        outbuf[7] = 0;   /* default densities */

        stl_be_p(&outbuf[12], (nb_sectors >> 2) - 1); /* end sector */
        stl_be_p(&outbuf[16], (nb_sectors >> 2) - 1); /* l0 end sector */
        break;
    }

    case 0x01: /* DVD copyright information, all zeros */
        break;

    case 0x03: /* BCA information - invalid field for no BCA info */
        return -1;

    case 0x04: /* DVD disc manufacturing information, all zeros */
        break;

    case 0xff: { /* List capabilities */
        int i;
        size = 4;
        for (i = 0; i < ARRAY_SIZE(rds_caps_size); i++) {
            if (!rds_caps_size[i]) {
                continue;
            }
            outbuf[size] = i;
            outbuf[size + 1] = 0x40; /* Not writable, readable */
            stw_be_p(&outbuf[size + 2], rds_caps_size[i]);
            size += 4;
        }
        break;
    }

    default:
        return -1;
    }

    /* Size of buffer, not including 2 byte size field */
    stw_be_p(outbuf, size - 2);
    return size;

fail:
    return -1;
}

static int scsi_event_status_media(SCSIDiskState *s, uint8_t *outbuf)
{
    uint8_t event_code, media_status;

    media_status = 0;
    if (s->tray_open) {
        media_status = MS_TRAY_OPEN;
    } else if (blk_is_inserted(s->qdev.conf.blk)) {
        media_status = MS_MEDIA_PRESENT;
    }

    /* Event notification descriptor */
    event_code = MEC_NO_CHANGE;
    if (media_status != MS_TRAY_OPEN) {
        if (s->media_event) {
            event_code = MEC_NEW_MEDIA;
            s->media_event = false;
        } else if (s->eject_request) {
            event_code = MEC_EJECT_REQUESTED;
            s->eject_request = false;
        }
    }

    outbuf[0] = event_code;
    outbuf[1] = media_status;

    /* These fields are reserved, just clear them. */
    outbuf[2] = 0;
    outbuf[3] = 0;
    return 4;
}

static int scsi_get_event_status_notification(SCSIDiskState *s, SCSIDiskReq *r,
                                              uint8_t *outbuf)
{
    int size;
    uint8_t *buf = r->req.cmd.buf;
    uint8_t notification_class_request = buf[4];
    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }
    if ((buf[1] & 1) == 0) {
        /* asynchronous */
        return -1;
    }

    size = 4;
    outbuf[0] = outbuf[1] = 0;
    outbuf[3] = 1 << GESN_MEDIA; /* supported events */
    if (notification_class_request & (1 << GESN_MEDIA)) {
        outbuf[2] = GESN_MEDIA;
        size += scsi_event_status_media(s, &outbuf[size]);
    } else {
        outbuf[2] = 0x80;
    }
    stw_be_p(outbuf, size - 4);
    return size;
}

static int scsi_get_configuration(SCSIDiskState *s, uint8_t *outbuf)
{
    int current;

    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }

    if (media_is_dvd(s)) {
        current = MMC_PROFILE_DVD_ROM;
    } else if (media_is_cd(s)) {
        current = MMC_PROFILE_CD_ROM;
    } else {
        current = MMC_PROFILE_NONE;
    }

    memset(outbuf, 0, 40);
    stl_be_p(&outbuf[0], 36); /* Bytes after the data length field */
    stw_be_p(&outbuf[6], current);
    /* outbuf[8] - outbuf[19]: Feature 0 - Profile list */
    outbuf[10] = 0x03; /* persistent, current */
    outbuf[11] = 8; /* two profiles */
    stw_be_p(&outbuf[12], MMC_PROFILE_DVD_ROM);
    outbuf[14] = (current == MMC_PROFILE_DVD_ROM);
    stw_be_p(&outbuf[16], MMC_PROFILE_CD_ROM);
    outbuf[18] = (current == MMC_PROFILE_CD_ROM);
    /* outbuf[20] - outbuf[31]: Feature 1 - Core feature */
    stw_be_p(&outbuf[20], 1);
    outbuf[22] = 0x08 | 0x03; /* version 2, persistent, current */
    outbuf[23] = 8;
    stl_be_p(&outbuf[24], 1); /* SCSI */
    outbuf[28] = 1; /* DBE = 1, mandatory */
    /* outbuf[32] - outbuf[39]: Feature 3 - Removable media feature */
    stw_be_p(&outbuf[32], 3);
    outbuf[34] = 0x08 | 0x03; /* version 2, persistent, current */
    outbuf[35] = 4;
    outbuf[36] = 0x39; /* tray, load=1, eject=1, unlocked at powerup, lock=1 */
    /* TODO: Random readable, CD read, DVD read, drive serial number,
       power management */
    return 40;
}

static int scsi_emulate_mechanism_status(SCSIDiskState *s, uint8_t *outbuf)
{
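    /*
     * MECHANISM STATUS is an MMC command; it is only emulated for CD/DVD
     * devices, and a minimal 8-byte status header is returned.
     */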
    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }
    memset(outbuf, 0, 8);
    outbuf[5] = 1; /* CD-ROM */
    return 8;
}

static int mode_sense_page(SCSIDiskState *s, int page, uint8_t **p_outbuf,
                           int page_control)
{
    static const int mode_sense_valid[0x3f] = {
        [MODE_PAGE_HD_GEOMETRY]            = (1 << TYPE_DISK),
        [MODE_PAGE_FLEXIBLE_DISK_GEOMETRY] = (1 << TYPE_DISK),
        [MODE_PAGE_CACHING]                = (1 << TYPE_DISK) | (1 << TYPE_ROM),
        [MODE_PAGE_R_W_ERROR]              = (1 << TYPE_DISK) | (1 << TYPE_ROM),
        [MODE_PAGE_AUDIO_CTL]              = (1 << TYPE_ROM),
        [MODE_PAGE_CAPABILITIES]           = (1 << TYPE_ROM),
    };

    uint8_t *p = *p_outbuf + 2;
    int length;

    if ((mode_sense_valid[page] & (1 << s->qdev.type)) == 0) {
        return -1;
    }

    /*
     * If Changeable Values are requested, a mask denoting those mode parameters
     * that are changeable shall be returned. As we currently don't support
     * parameter changes via MODE_SELECT all bits are returned set to zero.
     * The buffer was already memset to zero by the caller of this function.
     *
     * The offsets here are off by two compared to the descriptions in the
     * SCSI specs, because those include a 2-byte header.  This is unfortunate,
     * but it is done so that offsets are consistent within our implementation
     * of MODE SENSE and MODE SELECT.  MODE SELECT has to deal with both
     * 2-byte and 4-byte headers.
     */
    switch (page) {
    case MODE_PAGE_HD_GEOMETRY:
        length = 0x16;
        if (page_control == 1) { /* Changeable Values */
            break;
        }
        /* if a geometry hint is available, use it */
        p[0] = (s->qdev.conf.cyls >> 16) & 0xff;
        p[1] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[2] = s->qdev.conf.cyls & 0xff;
        p[3] = s->qdev.conf.heads & 0xff;
        /* Write precomp start cylinder, disabled */
        p[4] = (s->qdev.conf.cyls >> 16) & 0xff;
        p[5] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[6] = s->qdev.conf.cyls & 0xff;
        /* Reduced current start cylinder, disabled */
        p[7] = (s->qdev.conf.cyls >> 16) & 0xff;
        p[8] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[9] = s->qdev.conf.cyls & 0xff;
        /* Device step rate [ns], 200ns */
        p[10] = 0;
        p[11] = 200;
        /* Landing zone cylinder */
        p[12] = 0xff;
        p[13] = 0xff;
        p[14] = 0xff;
        /* Medium rotation rate [rpm], 5400 rpm */
        p[18] = (5400 >> 8) & 0xff;
        p[19] = 5400 & 0xff;
        break;

    case MODE_PAGE_FLEXIBLE_DISK_GEOMETRY:
        length = 0x1e;
        if (page_control == 1) { /* Changeable Values */
            break;
        }
        /* Transfer rate [kbit/s], 5Mbit/s */
        p[0] = 5000 >> 8;
        p[1] = 5000 & 0xff;
        /* if a geometry hint is available, use it */
        p[2] = s->qdev.conf.heads & 0xff;
        p[3] = s->qdev.conf.secs & 0xff;
        p[4] = s->qdev.blocksize >> 8;
        p[6] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[7] = s->qdev.conf.cyls & 0xff;
        /* Write precomp start cylinder, disabled */
        p[8] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[9] = s->qdev.conf.cyls & 0xff;
        /* Reduced current start cylinder, disabled */
        p[10] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[11] = s->qdev.conf.cyls & 0xff;
        /* Device step rate [100us], 100us */
        p[12] = 0;
        p[13] = 1;
        /* Device step pulse width [us], 1us */
        p[14] = 1;
        /* Device head settle delay [100us], 100us */
        p[15] = 0;
        p[16] = 1;
        /* Motor on delay [0.1s], 0.1s */
        p[17] = 1;
        /* Motor off delay [0.1s], 0.1s */
        p[18] = 1;
        /* Medium rotation rate [rpm], 5400 rpm */
        p[26] = (5400 >> 8) & 0xff;
        p[27] = 5400 & 0xff;
        break;

    case MODE_PAGE_CACHING:
        length = 0x12;
        if (page_control == 1 || /* Changeable Values */
            blk_enable_write_cache(s->qdev.conf.blk)) {
            p[0] = 4; /* WCE */
        }
        break;

    case MODE_PAGE_R_W_ERROR:
        length = 10;
        if (page_control == 1) { /* Changeable Values */
            break;
        }
        p[0] = 0x80; /* Automatic Write Reallocation Enabled */
        if (s->qdev.type == TYPE_ROM) {
            p[1] = 0x20; /* Read Retry Count */
        }
        break;

    case MODE_PAGE_AUDIO_CTL:
        length = 14;
        break;

    case MODE_PAGE_CAPABILITIES:
        length = 0x14;
        if (page_control == 1) { /* Changeable Values */
            break;
        }

        p[0] = 0x3b; /* CD-R & CD-RW read */
        p[1] = 0; /* Writing not supported */
        p[2] = 0x7f; /* Audio, composite, digital out,
                        mode 2 form 1&2, multi session */
        p[3] = 0xff; /* CD DA, DA accurate, RW supported,
                        RW corrected, C2 errors, ISRC,
                        UPC, Bar code */
        p[4] = 0x2d | (s->tray_locked ? 2 : 0);
        /* Locking supported, jumper present, eject, tray */
        p[5] = 0; /* no volume & mute control, no
                     changer */
        p[6] = (50 * 176) >> 8; /* 50x read speed */
        p[7] = (50 * 176) & 0xff;
        p[8] = 2 >> 8; /* Two volume levels */
        p[9] = 2 & 0xff;
        p[10] = 2048 >> 8; /* 2M buffer */
        p[11] = 2048 & 0xff;
        p[12] = (16 * 176) >> 8; /* 16x read speed current */
        p[13] = (16 * 176) & 0xff;
        p[16] = (16 * 176) >> 8; /* 16x write speed */
        p[17] = (16 * 176) & 0xff;
        p[18] = (16 * 176) >> 8; /* 16x write speed current */
        p[19] = (16 * 176) & 0xff;
        break;

    default:
        return -1;
    }

    assert(length < 256);
    (*p_outbuf)[0] = page;
    (*p_outbuf)[1] = length;
    *p_outbuf += length + 2;
    return length + 2;
}

static int scsi_disk_emulate_mode_sense(SCSIDiskReq *r, uint8_t *outbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    uint64_t nb_sectors;
    bool dbd;
    int page, buflen, ret, page_control;
    uint8_t *p;
    uint8_t dev_specific_param;

    dbd = (r->req.cmd.buf[1] & 0x8) != 0;
    page = r->req.cmd.buf[2] & 0x3f;
    page_control = (r->req.cmd.buf[2] & 0xc0) >> 6;
    DPRINTF("Mode Sense(%d) (page %d, xfer %zd, page_control %d)\n",
            (r->req.cmd.buf[0] == MODE_SENSE) ? 6 : 10, page,
            r->req.cmd.xfer, page_control);
    memset(outbuf, 0, r->req.cmd.xfer);
    p = outbuf;

    if (s->qdev.type == TYPE_DISK) {
        dev_specific_param = s->features & (1 << SCSI_DISK_F_DPOFUA) ? 0x10 : 0;
        if (blk_is_read_only(s->qdev.conf.blk)) {
            dev_specific_param |= 0x80; /* Readonly.  */
        }
    } else {
        /* MMC prescribes that CD/DVD drives have no block descriptors,
         * and defines no device-specific parameter.  */
        dev_specific_param = 0x00;
        dbd = true;
    }

    if (r->req.cmd.buf[0] == MODE_SENSE) {
        p[1] = 0; /* Default media type.  */
        p[2] = dev_specific_param;
        p[3] = 0; /* Block descriptor length.  */
        p += 4;
    } else { /* MODE_SENSE_10 */
        p[2] = 0; /* Default media type.  */
        p[3] = dev_specific_param;
        p[6] = p[7] = 0; /* Block descriptor length.  */
        p += 8;
    }

    blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
    if (!dbd && nb_sectors) {
        if (r->req.cmd.buf[0] == MODE_SENSE) {
            outbuf[3] = 8; /* Block descriptor length  */
        } else { /* MODE_SENSE_10 */
            outbuf[7] = 8; /* Block descriptor length  */
        }
        nb_sectors /= (s->qdev.blocksize / 512);
        if (nb_sectors > 0xffffff) {
            nb_sectors = 0;
        }
        p[0] = 0; /* media density code */
        p[1] = (nb_sectors >> 16) & 0xff;
        p[2] = (nb_sectors >> 8) & 0xff;
        p[3] = nb_sectors & 0xff;
        p[4] = 0; /* reserved */
        p[5] = 0; /* bytes 5-7 are the sector size in bytes */
        p[6] = s->qdev.blocksize >> 8;
        p[7] = 0;
        p += 8;
    }

    if (page_control == 3) {
        /* Saved Values */
        scsi_check_condition(r, SENSE_CODE(SAVING_PARAMS_NOT_SUPPORTED));
        return -1;
    }

    if (page == 0x3f) {
        for (page = 0; page <= 0x3e; page++) {
            mode_sense_page(s, page, &p, page_control);
        }
    } else {
        ret = mode_sense_page(s, page, &p, page_control);
        if (ret == -1) {
            return -1;
        }
    }

    buflen = p - outbuf;
    /*
     * The mode data length field specifies the length in bytes of the
     * following data that is available to be transferred. The mode data
     * length does not include itself.
     */
    if (r->req.cmd.buf[0] == MODE_SENSE) {
        outbuf[0] = buflen - 1;
    } else { /* MODE_SENSE_10 */
        outbuf[0] = ((buflen - 2) >> 8) & 0xff;
        outbuf[1] = (buflen - 2) & 0xff;
    }
    return buflen;
}

static int scsi_disk_emulate_read_toc(SCSIRequest *req, uint8_t *outbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    int start_track, format, msf, toclen;
    uint64_t nb_sectors;

    msf = req->cmd.buf[1] & 2;
    format = req->cmd.buf[2] & 0xf;
    start_track = req->cmd.buf[6];
    blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
    DPRINTF("Read TOC (track %d format %d msf %d)\n", start_track, format, msf >> 1);
    nb_sectors /= s->qdev.blocksize / 512;
    switch (format) {
    case 0:
        toclen = cdrom_read_toc(nb_sectors, outbuf, msf, start_track);
        break;
    case 1:
        /* multi session : only a single session defined */
        toclen = 12;
        memset(outbuf, 0, 12);
        outbuf[1] = 0x0a;
        outbuf[2] = 0x01;
        outbuf[3] = 0x01;
        break;
    case 2:
        toclen = cdrom_read_toc_raw(nb_sectors, outbuf, msf, start_track);
        break;
    default:
        return -1;
    }
    return toclen;
}

static int scsi_disk_emulate_start_stop(SCSIDiskReq *r)
{
    SCSIRequest *req = &r->req;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    bool start = req->cmd.buf[4] & 1;
    bool loej = req->cmd.buf[4] & 2; /* load on start, eject on !start */
    int pwrcnd = req->cmd.buf[4] & 0xf0;

    if (pwrcnd) {
        /* eject/load only happens for power condition == 0 */
        return 0;
    }

    if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) && loej) {
        if (!start && !s->tray_open && s->tray_locked) {
            scsi_check_condition(r,
                                 blk_is_inserted(s->qdev.conf.blk)
                                 ? SENSE_CODE(ILLEGAL_REQ_REMOVAL_PREVENTED)
                                 : SENSE_CODE(NOT_READY_REMOVAL_PREVENTED));
            return -1;
        }

        if (s->tray_open != !start) {
            blk_eject(s->qdev.conf.blk, !start);
            s->tray_open = !start;
        }
    }
    return 0;
}

static void scsi_disk_emulate_read_data(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    int buflen = r->iov.iov_len;

    if (buflen) {
        DPRINTF("Read buf_len=%d\n", buflen);
        r->iov.iov_len = 0;
        r->started = true;
        scsi_req_data(&r->req, buflen);
        return;
    }

    /* This also clears the sense buffer for REQUEST SENSE.  */
    scsi_req_complete(&r->req, GOOD);
}

static int scsi_disk_check_mode_select(SCSIDiskState *s, int page,
                                       uint8_t *inbuf, int inlen)
{
    uint8_t mode_current[SCSI_MAX_MODE_LEN];
    uint8_t mode_changeable[SCSI_MAX_MODE_LEN];
    uint8_t *p;
    int len, expected_len, changeable_len, i;

    /* The input buffer does not include the page header, so it is
     * off by 2 bytes.
     */
    expected_len = inlen + 2;
    if (expected_len > SCSI_MAX_MODE_LEN) {
        return -1;
    }

    p = mode_current;
    memset(mode_current, 0, inlen + 2);
    len = mode_sense_page(s, page, &p, 0);
    if (len < 0 || len != expected_len) {
        return -1;
    }

    p = mode_changeable;
    memset(mode_changeable, 0, inlen + 2);
    changeable_len = mode_sense_page(s, page, &p, 1);
    assert(changeable_len == len);

    /* Check that unchangeable bits are the same as what MODE SENSE
     * would return.
     */
    for (i = 2; i < len; i++) {
        if (((mode_current[i] ^ inbuf[i - 2]) & ~mode_changeable[i]) != 0) {
            return -1;
        }
    }
    return 0;
}

static void scsi_disk_apply_mode_select(SCSIDiskState *s, int page, uint8_t *p)
{
    switch (page) {
    case MODE_PAGE_CACHING:
        blk_set_enable_write_cache(s->qdev.conf.blk, (p[0] & 4) != 0);
        break;

    default:
        break;
    }
}

static int mode_select_pages(SCSIDiskReq *r, uint8_t *p, int len, bool change)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    while (len > 0) {
        int page, subpage, page_len;

        /* Parse both possible formats for the mode page headers.  */
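        /*
         * Bit 6 of the first byte (SPF) selects the long, sub-page format
         * with a 4-byte header and 16-bit page length; otherwise the page
         * uses the short 2-byte header with an 8-bit length.
         */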
        page = p[0] & 0x3f;
        if (p[0] & 0x40) {
            if (len < 4) {
                goto invalid_param_len;
            }
            subpage = p[1];
            page_len = lduw_be_p(&p[2]);
            p += 4;
            len -= 4;
        } else {
            if (len < 2) {
                goto invalid_param_len;
            }
            subpage = 0;
            page_len = p[1];
            p += 2;
            len -= 2;
        }

        if (subpage) {
            goto invalid_param;
        }
        if (page_len > len) {
            goto invalid_param_len;
        }

        if (!change) {
            if (scsi_disk_check_mode_select(s, page, p, page_len) < 0) {
                goto invalid_param;
            }
        } else {
            scsi_disk_apply_mode_select(s, page, p);
        }

        p += page_len;
        len -= page_len;
    }
    return 0;

invalid_param:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM));
    return -1;

invalid_param_len:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
    return -1;
}

static void scsi_disk_emulate_mode_select(SCSIDiskReq *r, uint8_t *inbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    uint8_t *p = inbuf;
    int cmd = r->req.cmd.buf[0];
    int len = r->req.cmd.xfer;
    int hdr_len = (cmd == MODE_SELECT ? 4 : 8);
    int bd_len;
    int pass;

    /* We only support PF=1, SP=0.  */
    if ((r->req.cmd.buf[1] & 0x11) != 0x10) {
        goto invalid_field;
    }

    if (len < hdr_len) {
        goto invalid_param_len;
    }

    bd_len = (cmd == MODE_SELECT ? p[3] : lduw_be_p(&p[6]));
    len -= hdr_len;
    p += hdr_len;
    if (len < bd_len) {
        goto invalid_param_len;
    }
    if (bd_len != 0 && bd_len != 8) {
        goto invalid_param;
    }

    len -= bd_len;
    p += bd_len;

    /* Ensure no change is made if there is an error!  */
    for (pass = 0; pass < 2; pass++) {
        if (mode_select_pages(r, p, len, pass == 1) < 0) {
            assert(pass == 0);
            return;
        }
    }
    if (!blk_enable_write_cache(s->qdev.conf.blk)) {
        /* The request is used as the AIO opaque value, so add a ref.  */
        scsi_req_ref(&r->req);
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
        return;
    }

    scsi_req_complete(&r->req, GOOD);
    return;

invalid_param:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM));
    return;

invalid_param_len:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
    return;

invalid_field:
    scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
}

static inline bool check_lba_range(SCSIDiskState *s,
                                   uint64_t sector_num, uint32_t nb_sectors)
{
    /*
     * The first line tests that no overflow happens when computing the last
     * sector.  The second line tests that the last accessed sector is in
     * range.
     *
     * Careful, the computations should not underflow for nb_sectors == 0,
     * and a 0-block read to the first LBA beyond the end of device is
     * valid.
     */
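    /*
     * For example, with max_lba == 7 (an 8-sector device):
     * sector_num == 8, nb_sectors == 0 is accepted (8 <= 8), while
     * sector_num == 7, nb_sectors == 2 is rejected (9 > 8).
     */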
1637 */ 1638 return (sector_num <= sector_num + nb_sectors && 1639 sector_num + nb_sectors <= s->qdev.max_lba + 1); 1640 } 1641 1642 typedef struct UnmapCBData { 1643 SCSIDiskReq *r; 1644 uint8_t *inbuf; 1645 int count; 1646 } UnmapCBData; 1647 1648 static void scsi_unmap_complete(void *opaque, int ret); 1649 1650 static void scsi_unmap_complete_noio(UnmapCBData *data, int ret) 1651 { 1652 SCSIDiskReq *r = data->r; 1653 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1654 uint64_t sector_num; 1655 uint32_t nb_sectors; 1656 1657 assert(r->req.aiocb == NULL); 1658 if (scsi_disk_req_check_error(r, ret, false)) { 1659 goto done; 1660 } 1661 1662 if (data->count > 0) { 1663 sector_num = ldq_be_p(&data->inbuf[0]); 1664 nb_sectors = ldl_be_p(&data->inbuf[8]) & 0xffffffffULL; 1665 if (!check_lba_range(s, sector_num, nb_sectors)) { 1666 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE)); 1667 goto done; 1668 } 1669 1670 r->req.aiocb = blk_aio_pdiscard(s->qdev.conf.blk, 1671 sector_num * s->qdev.blocksize, 1672 nb_sectors * s->qdev.blocksize, 1673 scsi_unmap_complete, data); 1674 data->count--; 1675 data->inbuf += 16; 1676 return; 1677 } 1678 1679 scsi_req_complete(&r->req, GOOD); 1680 1681 done: 1682 scsi_req_unref(&r->req); 1683 g_free(data); 1684 } 1685 1686 static void scsi_unmap_complete(void *opaque, int ret) 1687 { 1688 UnmapCBData *data = opaque; 1689 SCSIDiskReq *r = data->r; 1690 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1691 1692 assert(r->req.aiocb != NULL); 1693 r->req.aiocb = NULL; 1694 1695 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); 1696 scsi_unmap_complete_noio(data, ret); 1697 aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); 1698 } 1699 1700 static void scsi_disk_emulate_unmap(SCSIDiskReq *r, uint8_t *inbuf) 1701 { 1702 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1703 uint8_t *p = inbuf; 1704 int len = r->req.cmd.xfer; 1705 UnmapCBData *data; 1706 1707 /* Reject ANCHOR=1. */ 1708 if (r->req.cmd.buf[1] & 0x1) { 1709 goto invalid_field; 1710 } 1711 1712 if (len < 8) { 1713 goto invalid_param_len; 1714 } 1715 if (len < lduw_be_p(&p[0]) + 2) { 1716 goto invalid_param_len; 1717 } 1718 if (len < lduw_be_p(&p[2]) + 8) { 1719 goto invalid_param_len; 1720 } 1721 if (lduw_be_p(&p[2]) & 15) { 1722 goto invalid_param_len; 1723 } 1724 1725 if (blk_is_read_only(s->qdev.conf.blk)) { 1726 scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED)); 1727 return; 1728 } 1729 1730 data = g_new0(UnmapCBData, 1); 1731 data->r = r; 1732 data->inbuf = &p[8]; 1733 data->count = lduw_be_p(&p[2]) >> 4; 1734 1735 /* The matching unref is in scsi_unmap_complete, before data is freed. 
    /* The matching unref is in scsi_unmap_complete, before data is freed.  */
    scsi_req_ref(&r->req);
    scsi_unmap_complete_noio(data, 0);
    return;

invalid_param_len:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
    return;

invalid_field:
    scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
}

typedef struct WriteSameCBData {
    SCSIDiskReq *r;
    int64_t sector;
    int nb_sectors;
    QEMUIOVector qiov;
    struct iovec iov;
} WriteSameCBData;

static void scsi_write_same_complete(void *opaque, int ret)
{
    WriteSameCBData *data = opaque;
    SCSIDiskReq *r = data->r;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;
    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (scsi_disk_req_check_error(r, ret, true)) {
        goto done;
    }

    block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);

    data->nb_sectors -= data->iov.iov_len / 512;
    data->sector += data->iov.iov_len / 512;
    data->iov.iov_len = MIN(data->nb_sectors * 512, data->iov.iov_len);
    if (data->iov.iov_len) {
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         data->iov.iov_len, BLOCK_ACCT_WRITE);
        /* Reinitialize qiov, to handle unaligned WRITE SAME request
         * where final qiov may need smaller size */
        qemu_iovec_init_external(&data->qiov, &data->iov, 1);
        r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk,
                                       data->sector << BDRV_SECTOR_BITS,
                                       &data->qiov, 0,
                                       scsi_write_same_complete, data);
        aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
        return;
    }

    scsi_req_complete(&r->req, GOOD);

done:
    scsi_req_unref(&r->req);
    qemu_vfree(data->iov.iov_base);
    g_free(data);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}

static void scsi_disk_emulate_write_same(SCSIDiskReq *r, uint8_t *inbuf)
{
    SCSIRequest *req = &r->req;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    uint32_t nb_sectors = scsi_data_cdb_xfer(r->req.cmd.buf);
    WriteSameCBData *data;
    uint8_t *buf;
    int i;

    /* Fail if PBDATA=1 or LBDATA=1 or ANCHOR=1.  */
    if (nb_sectors == 0 || (req->cmd.buf[1] & 0x16)) {
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        return;
    }

    if (blk_is_read_only(s->qdev.conf.blk)) {
        scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
        return;
    }
    if (!check_lba_range(s, r->req.cmd.lba, nb_sectors)) {
        scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
        return;
    }

    if ((req->cmd.buf[1] & 0x1) || buffer_is_zero(inbuf, s->qdev.blocksize)) {
        int flags = (req->cmd.buf[1] & 0x8) ? BDRV_REQ_MAY_UNMAP : 0;

        /* The request is used as the AIO opaque value, so add a ref.  */
        scsi_req_ref(&r->req);
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         nb_sectors * s->qdev.blocksize,
                         BLOCK_ACCT_WRITE);
        r->req.aiocb = blk_aio_pwrite_zeroes(s->qdev.conf.blk,
                                             r->req.cmd.lba * s->qdev.blocksize,
                                             nb_sectors * s->qdev.blocksize,
                                             flags, scsi_aio_complete, r);
        return;
    }

    data = g_new0(WriteSameCBData, 1);
    data->r = r;
    data->sector = r->req.cmd.lba * (s->qdev.blocksize / 512);
    data->nb_sectors = nb_sectors * (s->qdev.blocksize / 512);
    data->iov.iov_len = MIN(data->nb_sectors * 512, SCSI_WRITE_SAME_MAX);
    data->iov.iov_base = buf = blk_blockalign(s->qdev.conf.blk,
                                              data->iov.iov_len);
    qemu_iovec_init_external(&data->qiov, &data->iov, 1);

    for (i = 0; i < data->iov.iov_len; i += s->qdev.blocksize) {
        memcpy(&buf[i], inbuf, s->qdev.blocksize);
    }

    scsi_req_ref(&r->req);
    block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                     data->iov.iov_len, BLOCK_ACCT_WRITE);
    r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk,
                                   data->sector << BDRV_SECTOR_BITS,
                                   &data->qiov, 0,
                                   scsi_write_same_complete, data);
}

static void scsi_disk_emulate_write_data(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    if (r->iov.iov_len) {
        int buflen = r->iov.iov_len;
        DPRINTF("Write buf_len=%d\n", buflen);
        r->iov.iov_len = 0;
        scsi_req_data(&r->req, buflen);
        return;
    }

    switch (req->cmd.buf[0]) {
    case MODE_SELECT:
    case MODE_SELECT_10:
        /* This also clears the sense buffer for REQUEST SENSE.  */
        scsi_disk_emulate_mode_select(r, r->iov.iov_base);
        break;

    case UNMAP:
        scsi_disk_emulate_unmap(r, r->iov.iov_base);
        break;

    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
        if (r->req.status == -1) {
            scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        }
        break;

    case WRITE_SAME_10:
    case WRITE_SAME_16:
        scsi_disk_emulate_write_same(r, r->iov.iov_base);
        break;

    default:
        abort();
    }
}

static int32_t scsi_disk_emulate_command(SCSIRequest *req, uint8_t *buf)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    uint64_t nb_sectors;
    uint8_t *outbuf;
    int buflen;

    switch (req->cmd.buf[0]) {
    case INQUIRY:
    case MODE_SENSE:
    case MODE_SENSE_10:
    case RESERVE:
    case RESERVE_10:
    case RELEASE:
    case RELEASE_10:
    case START_STOP:
    case ALLOW_MEDIUM_REMOVAL:
    case GET_CONFIGURATION:
    case GET_EVENT_STATUS_NOTIFICATION:
    case MECHANISM_STATUS:
    case REQUEST_SENSE:
        break;

    default:
        if (!blk_is_available(s->qdev.conf.blk)) {
            scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
            return 0;
        }
        break;
    }

    /*
     * FIXME: we shouldn't return anything bigger than 4k, but the code
     * requires the buffer to be as big as req->cmd.xfer in several
     * places.  So, do not allow CDBs with a very large ALLOCATION
     * LENGTH.  The real fix would be to modify scsi_read_data and
     * dma_buf_read, so that they return data beyond the buflen
     * as all zeros.
     */
1938 */ 1939 if (req->cmd.xfer > 65536) { 1940 goto illegal_request; 1941 } 1942 r->buflen = MAX(4096, req->cmd.xfer); 1943 1944 if (!r->iov.iov_base) { 1945 r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen); 1946 } 1947 1948 buflen = req->cmd.xfer; 1949 outbuf = r->iov.iov_base; 1950 memset(outbuf, 0, r->buflen); 1951 switch (req->cmd.buf[0]) { 1952 case TEST_UNIT_READY: 1953 assert(blk_is_available(s->qdev.conf.blk)); 1954 break; 1955 case INQUIRY: 1956 buflen = scsi_disk_emulate_inquiry(req, outbuf); 1957 if (buflen < 0) { 1958 goto illegal_request; 1959 } 1960 break; 1961 case MODE_SENSE: 1962 case MODE_SENSE_10: 1963 buflen = scsi_disk_emulate_mode_sense(r, outbuf); 1964 if (buflen < 0) { 1965 goto illegal_request; 1966 } 1967 break; 1968 case READ_TOC: 1969 buflen = scsi_disk_emulate_read_toc(req, outbuf); 1970 if (buflen < 0) { 1971 goto illegal_request; 1972 } 1973 break; 1974 case RESERVE: 1975 if (req->cmd.buf[1] & 1) { 1976 goto illegal_request; 1977 } 1978 break; 1979 case RESERVE_10: 1980 if (req->cmd.buf[1] & 3) { 1981 goto illegal_request; 1982 } 1983 break; 1984 case RELEASE: 1985 if (req->cmd.buf[1] & 1) { 1986 goto illegal_request; 1987 } 1988 break; 1989 case RELEASE_10: 1990 if (req->cmd.buf[1] & 3) { 1991 goto illegal_request; 1992 } 1993 break; 1994 case START_STOP: 1995 if (scsi_disk_emulate_start_stop(r) < 0) { 1996 return 0; 1997 } 1998 break; 1999 case ALLOW_MEDIUM_REMOVAL: 2000 s->tray_locked = req->cmd.buf[4] & 1; 2001 blk_lock_medium(s->qdev.conf.blk, req->cmd.buf[4] & 1); 2002 break; 2003 case READ_CAPACITY_10: 2004 /* The normal LEN field for this command is zero. */ 2005 memset(outbuf, 0, 8); 2006 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 2007 if (!nb_sectors) { 2008 scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY)); 2009 return 0; 2010 } 2011 if ((req->cmd.buf[8] & 1) == 0 && req->cmd.lba) { 2012 goto illegal_request; 2013 } 2014 nb_sectors /= s->qdev.blocksize / 512; 2015 /* Returned value is the address of the last sector. */ 2016 nb_sectors--; 2017 /* Remember the new size for read/write sanity checking. */ 2018 s->qdev.max_lba = nb_sectors; 2019 /* Clip to 2TB, instead of returning capacity modulo 2TB. */ 2020 if (nb_sectors > UINT32_MAX) { 2021 nb_sectors = UINT32_MAX; 2022 } 2023 outbuf[0] = (nb_sectors >> 24) & 0xff; 2024 outbuf[1] = (nb_sectors >> 16) & 0xff; 2025 outbuf[2] = (nb_sectors >> 8) & 0xff; 2026 outbuf[3] = nb_sectors & 0xff; 2027 outbuf[4] = 0; 2028 outbuf[5] = 0; 2029 outbuf[6] = s->qdev.blocksize >> 8; 2030 outbuf[7] = 0; 2031 break; 2032 case REQUEST_SENSE: 2033 /* Just return "NO SENSE". 
        buflen = scsi_convert_sense(NULL, 0, outbuf, r->buflen,
                                    (req->cmd.buf[1] & 1) == 0);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case MECHANISM_STATUS:
        buflen = scsi_emulate_mechanism_status(s, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case GET_CONFIGURATION:
        buflen = scsi_get_configuration(s, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case GET_EVENT_STATUS_NOTIFICATION:
        buflen = scsi_get_event_status_notification(s, r, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case READ_DISC_INFORMATION:
        buflen = scsi_read_disc_information(s, r, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case READ_DVD_STRUCTURE:
        buflen = scsi_read_dvd_structure(s, r, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case SERVICE_ACTION_IN_16:
        /* Service Action In subcommands. */
        if ((req->cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) {
            DPRINTF("SAI READ CAPACITY(16)\n");
            memset(outbuf, 0, req->cmd.xfer);
            blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
            if (!nb_sectors) {
                scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY));
                return 0;
            }
            if ((req->cmd.buf[14] & 1) == 0 && req->cmd.lba) {
                goto illegal_request;
            }
            nb_sectors /= s->qdev.blocksize / 512;
            /* Returned value is the address of the last sector.  */
            nb_sectors--;
            /* Remember the new size for read/write sanity checking. */
            s->qdev.max_lba = nb_sectors;
            outbuf[0] = (nb_sectors >> 56) & 0xff;
            outbuf[1] = (nb_sectors >> 48) & 0xff;
            outbuf[2] = (nb_sectors >> 40) & 0xff;
            outbuf[3] = (nb_sectors >> 32) & 0xff;
            outbuf[4] = (nb_sectors >> 24) & 0xff;
            outbuf[5] = (nb_sectors >> 16) & 0xff;
            outbuf[6] = (nb_sectors >> 8) & 0xff;
            outbuf[7] = nb_sectors & 0xff;
            outbuf[8] = 0;
            outbuf[9] = 0;
            outbuf[10] = s->qdev.blocksize >> 8;
            outbuf[11] = 0;
            outbuf[12] = 0;
            outbuf[13] = get_physical_block_exp(&s->qdev.conf);

            /* set TPE bit if the format supports discard */
            if (s->qdev.conf.discard_granularity) {
                outbuf[14] = 0x80;
            }

            /* Protection, exponent and lowest lba field left blank. */
            break;
        }
        DPRINTF("Unsupported Service Action In\n");
        goto illegal_request;
    case SYNCHRONIZE_CACHE:
        /* The request is used as the AIO opaque value, so add a ref.  */
        scsi_req_ref(&r->req);
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
        return 0;
    case SEEK_10:
        DPRINTF("Seek(10) (sector %" PRId64 ")\n", r->req.cmd.lba);
        if (r->req.cmd.lba > s->qdev.max_lba) {
            goto illegal_lba;
        }
        break;
    case MODE_SELECT:
        DPRINTF("Mode Select(6) (len %lu)\n", (unsigned long)r->req.cmd.xfer);
        break;
    case MODE_SELECT_10:
        DPRINTF("Mode Select(10) (len %lu)\n", (unsigned long)r->req.cmd.xfer);
        break;
    case UNMAP:
        DPRINTF("Unmap (len %lu)\n", (unsigned long)r->req.cmd.xfer);
        break;
    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
        DPRINTF("Verify (bytchk %d)\n", (req->cmd.buf[1] >> 1) & 3);
        if (req->cmd.buf[1] & 6) {
            goto illegal_request;
        }
        break;
    case WRITE_SAME_10:
    case WRITE_SAME_16:
        DPRINTF("WRITE SAME %d (len %lu)\n",
                req->cmd.buf[0] == WRITE_SAME_10 ? 10 : 16,
                (unsigned long)r->req.cmd.xfer);
        break;
    default:
        DPRINTF("Unknown SCSI command (%2.2x=%s)\n", buf[0],
                scsi_command_name(buf[0]));
        scsi_check_condition(r, SENSE_CODE(INVALID_OPCODE));
        return 0;
    }
    assert(!r->req.aiocb);
    r->iov.iov_len = MIN(r->buflen, req->cmd.xfer);
    if (r->iov.iov_len == 0) {
        scsi_req_complete(&r->req, GOOD);
    }
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        assert(r->iov.iov_len == req->cmd.xfer);
        return -r->iov.iov_len;
    } else {
        return r->iov.iov_len;
    }

illegal_request:
    if (r->req.status == -1) {
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
    }
    return 0;

illegal_lba:
    scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
    return 0;
}

/* Execute a scsi command.  Returns the length of the data expected by the
   command.  This will be positive for data transfers from the device
   (e.g. disk reads), negative for transfers to the device (e.g. disk writes),
   and zero if the command does not transfer any data.  */

static int32_t scsi_disk_dma_command(SCSIRequest *req, uint8_t *buf)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
    uint32_t len;
    uint8_t command;

    command = buf[0];

    if (!blk_is_available(s->qdev.conf.blk)) {
        scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
        return 0;
    }

    len = scsi_data_cdb_xfer(r->req.cmd.buf);
    switch (command) {
    case READ_6:
    case READ_10:
    case READ_12:
    case READ_16:
        DPRINTF("Read (sector %" PRId64 ", count %u)\n", r->req.cmd.lba, len);
        /* Protection information is not supported.  For SCSI versions 2 and
         * older (as determined by snooping the guest's INQUIRY commands),
         * there is no RD/WR/VRPROTECT, so skip this check in these versions.
         */
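        /* 0xe0 masks bits 5-7 of CDB byte 1, i.e. the RDPROTECT field. */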
        if (s->qdev.scsi_version > 2 && (r->req.cmd.buf[1] & 0xe0)) {
            goto illegal_request;
        }
        if (!check_lba_range(s, r->req.cmd.lba, len)) {
            goto illegal_lba;
        }
        r->sector = r->req.cmd.lba * (s->qdev.blocksize / 512);
        r->sector_count = len * (s->qdev.blocksize / 512);
        break;
    case WRITE_6:
    case WRITE_10:
    case WRITE_12:
    case WRITE_16:
    case WRITE_VERIFY_10:
    case WRITE_VERIFY_12:
    case WRITE_VERIFY_16:
        if (blk_is_read_only(s->qdev.conf.blk)) {
            scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
            return 0;
        }
        DPRINTF("Write %s(sector %" PRId64 ", count %u)\n",
                (command & 0xe) == 0xe ? "And Verify " : "",
                r->req.cmd.lba, len);
        /* fall through */
    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
        /* We get here only for BYTCHK == 0x01 and only for scsi-block.
         * As far as DMA is concerned, we can treat it the same as a write;
         * scsi_block_do_sgio will send VERIFY commands.
         */
        if (s->qdev.scsi_version > 2 && (r->req.cmd.buf[1] & 0xe0)) {
            goto illegal_request;
        }
        if (!check_lba_range(s, r->req.cmd.lba, len)) {
            goto illegal_lba;
        }
        r->sector = r->req.cmd.lba * (s->qdev.blocksize / 512);
        r->sector_count = len * (s->qdev.blocksize / 512);
        break;
    default:
        abort();
    illegal_request:
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        return 0;
    illegal_lba:
        scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
        return 0;
    }
    r->need_fua_emulation = sdc->need_fua_emulation(&r->req.cmd);
    if (r->sector_count == 0) {
        scsi_req_complete(&r->req, GOOD);
    }
    assert(r->iov.iov_len == 0);
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        return -r->sector_count * 512;
    } else {
        return r->sector_count * 512;
    }
}

static void scsi_disk_reset(DeviceState *dev)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev.qdev, dev);
    uint64_t nb_sectors;

    scsi_device_purge_requests(&s->qdev, SENSE_CODE(RESET));

    blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
    nb_sectors /= s->qdev.blocksize / 512;
    if (nb_sectors) {
        nb_sectors--;
    }
    s->qdev.max_lba = nb_sectors;
    /* reset tray statuses */
    s->tray_locked = 0;
    s->tray_open = 0;

    s->qdev.scsi_version = s->qdev.default_scsi_version;
}

static void scsi_disk_resize_cb(void *opaque)
{
    SCSIDiskState *s = opaque;

    /* SPC lists this sense code as available only for
     * direct-access devices.
     */
    if (s->qdev.type == TYPE_DISK) {
        scsi_device_report_change(&s->qdev, SENSE_CODE(CAPACITY_CHANGED));
    }
}

static void scsi_cd_change_media_cb(void *opaque, bool load, Error **errp)
{
    SCSIDiskState *s = opaque;

    /*
     * When a CD gets changed, we have to report an ejected state and
     * then a loaded state to guests so that they detect tray
     * open/close and media change events.  Guests that do not use
     * GET_EVENT_STATUS_NOTIFICATION to detect such tray open/close
     * states rely on this behavior.
     *
     * media_changed governs the state machine used for unit attention
     * report.  media_event is used by GET EVENT STATUS NOTIFICATION.
     */
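    /*
     * Typically this means a medium change arrives as two calls to this
     * callback: load=false when the old medium goes away, then load=true
     * once the new one is inserted.
     */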
    s->media_changed = load;
    s->tray_open = !load;
    scsi_device_set_ua(&s->qdev, SENSE_CODE(UNIT_ATTENTION_NO_MEDIUM));
    s->media_event = true;
    s->eject_request = false;
}

static void scsi_cd_eject_request_cb(void *opaque, bool force)
{
    SCSIDiskState *s = opaque;

    s->eject_request = true;
    if (force) {
        s->tray_locked = false;
    }
}

static bool scsi_cd_is_tray_open(void *opaque)
{
    return ((SCSIDiskState *)opaque)->tray_open;
}

static bool scsi_cd_is_medium_locked(void *opaque)
{
    return ((SCSIDiskState *)opaque)->tray_locked;
}

static const BlockDevOps scsi_disk_removable_block_ops = {
    .change_media_cb = scsi_cd_change_media_cb,
    .eject_request_cb = scsi_cd_eject_request_cb,
    .is_tray_open = scsi_cd_is_tray_open,
    .is_medium_locked = scsi_cd_is_medium_locked,

    .resize_cb = scsi_disk_resize_cb,
};

static const BlockDevOps scsi_disk_block_ops = {
    .resize_cb = scsi_disk_resize_cb,
};

static void scsi_disk_unit_attention_reported(SCSIDevice *dev)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
    if (s->media_changed) {
        s->media_changed = false;
        scsi_device_set_ua(&s->qdev, SENSE_CODE(MEDIUM_CHANGED));
    }
}

static void scsi_realize(SCSIDevice *dev, Error **errp)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);

    if (!s->qdev.conf.blk) {
        error_setg(errp, "drive property not set");
        return;
    }

    if (!(s->features & (1 << SCSI_DISK_F_REMOVABLE)) &&
        !blk_is_inserted(s->qdev.conf.blk)) {
        error_setg(errp, "Device needs media, but drive is empty");
        return;
    }

    blkconf_blocksizes(&s->qdev.conf);

    if (s->qdev.conf.logical_block_size >
        s->qdev.conf.physical_block_size) {
        error_setg(errp,
                   "logical_block_size > physical_block_size not supported");
        return;
    }

    if (dev->type == TYPE_DISK) {
        if (!blkconf_geometry(&dev->conf, NULL, 65535, 255, 255, errp)) {
            return;
        }
    }
    if (!blkconf_apply_backend_options(&dev->conf,
                                       blk_is_read_only(s->qdev.conf.blk),
                                       dev->type == TYPE_DISK, errp)) {
        return;
    }

    if (s->qdev.conf.discard_granularity == -1) {
        s->qdev.conf.discard_granularity =
            MAX(s->qdev.conf.logical_block_size, DEFAULT_DISCARD_GRANULARITY);
    }

    if (!s->version) {
        s->version = g_strdup(qemu_hw_version());
    }
    if (!s->vendor) {
        s->vendor = g_strdup("QEMU");
    }

    if (blk_is_sg(s->qdev.conf.blk)) {
        error_setg(errp, "unwanted /dev/sg*");
        return;
    }

    if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) &&
        !(s->features & (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS))) {
        blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_removable_block_ops, s);
    } else {
        blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_block_ops, s);
    }
    blk_set_guest_block_size(s->qdev.conf.blk, s->qdev.blocksize);

    blk_iostatus_enable(s->qdev.conf.blk);
}

static void scsi_hd_realize(SCSIDevice *dev, Error **errp)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);

    /* This can happen for devices without a drive; the error message for
     * the missing backend will be issued in scsi_realize().
     */
    if (s->qdev.conf.blk) {
        blkconf_blocksizes(&s->qdev.conf);
    }
    s->qdev.blocksize = s->qdev.conf.logical_block_size;
    s->qdev.type = TYPE_DISK;
    if (!s->product) {
        s->product = g_strdup("QEMU HARDDISK");
    }
    scsi_realize(&s->qdev, errp);
}

static void scsi_cd_realize(SCSIDevice *dev, Error **errp)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
    int ret;

    if (!dev->conf.blk) {
        /* Anonymous BlockBackend for an empty drive.  As we put it into
         * dev->conf, qdev takes care of detaching on unplug. */
        dev->conf.blk = blk_new(0, BLK_PERM_ALL);
        ret = blk_attach_dev(dev->conf.blk, &dev->qdev);
        assert(ret == 0);
    }

    s->qdev.blocksize = 2048;
    s->qdev.type = TYPE_ROM;
    s->features |= 1 << SCSI_DISK_F_REMOVABLE;
    if (!s->product) {
        s->product = g_strdup("QEMU CD-ROM");
    }
    scsi_realize(&s->qdev, errp);
}

static void scsi_disk_realize(SCSIDevice *dev, Error **errp)
{
    DriveInfo *dinfo;
    Error *local_err = NULL;

    if (!dev->conf.blk) {
        scsi_realize(dev, &local_err);
        assert(local_err);
        error_propagate(errp, local_err);
        return;
    }

    dinfo = blk_legacy_dinfo(dev->conf.blk);
    if (dinfo && dinfo->media_cd) {
        scsi_cd_realize(dev, errp);
    } else {
        scsi_hd_realize(dev, errp);
    }
}

static const SCSIReqOps scsi_disk_emulate_reqops = {
    .size         = sizeof(SCSIDiskReq),
    .free_req     = scsi_free_request,
    .send_command = scsi_disk_emulate_command,
    .read_data    = scsi_disk_emulate_read_data,
    .write_data   = scsi_disk_emulate_write_data,
    .get_buf      = scsi_get_buf,
};

static const SCSIReqOps scsi_disk_dma_reqops = {
    .size         = sizeof(SCSIDiskReq),
    .free_req     = scsi_free_request,
    .send_command = scsi_disk_dma_command,
    .read_data    = scsi_read_data,
    .write_data   = scsi_write_data,
    .get_buf      = scsi_get_buf,
    .load_request = scsi_disk_load_request,
    .save_request = scsi_disk_save_request,
};
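/*
 * Commands that move user data to or from the backend go through
 * scsi_disk_dma_reqops; everything else is emulated entirely in memory via
 * scsi_disk_emulate_reqops.  The per-opcode table below selects between the
 * two; opcodes that are not listed fall back to the emulation path in
 * scsi_new_request().
 */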
static const SCSIReqOps *const scsi_disk_reqops_dispatch[256] = {
    [TEST_UNIT_READY] = &scsi_disk_emulate_reqops,
    [INQUIRY] = &scsi_disk_emulate_reqops,
    [MODE_SENSE] = &scsi_disk_emulate_reqops,
    [MODE_SENSE_10] = &scsi_disk_emulate_reqops,
    [START_STOP] = &scsi_disk_emulate_reqops,
    [ALLOW_MEDIUM_REMOVAL] = &scsi_disk_emulate_reqops,
    [READ_CAPACITY_10] = &scsi_disk_emulate_reqops,
    [READ_TOC] = &scsi_disk_emulate_reqops,
    [READ_DVD_STRUCTURE] = &scsi_disk_emulate_reqops,
    [READ_DISC_INFORMATION] = &scsi_disk_emulate_reqops,
    [GET_CONFIGURATION] = &scsi_disk_emulate_reqops,
    [GET_EVENT_STATUS_NOTIFICATION] = &scsi_disk_emulate_reqops,
    [MECHANISM_STATUS] = &scsi_disk_emulate_reqops,
    [SERVICE_ACTION_IN_16] = &scsi_disk_emulate_reqops,
    [REQUEST_SENSE] = &scsi_disk_emulate_reqops,
    [SYNCHRONIZE_CACHE] = &scsi_disk_emulate_reqops,
    [SEEK_10] = &scsi_disk_emulate_reqops,
    [MODE_SELECT] = &scsi_disk_emulate_reqops,
    [MODE_SELECT_10] = &scsi_disk_emulate_reqops,
    [UNMAP] = &scsi_disk_emulate_reqops,
    [WRITE_SAME_10] = &scsi_disk_emulate_reqops,
    [WRITE_SAME_16] = &scsi_disk_emulate_reqops,
    [VERIFY_10] = &scsi_disk_emulate_reqops,
    [VERIFY_12] = &scsi_disk_emulate_reqops,
    [VERIFY_16] = &scsi_disk_emulate_reqops,

    [READ_6] = &scsi_disk_dma_reqops,
    [READ_10] = &scsi_disk_dma_reqops,
    [READ_12] = &scsi_disk_dma_reqops,
    [READ_16] = &scsi_disk_dma_reqops,
    [WRITE_6] = &scsi_disk_dma_reqops,
    [WRITE_10] = &scsi_disk_dma_reqops,
    [WRITE_12] = &scsi_disk_dma_reqops,
    [WRITE_16] = &scsi_disk_dma_reqops,
    [WRITE_VERIFY_10] = &scsi_disk_dma_reqops,
    [WRITE_VERIFY_12] = &scsi_disk_dma_reqops,
    [WRITE_VERIFY_16] = &scsi_disk_dma_reqops,
};

static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun,
                                     uint8_t *buf, void *hba_private)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);
    SCSIRequest *req;
    const SCSIReqOps *ops;
    uint8_t command;

    command = buf[0];
    ops = scsi_disk_reqops_dispatch[command];
    if (!ops) {
        ops = &scsi_disk_emulate_reqops;
    }
    req = scsi_req_alloc(ops, &s->qdev, tag, lun, hba_private);

#ifdef DEBUG_SCSI
    DPRINTF("Command: lun=%d tag=0x%x data=0x%02x", lun, tag, buf[0]);
    {
        int i;
        for (i = 1; i < scsi_cdb_length(buf); i++) {
            printf(" 0x%02x", buf[i]);
        }
        printf("\n");
    }
#endif

    return req;
}

#ifdef __linux__
static int get_device_type(SCSIDiskState *s)
{
    uint8_t cmd[16];
    uint8_t buf[36];
    int ret;

    memset(cmd, 0, sizeof(cmd));
    memset(buf, 0, sizeof(buf));
    cmd[0] = INQUIRY;
    cmd[4] = sizeof(buf);

    ret = scsi_SG_IO_FROM_DEV(s->qdev.conf.blk, cmd, sizeof(cmd),
                              buf, sizeof(buf));
    if (ret < 0) {
        return -1;
    }
    s->qdev.type = buf[0];
    if (buf[1] & 0x80) {
        s->features |= 1 << SCSI_DISK_F_REMOVABLE;
    }
    return 0;
}

static void scsi_block_realize(SCSIDevice *dev, Error **errp)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
    int sg_version;
    int rc;

    if (!s->qdev.conf.blk) {
        error_setg(errp, "drive property not set");
        return;
    }

    /* Check that we are using a driver that supports SG_IO (version 3 and
     * later). */
    rc = blk_ioctl(s->qdev.conf.blk, SG_GET_VERSION_NUM, &sg_version);
    if (rc < 0) {
        error_setg_errno(errp, -rc, "cannot get SG_IO version number");
        if (rc != -EPERM) {
            error_append_hint(errp, "Is this a SCSI device?\n");
        }
        return;
    }
    if (sg_version < 30000) {
        error_setg(errp, "scsi generic interface too old");
        return;
    }

    /* get device type from INQUIRY data */
    rc = get_device_type(s);
    if (rc < 0) {
        error_setg(errp, "INQUIRY failed");
        return;
    }

    /* Make a guess at the block size; we'll fix it up when the guest sends
     * READ CAPACITY.  If it doesn't, it would likely assume these sizes
     * anyway.  (TODO: check in /sys.)
     */
    if (s->qdev.type == TYPE_ROM || s->qdev.type == TYPE_WORM) {
        s->qdev.blocksize = 2048;
    } else {
        s->qdev.blocksize = 512;
    }

    /* Make the scsi-block device non-removable, so that it cannot be
     * ejected with the HMP and QMP eject commands.
     */
    s->features |= (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS);

    scsi_realize(&s->qdev, errp);
    scsi_generic_read_device_inquiry(&s->qdev);
}

typedef struct SCSIBlockReq {
    SCSIDiskReq req;
    sg_io_hdr_t io_header;

    /* Selected bytes of the original CDB, copied into our own CDB. */
    uint8_t cmd, cdb1, group_number;

    /* CDB passed to SG_IO. */
    uint8_t cdb[16];
} SCSIBlockReq;
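/*
 * Illustration (assumed example, not from the original source): if the guest
 * issues a READ(10) for 8 blocks at LBA 0x1234 and the DMA helpers split it
 * into two 4-block segments, scsi_block_do_sgio() below rebuilds two READ(10)
 * CDBs, one for LBA 0x1234 and one for LBA 0x1238, each with a transfer
 * length of 4 and with the guest's original byte 1 and group number copied in.
 */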
static BlockAIOCB *scsi_block_do_sgio(SCSIBlockReq *req,
                                      int64_t offset, QEMUIOVector *iov,
                                      int direction,
                                      BlockCompletionFunc *cb, void *opaque)
{
    sg_io_hdr_t *io_header = &req->io_header;
    SCSIDiskReq *r = &req->req;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    int nb_logical_blocks;
    uint64_t lba;
    BlockAIOCB *aiocb;

    /* This is not supported yet.  It can only happen if the guest does
     * reads and writes that are not aligned to the logical sector size
     * _and_ cover multiple MemoryRegions.
     */
    assert(offset % s->qdev.blocksize == 0);
    assert(iov->size % s->qdev.blocksize == 0);

    io_header->interface_id = 'S';

    /* The data transfer comes from the QEMUIOVector. */
    io_header->dxfer_direction = direction;
    io_header->dxfer_len = iov->size;
    io_header->dxferp = (void *)iov->iov;
    io_header->iovec_count = iov->niov;
    assert(io_header->iovec_count == iov->niov); /* no overflow! */

    /* Build a new CDB with the LBA and length patched in, in case
     * DMA helpers split the transfer into multiple segments.  Do not
     * build a CDB smaller than what the guest wanted, and only build
     * a larger one if strictly necessary.
     */
    io_header->cmdp = req->cdb;
    lba = offset / s->qdev.blocksize;
    nb_logical_blocks = io_header->dxfer_len / s->qdev.blocksize;

    if ((req->cmd >> 5) == 0 && lba <= 0x1ffff) {
        /* 6-byte CDB */
        stl_be_p(&req->cdb[0], lba | (req->cmd << 24));
        req->cdb[4] = nb_logical_blocks;
        req->cdb[5] = 0;
        io_header->cmd_len = 6;
    } else if ((req->cmd >> 5) <= 1 && lba <= 0xffffffffULL) {
        /* 10-byte CDB */
        req->cdb[0] = (req->cmd & 0x1f) | 0x20;
        req->cdb[1] = req->cdb1;
        stl_be_p(&req->cdb[2], lba);
        req->cdb[6] = req->group_number;
        stw_be_p(&req->cdb[7], nb_logical_blocks);
        req->cdb[9] = 0;
        io_header->cmd_len = 10;
    } else if ((req->cmd >> 5) != 4 && lba <= 0xffffffffULL) {
        /* 12-byte CDB */
        req->cdb[0] = (req->cmd & 0x1f) | 0xA0;
        req->cdb[1] = req->cdb1;
        stl_be_p(&req->cdb[2], lba);
        stl_be_p(&req->cdb[6], nb_logical_blocks);
        req->cdb[10] = req->group_number;
        req->cdb[11] = 0;
        io_header->cmd_len = 12;
    } else {
        /* 16-byte CDB */
        req->cdb[0] = (req->cmd & 0x1f) | 0x80;
        req->cdb[1] = req->cdb1;
        stq_be_p(&req->cdb[2], lba);
        stl_be_p(&req->cdb[10], nb_logical_blocks);
        req->cdb[14] = req->group_number;
        req->cdb[15] = 0;
        io_header->cmd_len = 16;
    }

    /* The rest is as in scsi-generic.c. */
    io_header->mx_sb_len = sizeof(r->req.sense);
    io_header->sbp = r->req.sense;
    io_header->timeout = UINT_MAX;
    io_header->usr_ptr = r;
    io_header->flags |= SG_FLAG_DIRECT_IO;

    aiocb = blk_aio_ioctl(s->qdev.conf.blk, SG_IO, io_header, cb, opaque);
    assert(aiocb != NULL);
    return aiocb;
}

static bool scsi_block_no_fua(SCSICommand *cmd)
{
    return false;
}

static BlockAIOCB *scsi_block_dma_readv(int64_t offset,
                                        QEMUIOVector *iov,
                                        BlockCompletionFunc *cb, void *cb_opaque,
                                        void *opaque)
{
    SCSIBlockReq *r = opaque;
    return scsi_block_do_sgio(r, offset, iov,
                              SG_DXFER_FROM_DEV, cb, cb_opaque);
}

static BlockAIOCB *scsi_block_dma_writev(int64_t offset,
                                         QEMUIOVector *iov,
                                         BlockCompletionFunc *cb, void *cb_opaque,
                                         void *opaque)
{
    SCSIBlockReq *r = opaque;
    return scsi_block_do_sgio(r, offset, iov,
                              SG_DXFER_TO_DEV, cb, cb_opaque);
}

static bool scsi_block_is_passthrough(SCSIDiskState *s, uint8_t *buf)
{
    switch (buf[0]) {
    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
        /* Check if BYTCHK == 0x01 (data-out buffer contains data
         * for the number of logical blocks specified in the length
         * field).  For other modes, do not use scatter/gather operation.
         */
        if ((buf[1] & 6) == 2) {
            return false;
        }
        break;

    case READ_6:
    case READ_10:
    case READ_12:
    case READ_16:
    case WRITE_6:
    case WRITE_10:
    case WRITE_12:
    case WRITE_16:
    case WRITE_VERIFY_10:
    case WRITE_VERIFY_12:
    case WRITE_VERIFY_16:
        /* MMC writing cannot be done via DMA helpers, because it sometimes
         * involves writing beyond the maximum LBA or to negative LBA (lead-in).
         * We might use scsi_block_dma_reqops as long as no writing commands are
         * seen, but performance usually isn't paramount on optical media.  So,
         * just make scsi-block operate the same as scsi-generic for them.
         */
        if (s->qdev.type != TYPE_ROM) {
            return false;
        }
        break;

    default:
        break;
    }

    return true;
}


static int32_t scsi_block_dma_command(SCSIRequest *req, uint8_t *buf)
{
    SCSIBlockReq *r = (SCSIBlockReq *)req;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);

    r->cmd = req->cmd.buf[0];
    switch (r->cmd >> 5) {
    case 0:
        /* 6-byte CDB.  */
        r->cdb1 = r->group_number = 0;
        break;
    case 1:
        /* 10-byte CDB.  */
        r->cdb1 = req->cmd.buf[1];
        r->group_number = req->cmd.buf[6];
        break;
    case 4:
        /* 12-byte CDB.  */
        r->cdb1 = req->cmd.buf[1];
        r->group_number = req->cmd.buf[10];
        break;
    case 5:
        /* 16-byte CDB.  */
        r->cdb1 = req->cmd.buf[1];
        r->group_number = req->cmd.buf[14];
        break;
    default:
        abort();
    }

    /* Protection information is not supported.  For SCSI versions 2 and
     * older (as determined by snooping the guest's INQUIRY commands),
     * there is no RD/WR/VRPROTECT, so skip this check in these versions.
     */
    if (s->qdev.scsi_version > 2 && (req->cmd.buf[1] & 0xe0)) {
        scsi_check_condition(&r->req, SENSE_CODE(INVALID_FIELD));
        return 0;
    }

    r->req.status = &r->io_header.status;
    return scsi_disk_dma_command(req, buf);
}

static const SCSIReqOps scsi_block_dma_reqops = {
    .size         = sizeof(SCSIBlockReq),
    .free_req     = scsi_free_request,
    .send_command = scsi_block_dma_command,
    .read_data    = scsi_read_data,
    .write_data   = scsi_write_data,
    .get_buf      = scsi_get_buf,
    .load_request = scsi_disk_load_request,
    .save_request = scsi_disk_save_request,
};

static SCSIRequest *scsi_block_new_request(SCSIDevice *d, uint32_t tag,
                                           uint32_t lun, uint8_t *buf,
                                           void *hba_private)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);

    if (scsi_block_is_passthrough(s, buf)) {
        return scsi_req_alloc(&scsi_generic_req_ops, &s->qdev, tag, lun,
                              hba_private);
    } else {
        return scsi_req_alloc(&scsi_block_dma_reqops, &s->qdev, tag, lun,
                              hba_private);
    }
}

static int scsi_block_parse_cdb(SCSIDevice *d, SCSICommand *cmd,
                                uint8_t *buf, void *hba_private)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);

    if (scsi_block_is_passthrough(s, buf)) {
        return scsi_bus_parse_cdb(&s->qdev, cmd, buf, hba_private);
    } else {
        return scsi_req_parse_cdb(&s->qdev, cmd, buf);
    }
}

#endif

static
BlockAIOCB *scsi_dma_readv(int64_t offset, QEMUIOVector *iov,
                           BlockCompletionFunc *cb, void *cb_opaque,
                           void *opaque)
{
    SCSIDiskReq *r = opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    return blk_aio_preadv(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque);
}

static
BlockAIOCB *scsi_dma_writev(int64_t offset, QEMUIOVector *iov,
                            BlockCompletionFunc *cb, void *cb_opaque,
                            void *opaque)
{
    SCSIDiskReq *r = opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    return blk_aio_pwritev(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque);
}

static void scsi_disk_base_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass);

    dc->fw_name = "disk";
    dc->reset = scsi_disk_reset;
    sdc->dma_readv = scsi_dma_readv;
    sdc->dma_writev = scsi_dma_writev;
    sdc->need_fua_emulation = scsi_is_cmd_fua;
}

static const TypeInfo scsi_disk_base_info = {
    .name = TYPE_SCSI_DISK_BASE,
    .parent = TYPE_SCSI_DEVICE,
    .class_init = scsi_disk_base_class_initfn,
    .instance_size = sizeof(SCSIDiskState),
    .class_size = sizeof(SCSIDiskClass),
    .abstract = true,
};

#define DEFINE_SCSI_DISK_PROPERTIES()                           \
    DEFINE_BLOCK_PROPERTIES(SCSIDiskState, qdev.conf),          \
    DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf),    \
    DEFINE_PROP_STRING("ver", SCSIDiskState, version),          \
    DEFINE_PROP_STRING("serial", SCSIDiskState, serial),        \
    DEFINE_PROP_STRING("vendor", SCSIDiskState, vendor),        \
    DEFINE_PROP_STRING("product", SCSIDiskState, product)

static Property scsi_hd_properties[] = {
    DEFINE_SCSI_DISK_PROPERTIES(),
    DEFINE_PROP_BIT("removable", SCSIDiskState, features,
                    SCSI_DISK_F_REMOVABLE, false),
    DEFINE_PROP_BIT("dpofua", SCSIDiskState, features,
                    SCSI_DISK_F_DPOFUA, false),
    DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
    DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
    DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
    DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size,
                       DEFAULT_MAX_UNMAP_SIZE),
    DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
                       DEFAULT_MAX_IO_SIZE),
    DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0),
    DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version,
                      5),
    DEFINE_BLOCK_CHS_PROPERTIES(SCSIDiskState, qdev.conf),
    DEFINE_PROP_END_OF_LIST(),
};

static const VMStateDescription vmstate_scsi_disk_state = {
    .name = "scsi-disk",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_SCSI_DEVICE(qdev, SCSIDiskState),
        VMSTATE_BOOL(media_changed, SCSIDiskState),
        VMSTATE_BOOL(media_event, SCSIDiskState),
        VMSTATE_BOOL(eject_request, SCSIDiskState),
        VMSTATE_BOOL(tray_open, SCSIDiskState),
        VMSTATE_BOOL(tray_locked, SCSIDiskState),
        VMSTATE_END_OF_LIST()
    }
};

static void scsi_hd_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);

    sc->realize = scsi_hd_realize;
    sc->alloc_req = scsi_new_request;
    sc->unit_attention_reported = scsi_disk_unit_attention_reported;
    dc->desc = "virtual SCSI disk";
    dc->props = scsi_hd_properties;
    dc->vmsd = &vmstate_scsi_disk_state;
}

static const TypeInfo scsi_hd_info = {
    .name = "scsi-hd",
    .parent = TYPE_SCSI_DISK_BASE,
    .class_init = scsi_hd_class_initfn,
};

static Property scsi_cd_properties[] = {
    DEFINE_SCSI_DISK_PROPERTIES(),
    DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
    DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
    DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
    DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
                       DEFAULT_MAX_IO_SIZE),
    DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version,
                      5),
    DEFINE_PROP_END_OF_LIST(),
};

static void scsi_cd_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);

    sc->realize = scsi_cd_realize;
    sc->alloc_req = scsi_new_request;
    sc->unit_attention_reported = scsi_disk_unit_attention_reported;
    dc->desc = "virtual SCSI CD-ROM";
    dc->props = scsi_cd_properties;
    dc->vmsd = &vmstate_scsi_disk_state;
}

static const TypeInfo scsi_cd_info = {
    .name = "scsi-cd",
    .parent = TYPE_SCSI_DISK_BASE,
    .class_init = scsi_cd_class_initfn,
};

#ifdef __linux__
static Property scsi_block_properties[] = {
    DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf),
    DEFINE_PROP_DRIVE("drive", SCSIDiskState, qdev.conf.blk),
    DEFINE_PROP_BOOL("share-rw", SCSIDiskState, qdev.conf.share_rw, false),
    DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0),
    DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size,
                       DEFAULT_MAX_UNMAP_SIZE),
    DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
                       DEFAULT_MAX_IO_SIZE),
    DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version,
                      -1),
    DEFINE_PROP_END_OF_LIST(),
};
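/*
 * Note that scsi-block defaults scsi_version to -1 rather than 5; the
 * intention is presumably that the version reported by the real device (read
 * back via scsi_generic_read_device_inquiry() in scsi_block_realize()) is
 * used instead of a fixed SPC-3 default.
 */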
static void scsi_block_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);
    SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass);

    sc->realize = scsi_block_realize;
    sc->alloc_req = scsi_block_new_request;
    sc->parse_cdb = scsi_block_parse_cdb;
    sdc->dma_readv = scsi_block_dma_readv;
    sdc->dma_writev = scsi_block_dma_writev;
    sdc->need_fua_emulation = scsi_block_no_fua;
    dc->desc = "SCSI block device passthrough";
    dc->props = scsi_block_properties;
    dc->vmsd = &vmstate_scsi_disk_state;
}

static const TypeInfo scsi_block_info = {
    .name = "scsi-block",
    .parent = TYPE_SCSI_DISK_BASE,
    .class_init = scsi_block_class_initfn,
};
#endif

static Property scsi_disk_properties[] = {
    DEFINE_SCSI_DISK_PROPERTIES(),
    DEFINE_PROP_BIT("removable", SCSIDiskState, features,
                    SCSI_DISK_F_REMOVABLE, false),
    DEFINE_PROP_BIT("dpofua", SCSIDiskState, features,
                    SCSI_DISK_F_DPOFUA, false),
    DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
    DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
    DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
    DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size,
                       DEFAULT_MAX_UNMAP_SIZE),
    DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
                       DEFAULT_MAX_IO_SIZE),
    DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version,
                      5),
    DEFINE_PROP_END_OF_LIST(),
};

static void scsi_disk_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);

    sc->realize = scsi_disk_realize;
    sc->alloc_req = scsi_new_request;
    sc->unit_attention_reported = scsi_disk_unit_attention_reported;
    dc->fw_name = "disk";
    dc->desc = "virtual SCSI disk or CD-ROM (legacy)";
    dc->reset = scsi_disk_reset;
    dc->props = scsi_disk_properties;
    dc->vmsd = &vmstate_scsi_disk_state;
}

static const TypeInfo scsi_disk_info = {
    .name = "scsi-disk",
    .parent = TYPE_SCSI_DISK_BASE,
    .class_init = scsi_disk_class_initfn,
};

static void scsi_disk_register_types(void)
{
    type_register_static(&scsi_disk_base_info);
    type_register_static(&scsi_hd_info);
    type_register_static(&scsi_cd_info);
#ifdef __linux__
    type_register_static(&scsi_block_info);
#endif
    type_register_static(&scsi_disk_info);
}

type_init(scsi_disk_register_types)
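/*
 * For reference, these device types are typically instantiated from the
 * command line along these lines (assuming a SCSI controller such as
 * virtio-scsi-pci is already present; exact syntax may vary by QEMU version):
 *
 *   -drive if=none,id=disk0,file=disk.qcow2 -device scsi-hd,drive=disk0
 *   -drive if=none,id=cd0,file=cd.iso,media=cdrom -device scsi-cd,drive=cd0
 *   -drive if=none,id=blk0,file=/dev/sdb -device scsi-block,drive=blk0
 *
 * scsi-disk is the legacy variant that picks between scsi-hd and scsi-cd
 * based on the drive's media type (see scsi_disk_realize() above).
 */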