/*
 * SCSI Device emulation
 *
 * Copyright (c) 2006 CodeSourcery.
 * Based on code by Fabrice Bellard
 *
 * Written by Paul Brook
 * Modifications:
 *  2009-Dec-12 Artyom Tarasenko : implemented standard inquiry for the case
 *                                 when the allocation length of CDB is smaller
 *                                 than 36.
 *  2009-Oct-13 Artyom Tarasenko : implemented the block descriptor in the
 *                                 MODE SENSE response.
 *
 * This code is licensed under the LGPL.
 *
 * Note that this file only handles the SCSI architecture model and device
 * commands. Emulation of interface/link layer protocols is handled by
 * the host adapter emulator.
 */

//#define DEBUG_SCSI

#ifdef DEBUG_SCSI
#define DPRINTF(fmt, ...) \
do { printf("scsi-disk: " fmt , ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) do {} while(0)
#endif

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "hw/scsi/scsi.h"
#include "block/scsi.h"
#include "sysemu/sysemu.h"
#include "sysemu/block-backend.h"
#include "sysemu/blockdev.h"
#include "hw/block/block.h"
#include "sysemu/dma.h"
#include "qemu/cutils.h"

#ifdef __linux
#include <scsi/sg.h>
#endif

#define SCSI_WRITE_SAME_MAX 524288
#define SCSI_DMA_BUF_SIZE 131072
#define SCSI_MAX_INQUIRY_LEN 256
#define SCSI_MAX_MODE_LEN 256

#define DEFAULT_DISCARD_GRANULARITY 4096
#define DEFAULT_MAX_UNMAP_SIZE (1 << 30)   /* 1 GB */
#define DEFAULT_MAX_IO_SIZE INT_MAX        /* 2 GB - 1 block */

#define TYPE_SCSI_DISK_BASE "scsi-disk-base"

#define SCSI_DISK_BASE(obj) \
    OBJECT_CHECK(SCSIDiskState, (obj), TYPE_SCSI_DISK_BASE)
#define SCSI_DISK_BASE_CLASS(klass) \
    OBJECT_CLASS_CHECK(SCSIDiskClass, (klass), TYPE_SCSI_DISK_BASE)
#define SCSI_DISK_BASE_GET_CLASS(obj) \
    OBJECT_GET_CLASS(SCSIDiskClass, (obj), TYPE_SCSI_DISK_BASE)

typedef struct SCSIDiskClass {
    SCSIDeviceClass parent_class;
    DMAIOFunc *dma_readv;
    DMAIOFunc *dma_writev;
    bool (*need_fua_emulation)(SCSICommand *cmd);
} SCSIDiskClass;

typedef struct SCSIDiskReq {
    SCSIRequest req;
    /* Both sector and sector_count are in terms of qemu 512 byte blocks. */
    uint64_t sector;
    uint32_t sector_count;
    uint32_t buflen;
    bool started;
    bool need_fua_emulation;
    struct iovec iov;
    QEMUIOVector qiov;
    BlockAcctCookie acct;
    unsigned char *status;
} SCSIDiskReq;

#define SCSI_DISK_F_REMOVABLE             0
#define SCSI_DISK_F_DPOFUA                1
#define SCSI_DISK_F_NO_REMOVABLE_DEVOPS   2

typedef struct SCSIDiskState
{
    SCSIDevice qdev;
    uint32_t features;
    bool media_changed;
    bool media_event;
    bool eject_request;
    uint16_t port_index;
    uint64_t max_unmap_size;
    uint64_t max_io_size;
    QEMUBH *bh;
    char *version;
    char *serial;
    char *vendor;
    char *product;
    bool tray_open;
    bool tray_locked;
} SCSIDiskState;

static int scsi_handle_rw_error(SCSIDiskReq *r, int error, bool acct_failed);

static void scsi_free_request(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    qemu_vfree(r->iov.iov_base);
}

/* Helper function for command completion with sense.
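 * Builds the requested sense data into the request and finishes it with
 * CHECK CONDITION status.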
*/ 119 static void scsi_check_condition(SCSIDiskReq *r, SCSISense sense) 120 { 121 DPRINTF("Command complete tag=0x%x sense=%d/%d/%d\n", 122 r->req.tag, sense.key, sense.asc, sense.ascq); 123 scsi_req_build_sense(&r->req, sense); 124 scsi_req_complete(&r->req, CHECK_CONDITION); 125 } 126 127 static void scsi_init_iovec(SCSIDiskReq *r, size_t size) 128 { 129 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 130 131 if (!r->iov.iov_base) { 132 r->buflen = size; 133 r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen); 134 } 135 r->iov.iov_len = MIN(r->sector_count * 512, r->buflen); 136 qemu_iovec_init_external(&r->qiov, &r->iov, 1); 137 } 138 139 static void scsi_disk_save_request(QEMUFile *f, SCSIRequest *req) 140 { 141 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 142 143 qemu_put_be64s(f, &r->sector); 144 qemu_put_be32s(f, &r->sector_count); 145 qemu_put_be32s(f, &r->buflen); 146 if (r->buflen) { 147 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { 148 qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len); 149 } else if (!req->retry) { 150 uint32_t len = r->iov.iov_len; 151 qemu_put_be32s(f, &len); 152 qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len); 153 } 154 } 155 } 156 157 static void scsi_disk_load_request(QEMUFile *f, SCSIRequest *req) 158 { 159 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 160 161 qemu_get_be64s(f, &r->sector); 162 qemu_get_be32s(f, &r->sector_count); 163 qemu_get_be32s(f, &r->buflen); 164 if (r->buflen) { 165 scsi_init_iovec(r, r->buflen); 166 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { 167 qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len); 168 } else if (!r->req.retry) { 169 uint32_t len; 170 qemu_get_be32s(f, &len); 171 r->iov.iov_len = len; 172 assert(r->iov.iov_len <= r->buflen); 173 qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len); 174 } 175 } 176 177 qemu_iovec_init_external(&r->qiov, &r->iov, 1); 178 } 179 180 static bool scsi_disk_req_check_error(SCSIDiskReq *r, int ret, bool acct_failed) 181 { 182 if (r->req.io_canceled) { 183 scsi_req_cancel_complete(&r->req); 184 return true; 185 } 186 187 if (ret < 0) { 188 return scsi_handle_rw_error(r, -ret, acct_failed); 189 } 190 191 if (r->status && *r->status) { 192 if (acct_failed) { 193 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 194 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct); 195 } 196 scsi_req_complete(&r->req, *r->status); 197 return true; 198 } 199 200 return false; 201 } 202 203 static void scsi_aio_complete(void *opaque, int ret) 204 { 205 SCSIDiskReq *r = (SCSIDiskReq *)opaque; 206 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 207 208 assert(r->req.aiocb != NULL); 209 r->req.aiocb = NULL; 210 if (scsi_disk_req_check_error(r, ret, true)) { 211 goto done; 212 } 213 214 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); 215 scsi_req_complete(&r->req, GOOD); 216 217 done: 218 scsi_req_unref(&r->req); 219 } 220 221 static bool scsi_is_cmd_fua(SCSICommand *cmd) 222 { 223 switch (cmd->buf[0]) { 224 case READ_10: 225 case READ_12: 226 case READ_16: 227 case WRITE_10: 228 case WRITE_12: 229 case WRITE_16: 230 return (cmd->buf[1] & 8) != 0; 231 232 case VERIFY_10: 233 case VERIFY_12: 234 case VERIFY_16: 235 case WRITE_VERIFY_10: 236 case WRITE_VERIFY_12: 237 case WRITE_VERIFY_16: 238 return true; 239 240 case READ_6: 241 case WRITE_6: 242 default: 243 return false; 244 } 245 } 246 247 static void scsi_write_do_fua(SCSIDiskReq *r) 248 { 249 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 250 251 
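    /*
     * If FUA emulation was requested for this command, flush the backend
     * before reporting completion; otherwise the data is already stable
     * and the request can complete immediately.
     */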
assert(r->req.aiocb == NULL); 252 assert(!r->req.io_canceled); 253 254 if (r->need_fua_emulation) { 255 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0, 256 BLOCK_ACCT_FLUSH); 257 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r); 258 return; 259 } 260 261 scsi_req_complete(&r->req, GOOD); 262 scsi_req_unref(&r->req); 263 } 264 265 static void scsi_dma_complete_noio(SCSIDiskReq *r, int ret) 266 { 267 assert(r->req.aiocb == NULL); 268 if (scsi_disk_req_check_error(r, ret, false)) { 269 goto done; 270 } 271 272 r->sector += r->sector_count; 273 r->sector_count = 0; 274 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { 275 scsi_write_do_fua(r); 276 return; 277 } else { 278 scsi_req_complete(&r->req, GOOD); 279 } 280 281 done: 282 scsi_req_unref(&r->req); 283 } 284 285 static void scsi_dma_complete(void *opaque, int ret) 286 { 287 SCSIDiskReq *r = (SCSIDiskReq *)opaque; 288 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 289 290 assert(r->req.aiocb != NULL); 291 r->req.aiocb = NULL; 292 293 if (ret < 0) { 294 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct); 295 } else { 296 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); 297 } 298 scsi_dma_complete_noio(r, ret); 299 } 300 301 static void scsi_read_complete(void * opaque, int ret) 302 { 303 SCSIDiskReq *r = (SCSIDiskReq *)opaque; 304 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 305 int n; 306 307 assert(r->req.aiocb != NULL); 308 r->req.aiocb = NULL; 309 if (scsi_disk_req_check_error(r, ret, true)) { 310 goto done; 311 } 312 313 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); 314 DPRINTF("Data ready tag=0x%x len=%zd\n", r->req.tag, r->qiov.size); 315 316 n = r->qiov.size / 512; 317 r->sector += n; 318 r->sector_count -= n; 319 scsi_req_data(&r->req, r->qiov.size); 320 321 done: 322 scsi_req_unref(&r->req); 323 } 324 325 /* Actually issue a read to the block device. */ 326 static void scsi_do_read(SCSIDiskReq *r, int ret) 327 { 328 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 329 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s)); 330 331 assert (r->req.aiocb == NULL); 332 if (scsi_disk_req_check_error(r, ret, false)) { 333 goto done; 334 } 335 336 /* The request is used as the AIO opaque value, so add a ref. */ 337 scsi_req_ref(&r->req); 338 339 if (r->req.sg) { 340 dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_READ); 341 r->req.resid -= r->req.sg->size; 342 r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk), 343 r->req.sg, r->sector << BDRV_SECTOR_BITS, 344 BDRV_SECTOR_SIZE, 345 sdc->dma_readv, r, scsi_dma_complete, r, 346 DMA_DIRECTION_FROM_DEVICE); 347 } else { 348 scsi_init_iovec(r, SCSI_DMA_BUF_SIZE); 349 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 350 r->qiov.size, BLOCK_ACCT_READ); 351 r->req.aiocb = sdc->dma_readv(r->sector << BDRV_SECTOR_BITS, &r->qiov, 352 scsi_read_complete, r, r); 353 } 354 355 done: 356 scsi_req_unref(&r->req); 357 } 358 359 static void scsi_do_read_cb(void *opaque, int ret) 360 { 361 SCSIDiskReq *r = (SCSIDiskReq *)opaque; 362 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 363 364 assert (r->req.aiocb != NULL); 365 r->req.aiocb = NULL; 366 367 if (ret < 0) { 368 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct); 369 } else { 370 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); 371 } 372 scsi_do_read(opaque, ret); 373 } 374 375 /* Read more data from scsi device into buffer. 
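 * Scatter/gather requests are transferred straight to guest memory via
 * dma_blk_io(); everything else is staged through the bounce buffer in
 * SCSI_DMA_BUF_SIZE chunks and handed to the HBA with scsi_req_data().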
*/ 376 static void scsi_read_data(SCSIRequest *req) 377 { 378 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 379 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 380 bool first; 381 382 DPRINTF("Read sector_count=%d\n", r->sector_count); 383 if (r->sector_count == 0) { 384 /* This also clears the sense buffer for REQUEST SENSE. */ 385 scsi_req_complete(&r->req, GOOD); 386 return; 387 } 388 389 /* No data transfer may already be in progress */ 390 assert(r->req.aiocb == NULL); 391 392 /* The request is used as the AIO opaque value, so add a ref. */ 393 scsi_req_ref(&r->req); 394 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { 395 DPRINTF("Data transfer direction invalid\n"); 396 scsi_read_complete(r, -EINVAL); 397 return; 398 } 399 400 if (!blk_is_available(req->dev->conf.blk)) { 401 scsi_read_complete(r, -ENOMEDIUM); 402 return; 403 } 404 405 first = !r->started; 406 r->started = true; 407 if (first && r->need_fua_emulation) { 408 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0, 409 BLOCK_ACCT_FLUSH); 410 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_do_read_cb, r); 411 } else { 412 scsi_do_read(r, 0); 413 } 414 } 415 416 /* 417 * scsi_handle_rw_error has two return values. 0 means that the error 418 * must be ignored, 1 means that the error has been processed and the 419 * caller should not do anything else for this request. Note that 420 * scsi_handle_rw_error always manages its reference counts, independent 421 * of the return value. 422 */ 423 static int scsi_handle_rw_error(SCSIDiskReq *r, int error, bool acct_failed) 424 { 425 bool is_read = (r->req.cmd.mode == SCSI_XFER_FROM_DEV); 426 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 427 BlockErrorAction action = blk_get_error_action(s->qdev.conf.blk, 428 is_read, error); 429 430 if (action == BLOCK_ERROR_ACTION_REPORT) { 431 if (acct_failed) { 432 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct); 433 } 434 switch (error) { 435 case ENOMEDIUM: 436 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM)); 437 break; 438 case ENOMEM: 439 scsi_check_condition(r, SENSE_CODE(TARGET_FAILURE)); 440 break; 441 case EINVAL: 442 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 443 break; 444 case ENOSPC: 445 scsi_check_condition(r, SENSE_CODE(SPACE_ALLOC_FAILED)); 446 break; 447 default: 448 scsi_check_condition(r, SENSE_CODE(IO_ERROR)); 449 break; 450 } 451 } 452 blk_error_action(s->qdev.conf.blk, action, is_read, error); 453 if (action == BLOCK_ERROR_ACTION_STOP) { 454 scsi_req_retry(&r->req); 455 } 456 return action != BLOCK_ERROR_ACTION_IGNORE; 457 } 458 459 static void scsi_write_complete_noio(SCSIDiskReq *r, int ret) 460 { 461 uint32_t n; 462 463 assert (r->req.aiocb == NULL); 464 if (scsi_disk_req_check_error(r, ret, false)) { 465 goto done; 466 } 467 468 n = r->qiov.size / 512; 469 r->sector += n; 470 r->sector_count -= n; 471 if (r->sector_count == 0) { 472 scsi_write_do_fua(r); 473 return; 474 } else { 475 scsi_init_iovec(r, SCSI_DMA_BUF_SIZE); 476 DPRINTF("Write complete tag=0x%x more=%zd\n", r->req.tag, r->qiov.size); 477 scsi_req_data(&r->req, r->qiov.size); 478 } 479 480 done: 481 scsi_req_unref(&r->req); 482 } 483 484 static void scsi_write_complete(void * opaque, int ret) 485 { 486 SCSIDiskReq *r = (SCSIDiskReq *)opaque; 487 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 488 489 assert (r->req.aiocb != NULL); 490 r->req.aiocb = NULL; 491 492 if (ret < 0) { 493 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct); 494 } else { 495 
block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); 496 } 497 scsi_write_complete_noio(r, ret); 498 } 499 500 static void scsi_write_data(SCSIRequest *req) 501 { 502 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 503 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 504 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s)); 505 506 /* No data transfer may already be in progress */ 507 assert(r->req.aiocb == NULL); 508 509 /* The request is used as the AIO opaque value, so add a ref. */ 510 scsi_req_ref(&r->req); 511 if (r->req.cmd.mode != SCSI_XFER_TO_DEV) { 512 DPRINTF("Data transfer direction invalid\n"); 513 scsi_write_complete_noio(r, -EINVAL); 514 return; 515 } 516 517 if (!r->req.sg && !r->qiov.size) { 518 /* Called for the first time. Ask the driver to send us more data. */ 519 r->started = true; 520 scsi_write_complete_noio(r, 0); 521 return; 522 } 523 if (!blk_is_available(req->dev->conf.blk)) { 524 scsi_write_complete_noio(r, -ENOMEDIUM); 525 return; 526 } 527 528 if (r->req.cmd.buf[0] == VERIFY_10 || r->req.cmd.buf[0] == VERIFY_12 || 529 r->req.cmd.buf[0] == VERIFY_16) { 530 if (r->req.sg) { 531 scsi_dma_complete_noio(r, 0); 532 } else { 533 scsi_write_complete_noio(r, 0); 534 } 535 return; 536 } 537 538 if (r->req.sg) { 539 dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_WRITE); 540 r->req.resid -= r->req.sg->size; 541 r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk), 542 r->req.sg, r->sector << BDRV_SECTOR_BITS, 543 BDRV_SECTOR_SIZE, 544 sdc->dma_writev, r, scsi_dma_complete, r, 545 DMA_DIRECTION_TO_DEVICE); 546 } else { 547 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 548 r->qiov.size, BLOCK_ACCT_WRITE); 549 r->req.aiocb = sdc->dma_writev(r->sector << BDRV_SECTOR_BITS, &r->qiov, 550 scsi_write_complete, r, r); 551 } 552 } 553 554 /* Return a pointer to the data buffer. 
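 * HBAs use this to copy emulated data in and out of the request's bounce
 * buffer; scatter/gather requests never go through it.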
*/ 555 static uint8_t *scsi_get_buf(SCSIRequest *req) 556 { 557 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 558 559 return (uint8_t *)r->iov.iov_base; 560 } 561 562 static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf) 563 { 564 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 565 int buflen = 0; 566 int start; 567 568 if (req->cmd.buf[1] & 0x1) { 569 /* Vital product data */ 570 uint8_t page_code = req->cmd.buf[2]; 571 572 outbuf[buflen++] = s->qdev.type & 0x1f; 573 outbuf[buflen++] = page_code ; // this page 574 outbuf[buflen++] = 0x00; 575 outbuf[buflen++] = 0x00; 576 start = buflen; 577 578 switch (page_code) { 579 case 0x00: /* Supported page codes, mandatory */ 580 { 581 DPRINTF("Inquiry EVPD[Supported pages] " 582 "buffer size %zd\n", req->cmd.xfer); 583 outbuf[buflen++] = 0x00; // list of supported pages (this page) 584 if (s->serial) { 585 outbuf[buflen++] = 0x80; // unit serial number 586 } 587 outbuf[buflen++] = 0x83; // device identification 588 if (s->qdev.type == TYPE_DISK) { 589 outbuf[buflen++] = 0xb0; // block limits 590 outbuf[buflen++] = 0xb2; // thin provisioning 591 } 592 break; 593 } 594 case 0x80: /* Device serial number, optional */ 595 { 596 int l; 597 598 if (!s->serial) { 599 DPRINTF("Inquiry (EVPD[Serial number] not supported\n"); 600 return -1; 601 } 602 603 l = strlen(s->serial); 604 if (l > 36) { 605 l = 36; 606 } 607 608 DPRINTF("Inquiry EVPD[Serial number] " 609 "buffer size %zd\n", req->cmd.xfer); 610 memcpy(outbuf+buflen, s->serial, l); 611 buflen += l; 612 break; 613 } 614 615 case 0x83: /* Device identification page, mandatory */ 616 { 617 const char *str = s->serial ?: blk_name(s->qdev.conf.blk); 618 int max_len = s->serial ? 20 : 255 - 8; 619 int id_len = strlen(str); 620 621 if (id_len > max_len) { 622 id_len = max_len; 623 } 624 DPRINTF("Inquiry EVPD[Device identification] " 625 "buffer size %zd\n", req->cmd.xfer); 626 627 outbuf[buflen++] = 0x2; // ASCII 628 outbuf[buflen++] = 0; // not officially assigned 629 outbuf[buflen++] = 0; // reserved 630 outbuf[buflen++] = id_len; // length of data following 631 memcpy(outbuf+buflen, str, id_len); 632 buflen += id_len; 633 634 if (s->qdev.wwn) { 635 outbuf[buflen++] = 0x1; // Binary 636 outbuf[buflen++] = 0x3; // NAA 637 outbuf[buflen++] = 0; // reserved 638 outbuf[buflen++] = 8; 639 stq_be_p(&outbuf[buflen], s->qdev.wwn); 640 buflen += 8; 641 } 642 643 if (s->qdev.port_wwn) { 644 outbuf[buflen++] = 0x61; // SAS / Binary 645 outbuf[buflen++] = 0x93; // PIV / Target port / NAA 646 outbuf[buflen++] = 0; // reserved 647 outbuf[buflen++] = 8; 648 stq_be_p(&outbuf[buflen], s->qdev.port_wwn); 649 buflen += 8; 650 } 651 652 if (s->port_index) { 653 outbuf[buflen++] = 0x61; // SAS / Binary 654 outbuf[buflen++] = 0x94; // PIV / Target port / relative target port 655 outbuf[buflen++] = 0; // reserved 656 outbuf[buflen++] = 4; 657 stw_be_p(&outbuf[buflen + 2], s->port_index); 658 buflen += 4; 659 } 660 break; 661 } 662 case 0xb0: /* block limits */ 663 { 664 unsigned int unmap_sectors = 665 s->qdev.conf.discard_granularity / s->qdev.blocksize; 666 unsigned int min_io_size = 667 s->qdev.conf.min_io_size / s->qdev.blocksize; 668 unsigned int opt_io_size = 669 s->qdev.conf.opt_io_size / s->qdev.blocksize; 670 unsigned int max_unmap_sectors = 671 s->max_unmap_size / s->qdev.blocksize; 672 unsigned int max_io_sectors = 673 s->max_io_size / s->qdev.blocksize; 674 675 if (s->qdev.type == TYPE_ROM) { 676 DPRINTF("Inquiry (EVPD[%02X] not supported for CDROM\n", 677 page_code); 678 return 
-1; 679 } 680 /* required VPD size with unmap support */ 681 buflen = 0x40; 682 memset(outbuf + 4, 0, buflen - 4); 683 684 outbuf[4] = 0x1; /* wsnz */ 685 686 /* optimal transfer length granularity */ 687 outbuf[6] = (min_io_size >> 8) & 0xff; 688 outbuf[7] = min_io_size & 0xff; 689 690 /* maximum transfer length */ 691 outbuf[8] = (max_io_sectors >> 24) & 0xff; 692 outbuf[9] = (max_io_sectors >> 16) & 0xff; 693 outbuf[10] = (max_io_sectors >> 8) & 0xff; 694 outbuf[11] = max_io_sectors & 0xff; 695 696 /* optimal transfer length */ 697 outbuf[12] = (opt_io_size >> 24) & 0xff; 698 outbuf[13] = (opt_io_size >> 16) & 0xff; 699 outbuf[14] = (opt_io_size >> 8) & 0xff; 700 outbuf[15] = opt_io_size & 0xff; 701 702 /* max unmap LBA count, default is 1GB */ 703 outbuf[20] = (max_unmap_sectors >> 24) & 0xff; 704 outbuf[21] = (max_unmap_sectors >> 16) & 0xff; 705 outbuf[22] = (max_unmap_sectors >> 8) & 0xff; 706 outbuf[23] = max_unmap_sectors & 0xff; 707 708 /* max unmap descriptors, 255 fit in 4 kb with an 8-byte header. */ 709 outbuf[24] = 0; 710 outbuf[25] = 0; 711 outbuf[26] = 0; 712 outbuf[27] = 255; 713 714 /* optimal unmap granularity */ 715 outbuf[28] = (unmap_sectors >> 24) & 0xff; 716 outbuf[29] = (unmap_sectors >> 16) & 0xff; 717 outbuf[30] = (unmap_sectors >> 8) & 0xff; 718 outbuf[31] = unmap_sectors & 0xff; 719 720 /* max write same size */ 721 outbuf[36] = 0; 722 outbuf[37] = 0; 723 outbuf[38] = 0; 724 outbuf[39] = 0; 725 726 outbuf[40] = (max_io_sectors >> 24) & 0xff; 727 outbuf[41] = (max_io_sectors >> 16) & 0xff; 728 outbuf[42] = (max_io_sectors >> 8) & 0xff; 729 outbuf[43] = max_io_sectors & 0xff; 730 break; 731 } 732 case 0xb2: /* thin provisioning */ 733 { 734 buflen = 8; 735 outbuf[4] = 0; 736 outbuf[5] = 0xe0; /* unmap & write_same 10/16 all supported */ 737 outbuf[6] = s->qdev.conf.discard_granularity ? 2 : 1; 738 outbuf[7] = 0; 739 break; 740 } 741 default: 742 return -1; 743 } 744 /* done with EVPD */ 745 assert(buflen - start <= 255); 746 outbuf[start - 1] = buflen - start; 747 return buflen; 748 } 749 750 /* Standard INQUIRY data */ 751 if (req->cmd.buf[2] != 0) { 752 return -1; 753 } 754 755 /* PAGE CODE == 0 */ 756 buflen = req->cmd.xfer; 757 if (buflen > SCSI_MAX_INQUIRY_LEN) { 758 buflen = SCSI_MAX_INQUIRY_LEN; 759 } 760 761 outbuf[0] = s->qdev.type & 0x1f; 762 outbuf[1] = (s->features & (1 << SCSI_DISK_F_REMOVABLE)) ? 0x80 : 0; 763 764 strpadcpy((char *) &outbuf[16], 16, s->product, ' '); 765 strpadcpy((char *) &outbuf[8], 8, s->vendor, ' '); 766 767 memset(&outbuf[32], 0, 4); 768 memcpy(&outbuf[32], s->version, MIN(4, strlen(s->version))); 769 /* 770 * We claim conformance to SPC-3, which is required for guests 771 * to ask for modern features like READ CAPACITY(16) or the 772 * block characteristics VPD page by default. Not all of SPC-3 773 * is actually implemented, but we're good enough. 774 */ 775 outbuf[2] = 5; 776 outbuf[3] = 2 | 0x10; /* Format 2, HiSup */ 777 778 if (buflen > 36) { 779 outbuf[4] = buflen - 5; /* Additional Length = (Len - 1) - 4 */ 780 } else { 781 /* If the allocation length of CDB is too small, 782 the additional length is not adjusted */ 783 outbuf[4] = 36 - 5; 784 } 785 786 /* Sync data transfer and TCQ. */ 787 outbuf[7] = 0x10 | (req->bus->info->tcq ? 
0x02 : 0); 788 return buflen; 789 } 790 791 static inline bool media_is_dvd(SCSIDiskState *s) 792 { 793 uint64_t nb_sectors; 794 if (s->qdev.type != TYPE_ROM) { 795 return false; 796 } 797 if (!blk_is_available(s->qdev.conf.blk)) { 798 return false; 799 } 800 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 801 return nb_sectors > CD_MAX_SECTORS; 802 } 803 804 static inline bool media_is_cd(SCSIDiskState *s) 805 { 806 uint64_t nb_sectors; 807 if (s->qdev.type != TYPE_ROM) { 808 return false; 809 } 810 if (!blk_is_available(s->qdev.conf.blk)) { 811 return false; 812 } 813 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 814 return nb_sectors <= CD_MAX_SECTORS; 815 } 816 817 static int scsi_read_disc_information(SCSIDiskState *s, SCSIDiskReq *r, 818 uint8_t *outbuf) 819 { 820 uint8_t type = r->req.cmd.buf[1] & 7; 821 822 if (s->qdev.type != TYPE_ROM) { 823 return -1; 824 } 825 826 /* Types 1/2 are only defined for Blu-Ray. */ 827 if (type != 0) { 828 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 829 return -1; 830 } 831 832 memset(outbuf, 0, 34); 833 outbuf[1] = 32; 834 outbuf[2] = 0xe; /* last session complete, disc finalized */ 835 outbuf[3] = 1; /* first track on disc */ 836 outbuf[4] = 1; /* # of sessions */ 837 outbuf[5] = 1; /* first track of last session */ 838 outbuf[6] = 1; /* last track of last session */ 839 outbuf[7] = 0x20; /* unrestricted use */ 840 outbuf[8] = 0x00; /* CD-ROM or DVD-ROM */ 841 /* 9-10-11: most significant byte corresponding bytes 4-5-6 */ 842 /* 12-23: not meaningful for CD-ROM or DVD-ROM */ 843 /* 24-31: disc bar code */ 844 /* 32: disc application code */ 845 /* 33: number of OPC tables */ 846 847 return 34; 848 } 849 850 static int scsi_read_dvd_structure(SCSIDiskState *s, SCSIDiskReq *r, 851 uint8_t *outbuf) 852 { 853 static const int rds_caps_size[5] = { 854 [0] = 2048 + 4, 855 [1] = 4 + 4, 856 [3] = 188 + 4, 857 [4] = 2048 + 4, 858 }; 859 860 uint8_t media = r->req.cmd.buf[1]; 861 uint8_t layer = r->req.cmd.buf[6]; 862 uint8_t format = r->req.cmd.buf[7]; 863 int size = -1; 864 865 if (s->qdev.type != TYPE_ROM) { 866 return -1; 867 } 868 if (media != 0) { 869 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 870 return -1; 871 } 872 873 if (format != 0xff) { 874 if (!blk_is_available(s->qdev.conf.blk)) { 875 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM)); 876 return -1; 877 } 878 if (media_is_cd(s)) { 879 scsi_check_condition(r, SENSE_CODE(INCOMPATIBLE_FORMAT)); 880 return -1; 881 } 882 if (format >= ARRAY_SIZE(rds_caps_size)) { 883 return -1; 884 } 885 size = rds_caps_size[format]; 886 memset(outbuf, 0, size); 887 } 888 889 switch (format) { 890 case 0x00: { 891 /* Physical format information */ 892 uint64_t nb_sectors; 893 if (layer != 0) { 894 goto fail; 895 } 896 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 897 898 outbuf[4] = 1; /* DVD-ROM, part version 1 */ 899 outbuf[5] = 0xf; /* 120mm disc, minimum rate unspecified */ 900 outbuf[6] = 1; /* one layer, read-only (per MMC-2 spec) */ 901 outbuf[7] = 0; /* default densities */ 902 903 stl_be_p(&outbuf[12], (nb_sectors >> 2) - 1); /* end sector */ 904 stl_be_p(&outbuf[16], (nb_sectors >> 2) - 1); /* l0 end sector */ 905 break; 906 } 907 908 case 0x01: /* DVD copyright information, all zeros */ 909 break; 910 911 case 0x03: /* BCA information - invalid field for no BCA info */ 912 return -1; 913 914 case 0x04: /* DVD disc manufacturing information, all zeros */ 915 break; 916 917 case 0xff: { /* List capabilities */ 918 int i; 919 size = 4; 920 for (i = 0; i < ARRAY_SIZE(rds_caps_size); i++) 
{ 921 if (!rds_caps_size[i]) { 922 continue; 923 } 924 outbuf[size] = i; 925 outbuf[size + 1] = 0x40; /* Not writable, readable */ 926 stw_be_p(&outbuf[size + 2], rds_caps_size[i]); 927 size += 4; 928 } 929 break; 930 } 931 932 default: 933 return -1; 934 } 935 936 /* Size of buffer, not including 2 byte size field */ 937 stw_be_p(outbuf, size - 2); 938 return size; 939 940 fail: 941 return -1; 942 } 943 944 static int scsi_event_status_media(SCSIDiskState *s, uint8_t *outbuf) 945 { 946 uint8_t event_code, media_status; 947 948 media_status = 0; 949 if (s->tray_open) { 950 media_status = MS_TRAY_OPEN; 951 } else if (blk_is_inserted(s->qdev.conf.blk)) { 952 media_status = MS_MEDIA_PRESENT; 953 } 954 955 /* Event notification descriptor */ 956 event_code = MEC_NO_CHANGE; 957 if (media_status != MS_TRAY_OPEN) { 958 if (s->media_event) { 959 event_code = MEC_NEW_MEDIA; 960 s->media_event = false; 961 } else if (s->eject_request) { 962 event_code = MEC_EJECT_REQUESTED; 963 s->eject_request = false; 964 } 965 } 966 967 outbuf[0] = event_code; 968 outbuf[1] = media_status; 969 970 /* These fields are reserved, just clear them. */ 971 outbuf[2] = 0; 972 outbuf[3] = 0; 973 return 4; 974 } 975 976 static int scsi_get_event_status_notification(SCSIDiskState *s, SCSIDiskReq *r, 977 uint8_t *outbuf) 978 { 979 int size; 980 uint8_t *buf = r->req.cmd.buf; 981 uint8_t notification_class_request = buf[4]; 982 if (s->qdev.type != TYPE_ROM) { 983 return -1; 984 } 985 if ((buf[1] & 1) == 0) { 986 /* asynchronous */ 987 return -1; 988 } 989 990 size = 4; 991 outbuf[0] = outbuf[1] = 0; 992 outbuf[3] = 1 << GESN_MEDIA; /* supported events */ 993 if (notification_class_request & (1 << GESN_MEDIA)) { 994 outbuf[2] = GESN_MEDIA; 995 size += scsi_event_status_media(s, &outbuf[size]); 996 } else { 997 outbuf[2] = 0x80; 998 } 999 stw_be_p(outbuf, size - 4); 1000 return size; 1001 } 1002 1003 static int scsi_get_configuration(SCSIDiskState *s, uint8_t *outbuf) 1004 { 1005 int current; 1006 1007 if (s->qdev.type != TYPE_ROM) { 1008 return -1; 1009 } 1010 1011 if (media_is_dvd(s)) { 1012 current = MMC_PROFILE_DVD_ROM; 1013 } else if (media_is_cd(s)) { 1014 current = MMC_PROFILE_CD_ROM; 1015 } else { 1016 current = MMC_PROFILE_NONE; 1017 } 1018 1019 memset(outbuf, 0, 40); 1020 stl_be_p(&outbuf[0], 36); /* Bytes after the data length field */ 1021 stw_be_p(&outbuf[6], current); 1022 /* outbuf[8] - outbuf[19]: Feature 0 - Profile list */ 1023 outbuf[10] = 0x03; /* persistent, current */ 1024 outbuf[11] = 8; /* two profiles */ 1025 stw_be_p(&outbuf[12], MMC_PROFILE_DVD_ROM); 1026 outbuf[14] = (current == MMC_PROFILE_DVD_ROM); 1027 stw_be_p(&outbuf[16], MMC_PROFILE_CD_ROM); 1028 outbuf[18] = (current == MMC_PROFILE_CD_ROM); 1029 /* outbuf[20] - outbuf[31]: Feature 1 - Core feature */ 1030 stw_be_p(&outbuf[20], 1); 1031 outbuf[22] = 0x08 | 0x03; /* version 2, persistent, current */ 1032 outbuf[23] = 8; 1033 stl_be_p(&outbuf[24], 1); /* SCSI */ 1034 outbuf[28] = 1; /* DBE = 1, mandatory */ 1035 /* outbuf[32] - outbuf[39]: Feature 3 - Removable media feature */ 1036 stw_be_p(&outbuf[32], 3); 1037 outbuf[34] = 0x08 | 0x03; /* version 2, persistent, current */ 1038 outbuf[35] = 4; 1039 outbuf[36] = 0x39; /* tray, load=1, eject=1, unlocked at powerup, lock=1 */ 1040 /* TODO: Random readable, CD read, DVD read, drive serial number, 1041 power management */ 1042 return 40; 1043 } 1044 1045 static int scsi_emulate_mechanism_status(SCSIDiskState *s, uint8_t *outbuf) 1046 { 1047 if (s->qdev.type != TYPE_ROM) { 1048 return -1; 1049 } 
    memset(outbuf, 0, 8);
    outbuf[5] = 1; /* CD-ROM */
    return 8;
}

static int mode_sense_page(SCSIDiskState *s, int page, uint8_t **p_outbuf,
                           int page_control)
{
    static const int mode_sense_valid[0x3f] = {
        [MODE_PAGE_HD_GEOMETRY] = (1 << TYPE_DISK),
        [MODE_PAGE_FLEXIBLE_DISK_GEOMETRY] = (1 << TYPE_DISK),
        [MODE_PAGE_CACHING] = (1 << TYPE_DISK) | (1 << TYPE_ROM),
        [MODE_PAGE_R_W_ERROR] = (1 << TYPE_DISK) | (1 << TYPE_ROM),
        [MODE_PAGE_AUDIO_CTL] = (1 << TYPE_ROM),
        [MODE_PAGE_CAPABILITIES] = (1 << TYPE_ROM),
    };

    uint8_t *p = *p_outbuf + 2;
    int length;

    if ((mode_sense_valid[page] & (1 << s->qdev.type)) == 0) {
        return -1;
    }

    /*
     * If Changeable Values are requested, a mask denoting those mode parameters
     * that are changeable shall be returned. As we currently don't support
     * parameter changes via MODE_SELECT all bits are returned set to zero.
     * The buffer was already memset to zero by the caller of this function.
     *
     * The offsets here are off by two compared to the descriptions in the
     * SCSI specs, because those include a 2-byte header. This is unfortunate,
     * but it is done so that offsets are consistent within our implementation
     * of MODE SENSE and MODE SELECT. MODE SELECT has to deal with both
     * 2-byte and 4-byte headers.
     */
    switch (page) {
    case MODE_PAGE_HD_GEOMETRY:
        length = 0x16;
        if (page_control == 1) { /* Changeable Values */
            break;
        }
        /* if a geometry hint is available, use it */
        p[0] = (s->qdev.conf.cyls >> 16) & 0xff;
        p[1] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[2] = s->qdev.conf.cyls & 0xff;
        p[3] = s->qdev.conf.heads & 0xff;
        /* Write precomp start cylinder, disabled */
        p[4] = (s->qdev.conf.cyls >> 16) & 0xff;
        p[5] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[6] = s->qdev.conf.cyls & 0xff;
        /* Reduced current start cylinder, disabled */
        p[7] = (s->qdev.conf.cyls >> 16) & 0xff;
        p[8] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[9] = s->qdev.conf.cyls & 0xff;
        /* Device step rate [ns], 200ns */
        p[10] = 0;
        p[11] = 200;
        /* Landing zone cylinder */
        p[12] = 0xff;
        p[13] = 0xff;
        p[14] = 0xff;
        /* Medium rotation rate [rpm], 5400 rpm */
        p[18] = (5400 >> 8) & 0xff;
        p[19] = 5400 & 0xff;
        break;

    case MODE_PAGE_FLEXIBLE_DISK_GEOMETRY:
        length = 0x1e;
        if (page_control == 1) { /* Changeable Values */
            break;
        }
        /* Transfer rate [kbit/s], 5Mbit/s */
        p[0] = 5000 >> 8;
        p[1] = 5000 & 0xff;
        /* if a geometry hint is available, use it */
        p[2] = s->qdev.conf.heads & 0xff;
        p[3] = s->qdev.conf.secs & 0xff;
        p[4] = s->qdev.blocksize >> 8;
        p[6] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[7] = s->qdev.conf.cyls & 0xff;
        /* Write precomp start cylinder, disabled */
        p[8] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[9] = s->qdev.conf.cyls & 0xff;
        /* Reduced current start cylinder, disabled */
        p[10] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[11] = s->qdev.conf.cyls & 0xff;
        /* Device step rate [100us], 100us */
        p[12] = 0;
        p[13] = 1;
        /* Device step pulse width [us], 1us */
        p[14] = 1;
        /* Device head settle delay [100us], 100us */
        p[15] = 0;
        p[16] = 1;
        /* Motor on delay [0.1s], 0.1s */
        p[17] = 1;
        /* Motor off delay [0.1s], 0.1s */
        p[18] = 1;
        /* Medium rotation rate [rpm], 5400 rpm */
p[26] = (5400 >> 8) & 0xff; 1151 p[27] = 5400 & 0xff; 1152 break; 1153 1154 case MODE_PAGE_CACHING: 1155 length = 0x12; 1156 if (page_control == 1 || /* Changeable Values */ 1157 blk_enable_write_cache(s->qdev.conf.blk)) { 1158 p[0] = 4; /* WCE */ 1159 } 1160 break; 1161 1162 case MODE_PAGE_R_W_ERROR: 1163 length = 10; 1164 if (page_control == 1) { /* Changeable Values */ 1165 break; 1166 } 1167 p[0] = 0x80; /* Automatic Write Reallocation Enabled */ 1168 if (s->qdev.type == TYPE_ROM) { 1169 p[1] = 0x20; /* Read Retry Count */ 1170 } 1171 break; 1172 1173 case MODE_PAGE_AUDIO_CTL: 1174 length = 14; 1175 break; 1176 1177 case MODE_PAGE_CAPABILITIES: 1178 length = 0x14; 1179 if (page_control == 1) { /* Changeable Values */ 1180 break; 1181 } 1182 1183 p[0] = 0x3b; /* CD-R & CD-RW read */ 1184 p[1] = 0; /* Writing not supported */ 1185 p[2] = 0x7f; /* Audio, composite, digital out, 1186 mode 2 form 1&2, multi session */ 1187 p[3] = 0xff; /* CD DA, DA accurate, RW supported, 1188 RW corrected, C2 errors, ISRC, 1189 UPC, Bar code */ 1190 p[4] = 0x2d | (s->tray_locked ? 2 : 0); 1191 /* Locking supported, jumper present, eject, tray */ 1192 p[5] = 0; /* no volume & mute control, no 1193 changer */ 1194 p[6] = (50 * 176) >> 8; /* 50x read speed */ 1195 p[7] = (50 * 176) & 0xff; 1196 p[8] = 2 >> 8; /* Two volume levels */ 1197 p[9] = 2 & 0xff; 1198 p[10] = 2048 >> 8; /* 2M buffer */ 1199 p[11] = 2048 & 0xff; 1200 p[12] = (16 * 176) >> 8; /* 16x read speed current */ 1201 p[13] = (16 * 176) & 0xff; 1202 p[16] = (16 * 176) >> 8; /* 16x write speed */ 1203 p[17] = (16 * 176) & 0xff; 1204 p[18] = (16 * 176) >> 8; /* 16x write speed current */ 1205 p[19] = (16 * 176) & 0xff; 1206 break; 1207 1208 default: 1209 return -1; 1210 } 1211 1212 assert(length < 256); 1213 (*p_outbuf)[0] = page; 1214 (*p_outbuf)[1] = length; 1215 *p_outbuf += length + 2; 1216 return length + 2; 1217 } 1218 1219 static int scsi_disk_emulate_mode_sense(SCSIDiskReq *r, uint8_t *outbuf) 1220 { 1221 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1222 uint64_t nb_sectors; 1223 bool dbd; 1224 int page, buflen, ret, page_control; 1225 uint8_t *p; 1226 uint8_t dev_specific_param; 1227 1228 dbd = (r->req.cmd.buf[1] & 0x8) != 0; 1229 page = r->req.cmd.buf[2] & 0x3f; 1230 page_control = (r->req.cmd.buf[2] & 0xc0) >> 6; 1231 DPRINTF("Mode Sense(%d) (page %d, xfer %zd, page_control %d)\n", 1232 (r->req.cmd.buf[0] == MODE_SENSE) ? 6 : 10, page, r->req.cmd.xfer, page_control); 1233 memset(outbuf, 0, r->req.cmd.xfer); 1234 p = outbuf; 1235 1236 if (s->qdev.type == TYPE_DISK) { 1237 dev_specific_param = s->features & (1 << SCSI_DISK_F_DPOFUA) ? 0x10 : 0; 1238 if (blk_is_read_only(s->qdev.conf.blk)) { 1239 dev_specific_param |= 0x80; /* Readonly. */ 1240 } 1241 } else { 1242 /* MMC prescribes that CD/DVD drives have no block descriptors, 1243 * and defines no device-specific parameter. */ 1244 dev_specific_param = 0x00; 1245 dbd = true; 1246 } 1247 1248 if (r->req.cmd.buf[0] == MODE_SENSE) { 1249 p[1] = 0; /* Default media type. */ 1250 p[2] = dev_specific_param; 1251 p[3] = 0; /* Block descriptor length. */ 1252 p += 4; 1253 } else { /* MODE_SENSE_10 */ 1254 p[2] = 0; /* Default media type. */ 1255 p[3] = dev_specific_param; 1256 p[6] = p[7] = 0; /* Block descriptor length. 
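   Filled in below when a block descriptor is actually returned.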
*/ 1257 p += 8; 1258 } 1259 1260 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 1261 if (!dbd && nb_sectors) { 1262 if (r->req.cmd.buf[0] == MODE_SENSE) { 1263 outbuf[3] = 8; /* Block descriptor length */ 1264 } else { /* MODE_SENSE_10 */ 1265 outbuf[7] = 8; /* Block descriptor length */ 1266 } 1267 nb_sectors /= (s->qdev.blocksize / 512); 1268 if (nb_sectors > 0xffffff) { 1269 nb_sectors = 0; 1270 } 1271 p[0] = 0; /* media density code */ 1272 p[1] = (nb_sectors >> 16) & 0xff; 1273 p[2] = (nb_sectors >> 8) & 0xff; 1274 p[3] = nb_sectors & 0xff; 1275 p[4] = 0; /* reserved */ 1276 p[5] = 0; /* bytes 5-7 are the sector size in bytes */ 1277 p[6] = s->qdev.blocksize >> 8; 1278 p[7] = 0; 1279 p += 8; 1280 } 1281 1282 if (page_control == 3) { 1283 /* Saved Values */ 1284 scsi_check_condition(r, SENSE_CODE(SAVING_PARAMS_NOT_SUPPORTED)); 1285 return -1; 1286 } 1287 1288 if (page == 0x3f) { 1289 for (page = 0; page <= 0x3e; page++) { 1290 mode_sense_page(s, page, &p, page_control); 1291 } 1292 } else { 1293 ret = mode_sense_page(s, page, &p, page_control); 1294 if (ret == -1) { 1295 return -1; 1296 } 1297 } 1298 1299 buflen = p - outbuf; 1300 /* 1301 * The mode data length field specifies the length in bytes of the 1302 * following data that is available to be transferred. The mode data 1303 * length does not include itself. 1304 */ 1305 if (r->req.cmd.buf[0] == MODE_SENSE) { 1306 outbuf[0] = buflen - 1; 1307 } else { /* MODE_SENSE_10 */ 1308 outbuf[0] = ((buflen - 2) >> 8) & 0xff; 1309 outbuf[1] = (buflen - 2) & 0xff; 1310 } 1311 return buflen; 1312 } 1313 1314 static int scsi_disk_emulate_read_toc(SCSIRequest *req, uint8_t *outbuf) 1315 { 1316 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 1317 int start_track, format, msf, toclen; 1318 uint64_t nb_sectors; 1319 1320 msf = req->cmd.buf[1] & 2; 1321 format = req->cmd.buf[2] & 0xf; 1322 start_track = req->cmd.buf[6]; 1323 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 1324 DPRINTF("Read TOC (track %d format %d msf %d)\n", start_track, format, msf >> 1); 1325 nb_sectors /= s->qdev.blocksize / 512; 1326 switch (format) { 1327 case 0: 1328 toclen = cdrom_read_toc(nb_sectors, outbuf, msf, start_track); 1329 break; 1330 case 1: 1331 /* multi session : only a single session defined */ 1332 toclen = 12; 1333 memset(outbuf, 0, 12); 1334 outbuf[1] = 0x0a; 1335 outbuf[2] = 0x01; 1336 outbuf[3] = 0x01; 1337 break; 1338 case 2: 1339 toclen = cdrom_read_toc_raw(nb_sectors, outbuf, msf, start_track); 1340 break; 1341 default: 1342 return -1; 1343 } 1344 return toclen; 1345 } 1346 1347 static int scsi_disk_emulate_start_stop(SCSIDiskReq *r) 1348 { 1349 SCSIRequest *req = &r->req; 1350 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 1351 bool start = req->cmd.buf[4] & 1; 1352 bool loej = req->cmd.buf[4] & 2; /* load on start, eject on !start */ 1353 int pwrcnd = req->cmd.buf[4] & 0xf0; 1354 1355 if (pwrcnd) { 1356 /* eject/load only happens for power condition == 0 */ 1357 return 0; 1358 } 1359 1360 if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) && loej) { 1361 if (!start && !s->tray_open && s->tray_locked) { 1362 scsi_check_condition(r, 1363 blk_is_inserted(s->qdev.conf.blk) 1364 ? 
SENSE_CODE(ILLEGAL_REQ_REMOVAL_PREVENTED) 1365 : SENSE_CODE(NOT_READY_REMOVAL_PREVENTED)); 1366 return -1; 1367 } 1368 1369 if (s->tray_open != !start) { 1370 blk_eject(s->qdev.conf.blk, !start); 1371 s->tray_open = !start; 1372 } 1373 } 1374 return 0; 1375 } 1376 1377 static void scsi_disk_emulate_read_data(SCSIRequest *req) 1378 { 1379 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 1380 int buflen = r->iov.iov_len; 1381 1382 if (buflen) { 1383 DPRINTF("Read buf_len=%d\n", buflen); 1384 r->iov.iov_len = 0; 1385 r->started = true; 1386 scsi_req_data(&r->req, buflen); 1387 return; 1388 } 1389 1390 /* This also clears the sense buffer for REQUEST SENSE. */ 1391 scsi_req_complete(&r->req, GOOD); 1392 } 1393 1394 static int scsi_disk_check_mode_select(SCSIDiskState *s, int page, 1395 uint8_t *inbuf, int inlen) 1396 { 1397 uint8_t mode_current[SCSI_MAX_MODE_LEN]; 1398 uint8_t mode_changeable[SCSI_MAX_MODE_LEN]; 1399 uint8_t *p; 1400 int len, expected_len, changeable_len, i; 1401 1402 /* The input buffer does not include the page header, so it is 1403 * off by 2 bytes. 1404 */ 1405 expected_len = inlen + 2; 1406 if (expected_len > SCSI_MAX_MODE_LEN) { 1407 return -1; 1408 } 1409 1410 p = mode_current; 1411 memset(mode_current, 0, inlen + 2); 1412 len = mode_sense_page(s, page, &p, 0); 1413 if (len < 0 || len != expected_len) { 1414 return -1; 1415 } 1416 1417 p = mode_changeable; 1418 memset(mode_changeable, 0, inlen + 2); 1419 changeable_len = mode_sense_page(s, page, &p, 1); 1420 assert(changeable_len == len); 1421 1422 /* Check that unchangeable bits are the same as what MODE SENSE 1423 * would return. 1424 */ 1425 for (i = 2; i < len; i++) { 1426 if (((mode_current[i] ^ inbuf[i - 2]) & ~mode_changeable[i]) != 0) { 1427 return -1; 1428 } 1429 } 1430 return 0; 1431 } 1432 1433 static void scsi_disk_apply_mode_select(SCSIDiskState *s, int page, uint8_t *p) 1434 { 1435 switch (page) { 1436 case MODE_PAGE_CACHING: 1437 blk_set_enable_write_cache(s->qdev.conf.blk, (p[0] & 4) != 0); 1438 break; 1439 1440 default: 1441 break; 1442 } 1443 } 1444 1445 static int mode_select_pages(SCSIDiskReq *r, uint8_t *p, int len, bool change) 1446 { 1447 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1448 1449 while (len > 0) { 1450 int page, subpage, page_len; 1451 1452 /* Parse both possible formats for the mode page headers. 
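         * A set SPF bit (0x40) selects the 4-byte sub-page header with a
         * 16-bit length field; otherwise the plain 2-byte page_0 header is used.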
*/ 1453 page = p[0] & 0x3f; 1454 if (p[0] & 0x40) { 1455 if (len < 4) { 1456 goto invalid_param_len; 1457 } 1458 subpage = p[1]; 1459 page_len = lduw_be_p(&p[2]); 1460 p += 4; 1461 len -= 4; 1462 } else { 1463 if (len < 2) { 1464 goto invalid_param_len; 1465 } 1466 subpage = 0; 1467 page_len = p[1]; 1468 p += 2; 1469 len -= 2; 1470 } 1471 1472 if (subpage) { 1473 goto invalid_param; 1474 } 1475 if (page_len > len) { 1476 goto invalid_param_len; 1477 } 1478 1479 if (!change) { 1480 if (scsi_disk_check_mode_select(s, page, p, page_len) < 0) { 1481 goto invalid_param; 1482 } 1483 } else { 1484 scsi_disk_apply_mode_select(s, page, p); 1485 } 1486 1487 p += page_len; 1488 len -= page_len; 1489 } 1490 return 0; 1491 1492 invalid_param: 1493 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM)); 1494 return -1; 1495 1496 invalid_param_len: 1497 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN)); 1498 return -1; 1499 } 1500 1501 static void scsi_disk_emulate_mode_select(SCSIDiskReq *r, uint8_t *inbuf) 1502 { 1503 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1504 uint8_t *p = inbuf; 1505 int cmd = r->req.cmd.buf[0]; 1506 int len = r->req.cmd.xfer; 1507 int hdr_len = (cmd == MODE_SELECT ? 4 : 8); 1508 int bd_len; 1509 int pass; 1510 1511 /* We only support PF=1, SP=0. */ 1512 if ((r->req.cmd.buf[1] & 0x11) != 0x10) { 1513 goto invalid_field; 1514 } 1515 1516 if (len < hdr_len) { 1517 goto invalid_param_len; 1518 } 1519 1520 bd_len = (cmd == MODE_SELECT ? p[3] : lduw_be_p(&p[6])); 1521 len -= hdr_len; 1522 p += hdr_len; 1523 if (len < bd_len) { 1524 goto invalid_param_len; 1525 } 1526 if (bd_len != 0 && bd_len != 8) { 1527 goto invalid_param; 1528 } 1529 1530 len -= bd_len; 1531 p += bd_len; 1532 1533 /* Ensure no change is made if there is an error! */ 1534 for (pass = 0; pass < 2; pass++) { 1535 if (mode_select_pages(r, p, len, pass == 1) < 0) { 1536 assert(pass == 0); 1537 return; 1538 } 1539 } 1540 if (!blk_enable_write_cache(s->qdev.conf.blk)) { 1541 /* The request is used as the AIO opaque value, so add a ref. */ 1542 scsi_req_ref(&r->req); 1543 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0, 1544 BLOCK_ACCT_FLUSH); 1545 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r); 1546 return; 1547 } 1548 1549 scsi_req_complete(&r->req, GOOD); 1550 return; 1551 1552 invalid_param: 1553 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM)); 1554 return; 1555 1556 invalid_param_len: 1557 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN)); 1558 return; 1559 1560 invalid_field: 1561 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 1562 } 1563 1564 static inline bool check_lba_range(SCSIDiskState *s, 1565 uint64_t sector_num, uint32_t nb_sectors) 1566 { 1567 /* 1568 * The first line tests that no overflow happens when computing the last 1569 * sector. The second line tests that the last accessed sector is in 1570 * range. 1571 * 1572 * Careful, the computations should not underflow for nb_sectors == 0, 1573 * and a 0-block read to the first LBA beyond the end of device is 1574 * valid. 
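     * For example, with max_lba == N a request of (sector_num == N + 1,
     * nb_sectors == 0) passes the check, while (N + 1, 1) does not.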
1575 */ 1576 return (sector_num <= sector_num + nb_sectors && 1577 sector_num + nb_sectors <= s->qdev.max_lba + 1); 1578 } 1579 1580 typedef struct UnmapCBData { 1581 SCSIDiskReq *r; 1582 uint8_t *inbuf; 1583 int count; 1584 } UnmapCBData; 1585 1586 static void scsi_unmap_complete(void *opaque, int ret); 1587 1588 static void scsi_unmap_complete_noio(UnmapCBData *data, int ret) 1589 { 1590 SCSIDiskReq *r = data->r; 1591 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1592 uint64_t sector_num; 1593 uint32_t nb_sectors; 1594 1595 assert(r->req.aiocb == NULL); 1596 if (scsi_disk_req_check_error(r, ret, false)) { 1597 goto done; 1598 } 1599 1600 if (data->count > 0) { 1601 sector_num = ldq_be_p(&data->inbuf[0]); 1602 nb_sectors = ldl_be_p(&data->inbuf[8]) & 0xffffffffULL; 1603 if (!check_lba_range(s, sector_num, nb_sectors)) { 1604 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE)); 1605 goto done; 1606 } 1607 1608 r->req.aiocb = blk_aio_pdiscard(s->qdev.conf.blk, 1609 sector_num * s->qdev.blocksize, 1610 nb_sectors * s->qdev.blocksize, 1611 scsi_unmap_complete, data); 1612 data->count--; 1613 data->inbuf += 16; 1614 return; 1615 } 1616 1617 scsi_req_complete(&r->req, GOOD); 1618 1619 done: 1620 scsi_req_unref(&r->req); 1621 g_free(data); 1622 } 1623 1624 static void scsi_unmap_complete(void *opaque, int ret) 1625 { 1626 UnmapCBData *data = opaque; 1627 SCSIDiskReq *r = data->r; 1628 1629 assert(r->req.aiocb != NULL); 1630 r->req.aiocb = NULL; 1631 1632 scsi_unmap_complete_noio(data, ret); 1633 } 1634 1635 static void scsi_disk_emulate_unmap(SCSIDiskReq *r, uint8_t *inbuf) 1636 { 1637 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1638 uint8_t *p = inbuf; 1639 int len = r->req.cmd.xfer; 1640 UnmapCBData *data; 1641 1642 /* Reject ANCHOR=1. */ 1643 if (r->req.cmd.buf[1] & 0x1) { 1644 goto invalid_field; 1645 } 1646 1647 if (len < 8) { 1648 goto invalid_param_len; 1649 } 1650 if (len < lduw_be_p(&p[0]) + 2) { 1651 goto invalid_param_len; 1652 } 1653 if (len < lduw_be_p(&p[2]) + 8) { 1654 goto invalid_param_len; 1655 } 1656 if (lduw_be_p(&p[2]) & 15) { 1657 goto invalid_param_len; 1658 } 1659 1660 if (blk_is_read_only(s->qdev.conf.blk)) { 1661 scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED)); 1662 return; 1663 } 1664 1665 data = g_new0(UnmapCBData, 1); 1666 data->r = r; 1667 data->inbuf = &p[8]; 1668 data->count = lduw_be_p(&p[2]) >> 4; 1669 1670 /* The matching unref is in scsi_unmap_complete, before data is freed. 
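     * scsi_unmap_complete_noio issues one blk_aio_pdiscard per descriptor and
     * re-arms itself via scsi_unmap_complete until data->count reaches zero.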
*/ 1671 scsi_req_ref(&r->req); 1672 scsi_unmap_complete_noio(data, 0); 1673 return; 1674 1675 invalid_param_len: 1676 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN)); 1677 return; 1678 1679 invalid_field: 1680 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 1681 } 1682 1683 typedef struct WriteSameCBData { 1684 SCSIDiskReq *r; 1685 int64_t sector; 1686 int nb_sectors; 1687 QEMUIOVector qiov; 1688 struct iovec iov; 1689 } WriteSameCBData; 1690 1691 static void scsi_write_same_complete(void *opaque, int ret) 1692 { 1693 WriteSameCBData *data = opaque; 1694 SCSIDiskReq *r = data->r; 1695 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1696 1697 assert(r->req.aiocb != NULL); 1698 r->req.aiocb = NULL; 1699 if (scsi_disk_req_check_error(r, ret, true)) { 1700 goto done; 1701 } 1702 1703 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); 1704 1705 data->nb_sectors -= data->iov.iov_len / 512; 1706 data->sector += data->iov.iov_len / 512; 1707 data->iov.iov_len = MIN(data->nb_sectors * 512, data->iov.iov_len); 1708 if (data->iov.iov_len) { 1709 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 1710 data->iov.iov_len, BLOCK_ACCT_WRITE); 1711 /* Reinitialize qiov, to handle unaligned WRITE SAME request 1712 * where final qiov may need smaller size */ 1713 qemu_iovec_init_external(&data->qiov, &data->iov, 1); 1714 r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk, 1715 data->sector << BDRV_SECTOR_BITS, 1716 &data->qiov, 0, 1717 scsi_write_same_complete, data); 1718 return; 1719 } 1720 1721 scsi_req_complete(&r->req, GOOD); 1722 1723 done: 1724 scsi_req_unref(&r->req); 1725 qemu_vfree(data->iov.iov_base); 1726 g_free(data); 1727 } 1728 1729 static void scsi_disk_emulate_write_same(SCSIDiskReq *r, uint8_t *inbuf) 1730 { 1731 SCSIRequest *req = &r->req; 1732 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 1733 uint32_t nb_sectors = scsi_data_cdb_xfer(r->req.cmd.buf); 1734 WriteSameCBData *data; 1735 uint8_t *buf; 1736 int i; 1737 1738 /* Fail if PBDATA=1 or LBDATA=1 or ANCHOR=1. */ 1739 if (nb_sectors == 0 || (req->cmd.buf[1] & 0x16)) { 1740 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 1741 return; 1742 } 1743 1744 if (blk_is_read_only(s->qdev.conf.blk)) { 1745 scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED)); 1746 return; 1747 } 1748 if (!check_lba_range(s, r->req.cmd.lba, nb_sectors)) { 1749 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE)); 1750 return; 1751 } 1752 1753 if (buffer_is_zero(inbuf, s->qdev.blocksize)) { 1754 int flags = (req->cmd.buf[1] & 0x8) ? BDRV_REQ_MAY_UNMAP : 0; 1755 1756 /* The request is used as the AIO opaque value, so add a ref. 
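         * An all-zero payload is optimized into blk_aio_pwrite_zeroes,
         * passing BDRV_REQ_MAY_UNMAP when the UNMAP bit of the CDB is set.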
*/ 1757 scsi_req_ref(&r->req); 1758 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 1759 nb_sectors * s->qdev.blocksize, 1760 BLOCK_ACCT_WRITE); 1761 r->req.aiocb = blk_aio_pwrite_zeroes(s->qdev.conf.blk, 1762 r->req.cmd.lba * s->qdev.blocksize, 1763 nb_sectors * s->qdev.blocksize, 1764 flags, scsi_aio_complete, r); 1765 return; 1766 } 1767 1768 data = g_new0(WriteSameCBData, 1); 1769 data->r = r; 1770 data->sector = r->req.cmd.lba * (s->qdev.blocksize / 512); 1771 data->nb_sectors = nb_sectors * (s->qdev.blocksize / 512); 1772 data->iov.iov_len = MIN(data->nb_sectors * 512, SCSI_WRITE_SAME_MAX); 1773 data->iov.iov_base = buf = blk_blockalign(s->qdev.conf.blk, 1774 data->iov.iov_len); 1775 qemu_iovec_init_external(&data->qiov, &data->iov, 1); 1776 1777 for (i = 0; i < data->iov.iov_len; i += s->qdev.blocksize) { 1778 memcpy(&buf[i], inbuf, s->qdev.blocksize); 1779 } 1780 1781 scsi_req_ref(&r->req); 1782 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 1783 data->iov.iov_len, BLOCK_ACCT_WRITE); 1784 r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk, 1785 data->sector << BDRV_SECTOR_BITS, 1786 &data->qiov, 0, 1787 scsi_write_same_complete, data); 1788 } 1789 1790 static void scsi_disk_emulate_write_data(SCSIRequest *req) 1791 { 1792 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 1793 1794 if (r->iov.iov_len) { 1795 int buflen = r->iov.iov_len; 1796 DPRINTF("Write buf_len=%d\n", buflen); 1797 r->iov.iov_len = 0; 1798 scsi_req_data(&r->req, buflen); 1799 return; 1800 } 1801 1802 switch (req->cmd.buf[0]) { 1803 case MODE_SELECT: 1804 case MODE_SELECT_10: 1805 /* This also clears the sense buffer for REQUEST SENSE. */ 1806 scsi_disk_emulate_mode_select(r, r->iov.iov_base); 1807 break; 1808 1809 case UNMAP: 1810 scsi_disk_emulate_unmap(r, r->iov.iov_base); 1811 break; 1812 1813 case VERIFY_10: 1814 case VERIFY_12: 1815 case VERIFY_16: 1816 if (r->req.status == -1) { 1817 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 1818 } 1819 break; 1820 1821 case WRITE_SAME_10: 1822 case WRITE_SAME_16: 1823 scsi_disk_emulate_write_same(r, r->iov.iov_base); 1824 break; 1825 1826 default: 1827 abort(); 1828 } 1829 } 1830 1831 static int32_t scsi_disk_emulate_command(SCSIRequest *req, uint8_t *buf) 1832 { 1833 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 1834 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 1835 uint64_t nb_sectors; 1836 uint8_t *outbuf; 1837 int buflen; 1838 1839 switch (req->cmd.buf[0]) { 1840 case INQUIRY: 1841 case MODE_SENSE: 1842 case MODE_SENSE_10: 1843 case RESERVE: 1844 case RESERVE_10: 1845 case RELEASE: 1846 case RELEASE_10: 1847 case START_STOP: 1848 case ALLOW_MEDIUM_REMOVAL: 1849 case GET_CONFIGURATION: 1850 case GET_EVENT_STATUS_NOTIFICATION: 1851 case MECHANISM_STATUS: 1852 case REQUEST_SENSE: 1853 break; 1854 1855 default: 1856 if (!blk_is_available(s->qdev.conf.blk)) { 1857 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM)); 1858 return 0; 1859 } 1860 break; 1861 } 1862 1863 /* 1864 * FIXME: we shouldn't return anything bigger than 4k, but the code 1865 * requires the buffer to be as big as req->cmd.xfer in several 1866 * places. So, do not allow CDBs with a very large ALLOCATION 1867 * LENGTH. The real fix would be to modify scsi_read_data and 1868 * dma_buf_read, so that they return data beyond the buflen 1869 * as all zeros. 
1870 */ 1871 if (req->cmd.xfer > 65536) { 1872 goto illegal_request; 1873 } 1874 r->buflen = MAX(4096, req->cmd.xfer); 1875 1876 if (!r->iov.iov_base) { 1877 r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen); 1878 } 1879 1880 buflen = req->cmd.xfer; 1881 outbuf = r->iov.iov_base; 1882 memset(outbuf, 0, r->buflen); 1883 switch (req->cmd.buf[0]) { 1884 case TEST_UNIT_READY: 1885 assert(blk_is_available(s->qdev.conf.blk)); 1886 break; 1887 case INQUIRY: 1888 buflen = scsi_disk_emulate_inquiry(req, outbuf); 1889 if (buflen < 0) { 1890 goto illegal_request; 1891 } 1892 break; 1893 case MODE_SENSE: 1894 case MODE_SENSE_10: 1895 buflen = scsi_disk_emulate_mode_sense(r, outbuf); 1896 if (buflen < 0) { 1897 goto illegal_request; 1898 } 1899 break; 1900 case READ_TOC: 1901 buflen = scsi_disk_emulate_read_toc(req, outbuf); 1902 if (buflen < 0) { 1903 goto illegal_request; 1904 } 1905 break; 1906 case RESERVE: 1907 if (req->cmd.buf[1] & 1) { 1908 goto illegal_request; 1909 } 1910 break; 1911 case RESERVE_10: 1912 if (req->cmd.buf[1] & 3) { 1913 goto illegal_request; 1914 } 1915 break; 1916 case RELEASE: 1917 if (req->cmd.buf[1] & 1) { 1918 goto illegal_request; 1919 } 1920 break; 1921 case RELEASE_10: 1922 if (req->cmd.buf[1] & 3) { 1923 goto illegal_request; 1924 } 1925 break; 1926 case START_STOP: 1927 if (scsi_disk_emulate_start_stop(r) < 0) { 1928 return 0; 1929 } 1930 break; 1931 case ALLOW_MEDIUM_REMOVAL: 1932 s->tray_locked = req->cmd.buf[4] & 1; 1933 blk_lock_medium(s->qdev.conf.blk, req->cmd.buf[4] & 1); 1934 break; 1935 case READ_CAPACITY_10: 1936 /* The normal LEN field for this command is zero. */ 1937 memset(outbuf, 0, 8); 1938 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 1939 if (!nb_sectors) { 1940 scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY)); 1941 return 0; 1942 } 1943 if ((req->cmd.buf[8] & 1) == 0 && req->cmd.lba) { 1944 goto illegal_request; 1945 } 1946 nb_sectors /= s->qdev.blocksize / 512; 1947 /* Returned value is the address of the last sector. */ 1948 nb_sectors--; 1949 /* Remember the new size for read/write sanity checking. */ 1950 s->qdev.max_lba = nb_sectors; 1951 /* Clip to 2TB, instead of returning capacity modulo 2TB. */ 1952 if (nb_sectors > UINT32_MAX) { 1953 nb_sectors = UINT32_MAX; 1954 } 1955 outbuf[0] = (nb_sectors >> 24) & 0xff; 1956 outbuf[1] = (nb_sectors >> 16) & 0xff; 1957 outbuf[2] = (nb_sectors >> 8) & 0xff; 1958 outbuf[3] = nb_sectors & 0xff; 1959 outbuf[4] = 0; 1960 outbuf[5] = 0; 1961 outbuf[6] = s->qdev.blocksize >> 8; 1962 outbuf[7] = 0; 1963 break; 1964 case REQUEST_SENSE: 1965 /* Just return "NO SENSE". 
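         * The DESC bit in CDB byte 1 selects descriptor-format sense data;
         * with no stored sense, scsi_build_sense() returns an all-clear buffer.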
*/ 1966 buflen = scsi_build_sense(NULL, 0, outbuf, r->buflen, 1967 (req->cmd.buf[1] & 1) == 0); 1968 if (buflen < 0) { 1969 goto illegal_request; 1970 } 1971 break; 1972 case MECHANISM_STATUS: 1973 buflen = scsi_emulate_mechanism_status(s, outbuf); 1974 if (buflen < 0) { 1975 goto illegal_request; 1976 } 1977 break; 1978 case GET_CONFIGURATION: 1979 buflen = scsi_get_configuration(s, outbuf); 1980 if (buflen < 0) { 1981 goto illegal_request; 1982 } 1983 break; 1984 case GET_EVENT_STATUS_NOTIFICATION: 1985 buflen = scsi_get_event_status_notification(s, r, outbuf); 1986 if (buflen < 0) { 1987 goto illegal_request; 1988 } 1989 break; 1990 case READ_DISC_INFORMATION: 1991 buflen = scsi_read_disc_information(s, r, outbuf); 1992 if (buflen < 0) { 1993 goto illegal_request; 1994 } 1995 break; 1996 case READ_DVD_STRUCTURE: 1997 buflen = scsi_read_dvd_structure(s, r, outbuf); 1998 if (buflen < 0) { 1999 goto illegal_request; 2000 } 2001 break; 2002 case SERVICE_ACTION_IN_16: 2003 /* Service Action In subcommands. */ 2004 if ((req->cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) { 2005 DPRINTF("SAI READ CAPACITY(16)\n"); 2006 memset(outbuf, 0, req->cmd.xfer); 2007 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 2008 if (!nb_sectors) { 2009 scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY)); 2010 return 0; 2011 } 2012 if ((req->cmd.buf[14] & 1) == 0 && req->cmd.lba) { 2013 goto illegal_request; 2014 } 2015 nb_sectors /= s->qdev.blocksize / 512; 2016 /* Returned value is the address of the last sector. */ 2017 nb_sectors--; 2018 /* Remember the new size for read/write sanity checking. */ 2019 s->qdev.max_lba = nb_sectors; 2020 outbuf[0] = (nb_sectors >> 56) & 0xff; 2021 outbuf[1] = (nb_sectors >> 48) & 0xff; 2022 outbuf[2] = (nb_sectors >> 40) & 0xff; 2023 outbuf[3] = (nb_sectors >> 32) & 0xff; 2024 outbuf[4] = (nb_sectors >> 24) & 0xff; 2025 outbuf[5] = (nb_sectors >> 16) & 0xff; 2026 outbuf[6] = (nb_sectors >> 8) & 0xff; 2027 outbuf[7] = nb_sectors & 0xff; 2028 outbuf[8] = 0; 2029 outbuf[9] = 0; 2030 outbuf[10] = s->qdev.blocksize >> 8; 2031 outbuf[11] = 0; 2032 outbuf[12] = 0; 2033 outbuf[13] = get_physical_block_exp(&s->qdev.conf); 2034 2035 /* set TPE bit if the format supports discard */ 2036 if (s->qdev.conf.discard_granularity) { 2037 outbuf[14] = 0x80; 2038 } 2039 2040 /* Protection, exponent and lowest lba field left blank. */ 2041 break; 2042 } 2043 DPRINTF("Unsupported Service Action In\n"); 2044 goto illegal_request; 2045 case SYNCHRONIZE_CACHE: 2046 /* The request is used as the AIO opaque value, so add a ref. 
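         * SYNCHRONIZE CACHE maps to a full backend flush; scsi_aio_complete()
         * reports GOOD status once the flush finishes.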
*/ 2047 scsi_req_ref(&r->req); 2048 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0, 2049 BLOCK_ACCT_FLUSH); 2050 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r); 2051 return 0; 2052 case SEEK_10: 2053 DPRINTF("Seek(10) (sector %" PRId64 ")\n", r->req.cmd.lba); 2054 if (r->req.cmd.lba > s->qdev.max_lba) { 2055 goto illegal_lba; 2056 } 2057 break; 2058 case MODE_SELECT: 2059 DPRINTF("Mode Select(6) (len %lu)\n", (unsigned long)r->req.cmd.xfer); 2060 break; 2061 case MODE_SELECT_10: 2062 DPRINTF("Mode Select(10) (len %lu)\n", (unsigned long)r->req.cmd.xfer); 2063 break; 2064 case UNMAP: 2065 DPRINTF("Unmap (len %lu)\n", (unsigned long)r->req.cmd.xfer); 2066 break; 2067 case VERIFY_10: 2068 case VERIFY_12: 2069 case VERIFY_16: 2070 DPRINTF("Verify (bytchk %d)\n", (req->cmd.buf[1] >> 1) & 3); 2071 if (req->cmd.buf[1] & 6) { 2072 goto illegal_request; 2073 } 2074 break; 2075 case WRITE_SAME_10: 2076 case WRITE_SAME_16: 2077 DPRINTF("WRITE SAME %d (len %lu)\n", 2078 req->cmd.buf[0] == WRITE_SAME_10 ? 10 : 16, 2079 (unsigned long)r->req.cmd.xfer); 2080 break; 2081 default: 2082 DPRINTF("Unknown SCSI command (%2.2x=%s)\n", buf[0], 2083 scsi_command_name(buf[0])); 2084 scsi_check_condition(r, SENSE_CODE(INVALID_OPCODE)); 2085 return 0; 2086 } 2087 assert(!r->req.aiocb); 2088 r->iov.iov_len = MIN(r->buflen, req->cmd.xfer); 2089 if (r->iov.iov_len == 0) { 2090 scsi_req_complete(&r->req, GOOD); 2091 } 2092 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { 2093 assert(r->iov.iov_len == req->cmd.xfer); 2094 return -r->iov.iov_len; 2095 } else { 2096 return r->iov.iov_len; 2097 } 2098 2099 illegal_request: 2100 if (r->req.status == -1) { 2101 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 2102 } 2103 return 0; 2104 2105 illegal_lba: 2106 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE)); 2107 return 0; 2108 } 2109 2110 /* Execute a scsi command. Returns the length of the data expected by the 2111 command. This will be Positive for data transfers from the device 2112 (eg. disk reads), negative for transfers to the device (eg. disk writes), 2113 and zero if the command does not transfer any data. */ 2114 2115 static int32_t scsi_disk_dma_command(SCSIRequest *req, uint8_t *buf) 2116 { 2117 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 2118 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 2119 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s)); 2120 uint32_t len; 2121 uint8_t command; 2122 2123 command = buf[0]; 2124 2125 if (!blk_is_available(s->qdev.conf.blk)) { 2126 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM)); 2127 return 0; 2128 } 2129 2130 len = scsi_data_cdb_xfer(r->req.cmd.buf); 2131 switch (command) { 2132 case READ_6: 2133 case READ_10: 2134 case READ_12: 2135 case READ_16: 2136 DPRINTF("Read (sector %" PRId64 ", count %u)\n", r->req.cmd.lba, len); 2137 if (r->req.cmd.buf[1] & 0xe0) { 2138 goto illegal_request; 2139 } 2140 if (!check_lba_range(s, r->req.cmd.lba, len)) { 2141 goto illegal_lba; 2142 } 2143 r->sector = r->req.cmd.lba * (s->qdev.blocksize / 512); 2144 r->sector_count = len * (s->qdev.blocksize / 512); 2145 break; 2146 case WRITE_6: 2147 case WRITE_10: 2148 case WRITE_12: 2149 case WRITE_16: 2150 case WRITE_VERIFY_10: 2151 case WRITE_VERIFY_12: 2152 case WRITE_VERIFY_16: 2153 if (blk_is_read_only(s->qdev.conf.blk)) { 2154 scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED)); 2155 return 0; 2156 } 2157 DPRINTF("Write %s(sector %" PRId64 ", count %u)\n", 2158 (command & 0xe) == 0xe ? 
"And Verify " : "", 2159 r->req.cmd.lba, len); 2160 case VERIFY_10: 2161 case VERIFY_12: 2162 case VERIFY_16: 2163 /* We get here only for BYTCHK == 0x01 and only for scsi-block. 2164 * As far as DMA is concerned, we can treat it the same as a write; 2165 * scsi_block_do_sgio will send VERIFY commands. 2166 */ 2167 if (r->req.cmd.buf[1] & 0xe0) { 2168 goto illegal_request; 2169 } 2170 if (!check_lba_range(s, r->req.cmd.lba, len)) { 2171 goto illegal_lba; 2172 } 2173 r->sector = r->req.cmd.lba * (s->qdev.blocksize / 512); 2174 r->sector_count = len * (s->qdev.blocksize / 512); 2175 break; 2176 default: 2177 abort(); 2178 illegal_request: 2179 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 2180 return 0; 2181 illegal_lba: 2182 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE)); 2183 return 0; 2184 } 2185 r->need_fua_emulation = sdc->need_fua_emulation(&r->req.cmd); 2186 if (r->sector_count == 0) { 2187 scsi_req_complete(&r->req, GOOD); 2188 } 2189 assert(r->iov.iov_len == 0); 2190 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { 2191 return -r->sector_count * 512; 2192 } else { 2193 return r->sector_count * 512; 2194 } 2195 } 2196 2197 static void scsi_disk_reset(DeviceState *dev) 2198 { 2199 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev.qdev, dev); 2200 uint64_t nb_sectors; 2201 2202 scsi_device_purge_requests(&s->qdev, SENSE_CODE(RESET)); 2203 2204 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 2205 nb_sectors /= s->qdev.blocksize / 512; 2206 if (nb_sectors) { 2207 nb_sectors--; 2208 } 2209 s->qdev.max_lba = nb_sectors; 2210 /* reset tray statuses */ 2211 s->tray_locked = 0; 2212 s->tray_open = 0; 2213 } 2214 2215 static void scsi_disk_resize_cb(void *opaque) 2216 { 2217 SCSIDiskState *s = opaque; 2218 2219 /* SPC lists this sense code as available only for 2220 * direct-access devices. 2221 */ 2222 if (s->qdev.type == TYPE_DISK) { 2223 scsi_device_report_change(&s->qdev, SENSE_CODE(CAPACITY_CHANGED)); 2224 } 2225 } 2226 2227 static void scsi_cd_change_media_cb(void *opaque, bool load) 2228 { 2229 SCSIDiskState *s = opaque; 2230 2231 /* 2232 * When a CD gets changed, we have to report an ejected state and 2233 * then a loaded state to guests so that they detect tray 2234 * open/close and media change events. Guests that do not use 2235 * GET_EVENT_STATUS_NOTIFICATION to detect such tray open/close 2236 * states rely on this behavior. 2237 * 2238 * media_changed governs the state machine used for unit attention 2239 * report. media_event is used by GET EVENT STATUS NOTIFICATION. 
2240 */ 2241 s->media_changed = load; 2242 s->tray_open = !load; 2243 scsi_device_set_ua(&s->qdev, SENSE_CODE(UNIT_ATTENTION_NO_MEDIUM)); 2244 s->media_event = true; 2245 s->eject_request = false; 2246 } 2247 2248 static void scsi_cd_eject_request_cb(void *opaque, bool force) 2249 { 2250 SCSIDiskState *s = opaque; 2251 2252 s->eject_request = true; 2253 if (force) { 2254 s->tray_locked = false; 2255 } 2256 } 2257 2258 static bool scsi_cd_is_tray_open(void *opaque) 2259 { 2260 return ((SCSIDiskState *)opaque)->tray_open; 2261 } 2262 2263 static bool scsi_cd_is_medium_locked(void *opaque) 2264 { 2265 return ((SCSIDiskState *)opaque)->tray_locked; 2266 } 2267 2268 static const BlockDevOps scsi_disk_removable_block_ops = { 2269 .change_media_cb = scsi_cd_change_media_cb, 2270 .eject_request_cb = scsi_cd_eject_request_cb, 2271 .is_tray_open = scsi_cd_is_tray_open, 2272 .is_medium_locked = scsi_cd_is_medium_locked, 2273 2274 .resize_cb = scsi_disk_resize_cb, 2275 }; 2276 2277 static const BlockDevOps scsi_disk_block_ops = { 2278 .resize_cb = scsi_disk_resize_cb, 2279 }; 2280 2281 static void scsi_disk_unit_attention_reported(SCSIDevice *dev) 2282 { 2283 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2284 if (s->media_changed) { 2285 s->media_changed = false; 2286 scsi_device_set_ua(&s->qdev, SENSE_CODE(MEDIUM_CHANGED)); 2287 } 2288 } 2289 2290 static void scsi_realize(SCSIDevice *dev, Error **errp) 2291 { 2292 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2293 Error *err = NULL; 2294 2295 if (!s->qdev.conf.blk) { 2296 error_setg(errp, "drive property not set"); 2297 return; 2298 } 2299 2300 if (!(s->features & (1 << SCSI_DISK_F_REMOVABLE)) && 2301 !blk_is_inserted(s->qdev.conf.blk)) { 2302 error_setg(errp, "Device needs media, but drive is empty"); 2303 return; 2304 } 2305 2306 blkconf_serial(&s->qdev.conf, &s->serial); 2307 blkconf_blocksizes(&s->qdev.conf); 2308 if (dev->type == TYPE_DISK) { 2309 blkconf_geometry(&dev->conf, NULL, 65535, 255, 255, &err); 2310 if (err) { 2311 error_propagate(errp, err); 2312 return; 2313 } 2314 } 2315 blkconf_apply_backend_options(&dev->conf); 2316 2317 if (s->qdev.conf.discard_granularity == -1) { 2318 s->qdev.conf.discard_granularity = 2319 MAX(s->qdev.conf.logical_block_size, DEFAULT_DISCARD_GRANULARITY); 2320 } 2321 2322 if (!s->version) { 2323 s->version = g_strdup(qemu_hw_version()); 2324 } 2325 if (!s->vendor) { 2326 s->vendor = g_strdup("QEMU"); 2327 } 2328 2329 if (blk_is_sg(s->qdev.conf.blk)) { 2330 error_setg(errp, "unwanted /dev/sg*"); 2331 return; 2332 } 2333 2334 if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) && 2335 !(s->features & (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS))) { 2336 blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_removable_block_ops, s); 2337 } else { 2338 blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_block_ops, s); 2339 } 2340 blk_set_guest_block_size(s->qdev.conf.blk, s->qdev.blocksize); 2341 2342 blk_iostatus_enable(s->qdev.conf.blk); 2343 } 2344 2345 static void scsi_hd_realize(SCSIDevice *dev, Error **errp) 2346 { 2347 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2348 /* can happen for devices without drive. 
The error message for missing 2349 * backend will be issued in scsi_realize 2350 */ 2351 if (s->qdev.conf.blk) { 2352 blkconf_blocksizes(&s->qdev.conf); 2353 } 2354 s->qdev.blocksize = s->qdev.conf.logical_block_size; 2355 s->qdev.type = TYPE_DISK; 2356 if (!s->product) { 2357 s->product = g_strdup("QEMU HARDDISK"); 2358 } 2359 scsi_realize(&s->qdev, errp); 2360 } 2361 2362 static void scsi_cd_realize(SCSIDevice *dev, Error **errp) 2363 { 2364 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2365 2366 if (!dev->conf.blk) { 2367 dev->conf.blk = blk_new(); 2368 } 2369 2370 s->qdev.blocksize = 2048; 2371 s->qdev.type = TYPE_ROM; 2372 s->features |= 1 << SCSI_DISK_F_REMOVABLE; 2373 if (!s->product) { 2374 s->product = g_strdup("QEMU CD-ROM"); 2375 } 2376 scsi_realize(&s->qdev, errp); 2377 } 2378 2379 static void scsi_disk_realize(SCSIDevice *dev, Error **errp) 2380 { 2381 DriveInfo *dinfo; 2382 Error *local_err = NULL; 2383 2384 if (!dev->conf.blk) { 2385 scsi_realize(dev, &local_err); 2386 assert(local_err); 2387 error_propagate(errp, local_err); 2388 return; 2389 } 2390 2391 dinfo = blk_legacy_dinfo(dev->conf.blk); 2392 if (dinfo && dinfo->media_cd) { 2393 scsi_cd_realize(dev, errp); 2394 } else { 2395 scsi_hd_realize(dev, errp); 2396 } 2397 } 2398 2399 static const SCSIReqOps scsi_disk_emulate_reqops = { 2400 .size = sizeof(SCSIDiskReq), 2401 .free_req = scsi_free_request, 2402 .send_command = scsi_disk_emulate_command, 2403 .read_data = scsi_disk_emulate_read_data, 2404 .write_data = scsi_disk_emulate_write_data, 2405 .get_buf = scsi_get_buf, 2406 }; 2407 2408 static const SCSIReqOps scsi_disk_dma_reqops = { 2409 .size = sizeof(SCSIDiskReq), 2410 .free_req = scsi_free_request, 2411 .send_command = scsi_disk_dma_command, 2412 .read_data = scsi_read_data, 2413 .write_data = scsi_write_data, 2414 .get_buf = scsi_get_buf, 2415 .load_request = scsi_disk_load_request, 2416 .save_request = scsi_disk_save_request, 2417 }; 2418 2419 static const SCSIReqOps *const scsi_disk_reqops_dispatch[256] = { 2420 [TEST_UNIT_READY] = &scsi_disk_emulate_reqops, 2421 [INQUIRY] = &scsi_disk_emulate_reqops, 2422 [MODE_SENSE] = &scsi_disk_emulate_reqops, 2423 [MODE_SENSE_10] = &scsi_disk_emulate_reqops, 2424 [START_STOP] = &scsi_disk_emulate_reqops, 2425 [ALLOW_MEDIUM_REMOVAL] = &scsi_disk_emulate_reqops, 2426 [READ_CAPACITY_10] = &scsi_disk_emulate_reqops, 2427 [READ_TOC] = &scsi_disk_emulate_reqops, 2428 [READ_DVD_STRUCTURE] = &scsi_disk_emulate_reqops, 2429 [READ_DISC_INFORMATION] = &scsi_disk_emulate_reqops, 2430 [GET_CONFIGURATION] = &scsi_disk_emulate_reqops, 2431 [GET_EVENT_STATUS_NOTIFICATION] = &scsi_disk_emulate_reqops, 2432 [MECHANISM_STATUS] = &scsi_disk_emulate_reqops, 2433 [SERVICE_ACTION_IN_16] = &scsi_disk_emulate_reqops, 2434 [REQUEST_SENSE] = &scsi_disk_emulate_reqops, 2435 [SYNCHRONIZE_CACHE] = &scsi_disk_emulate_reqops, 2436 [SEEK_10] = &scsi_disk_emulate_reqops, 2437 [MODE_SELECT] = &scsi_disk_emulate_reqops, 2438 [MODE_SELECT_10] = &scsi_disk_emulate_reqops, 2439 [UNMAP] = &scsi_disk_emulate_reqops, 2440 [WRITE_SAME_10] = &scsi_disk_emulate_reqops, 2441 [WRITE_SAME_16] = &scsi_disk_emulate_reqops, 2442 [VERIFY_10] = &scsi_disk_emulate_reqops, 2443 [VERIFY_12] = &scsi_disk_emulate_reqops, 2444 [VERIFY_16] = &scsi_disk_emulate_reqops, 2445 2446 [READ_6] = &scsi_disk_dma_reqops, 2447 [READ_10] = &scsi_disk_dma_reqops, 2448 [READ_12] = &scsi_disk_dma_reqops, 2449 [READ_16] = &scsi_disk_dma_reqops, 2450 [WRITE_6] = &scsi_disk_dma_reqops, 2451 [WRITE_10] = &scsi_disk_dma_reqops, 2452 
    [WRITE_12] = &scsi_disk_dma_reqops,
    [WRITE_16] = &scsi_disk_dma_reqops,
    [WRITE_VERIFY_10] = &scsi_disk_dma_reqops,
    [WRITE_VERIFY_12] = &scsi_disk_dma_reqops,
    [WRITE_VERIFY_16] = &scsi_disk_dma_reqops,
};

static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun,
                                     uint8_t *buf, void *hba_private)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);
    SCSIRequest *req;
    const SCSIReqOps *ops;
    uint8_t command;

    command = buf[0];
    ops = scsi_disk_reqops_dispatch[command];
    if (!ops) {
        ops = &scsi_disk_emulate_reqops;
    }
    req = scsi_req_alloc(ops, &s->qdev, tag, lun, hba_private);

#ifdef DEBUG_SCSI
    DPRINTF("Command: lun=%d tag=0x%x data=0x%02x", lun, tag, buf[0]);
    {
        int i;
        for (i = 1; i < scsi_cdb_length(buf); i++) {
            printf(" 0x%02x", buf[i]);
        }
        printf("\n");
    }
#endif

    return req;
}

#ifdef __linux__
static int get_device_type(SCSIDiskState *s)
{
    uint8_t cmd[16];
    uint8_t buf[36];
    uint8_t sensebuf[8];
    sg_io_hdr_t io_header;
    int ret;

    memset(cmd, 0, sizeof(cmd));
    memset(buf, 0, sizeof(buf));
    cmd[0] = INQUIRY;
    cmd[4] = sizeof(buf);

    memset(&io_header, 0, sizeof(io_header));
    io_header.interface_id = 'S';
    io_header.dxfer_direction = SG_DXFER_FROM_DEV;
    io_header.dxfer_len = sizeof(buf);
    io_header.dxferp = buf;
    io_header.cmdp = cmd;
    io_header.cmd_len = sizeof(cmd);
    io_header.mx_sb_len = sizeof(sensebuf);
    io_header.sbp = sensebuf;
    io_header.timeout = 6000; /* XXX */

    ret = blk_ioctl(s->qdev.conf.blk, SG_IO, &io_header);
    if (ret < 0 || io_header.driver_status || io_header.host_status) {
        return -1;
    }
    s->qdev.type = buf[0];
    if (buf[1] & 0x80) {
        s->features |= 1 << SCSI_DISK_F_REMOVABLE;
    }
    return 0;
}

static void scsi_block_realize(SCSIDevice *dev, Error **errp)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
    int sg_version;
    int rc;

    if (!s->qdev.conf.blk) {
        error_setg(errp, "drive property not set");
        return;
    }

    /* Check that we are using a driver managing SG_IO (version 3 and after). */
    rc = blk_ioctl(s->qdev.conf.blk, SG_GET_VERSION_NUM, &sg_version);
    if (rc < 0) {
        error_setg(errp, "cannot get SG_IO version number: %s. "
                   "Is this a SCSI device?",
                   strerror(-rc));
        return;
    }
    if (sg_version < 30000) {
        error_setg(errp, "scsi generic interface too old");
        return;
    }

    /* Get the device type from the INQUIRY data. */
    rc = get_device_type(s);
    if (rc < 0) {
        error_setg(errp, "INQUIRY failed");
        return;
    }

    /* Make a guess for the block size; we'll fix it when the guest sends
     * READ CAPACITY.  If it doesn't, it would likely assume these sizes
     * anyway.  (TODO: check in /sys.)
     */
    if (s->qdev.type == TYPE_ROM || s->qdev.type == TYPE_WORM) {
        s->qdev.blocksize = 2048;
    } else {
        s->qdev.blocksize = 512;
    }

    /* Make the scsi-block device unremovable via the HMP and QMP eject
     * commands.
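     * With SCSI_DISK_F_NO_REMOVABLE_DEVOPS set, scsi_realize() installs
     * scsi_disk_block_ops (resize notification only) instead of the
     * removable-medium dev-ops.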
     */
    s->features |= (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS);

    scsi_realize(&s->qdev, errp);
    scsi_generic_read_device_identification(&s->qdev);
}

typedef struct SCSIBlockReq {
    SCSIDiskReq req;
    sg_io_hdr_t io_header;

    /* Selected bytes of the original CDB, copied into our own CDB. */
    uint8_t cmd, cdb1, group_number;

    /* CDB passed to SG_IO. */
    uint8_t cdb[16];
} SCSIBlockReq;

static BlockAIOCB *scsi_block_do_sgio(SCSIBlockReq *req,
                                      int64_t offset, QEMUIOVector *iov,
                                      int direction,
                                      BlockCompletionFunc *cb, void *opaque)
{
    sg_io_hdr_t *io_header = &req->io_header;
    SCSIDiskReq *r = &req->req;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    int nb_logical_blocks;
    uint64_t lba;
    BlockAIOCB *aiocb;

    /* This is not supported yet.  It can only happen if the guest does
     * reads and writes that are not aligned to the logical block size
     * _and_ cover multiple MemoryRegions.
     */
    assert(offset % s->qdev.blocksize == 0);
    assert(iov->size % s->qdev.blocksize == 0);

    io_header->interface_id = 'S';

    /* The data transfer comes from the QEMUIOVector. */
    io_header->dxfer_direction = direction;
    io_header->dxfer_len = iov->size;
    io_header->dxferp = (void *)iov->iov;
    io_header->iovec_count = iov->niov;
    assert(io_header->iovec_count == iov->niov); /* no overflow! */

    /* Build a new CDB with the LBA and length patched in, in case the
     * DMA helpers split the transfer into multiple segments.  Do not
     * build a CDB smaller than what the guest wanted, and only build
     * a larger one if strictly necessary.
     */
    io_header->cmdp = req->cdb;
    lba = offset / s->qdev.blocksize;
    nb_logical_blocks = io_header->dxfer_len / s->qdev.blocksize;

    if ((req->cmd >> 5) == 0 && lba <= 0x1ffff) {
        /* 6-byte CDB */
        stl_be_p(&req->cdb[0], lba | (req->cmd << 24));
        req->cdb[4] = nb_logical_blocks;
        req->cdb[5] = 0;
        io_header->cmd_len = 6;
    } else if ((req->cmd >> 5) <= 1 && lba <= 0xffffffffULL) {
        /* 10-byte CDB */
        req->cdb[0] = (req->cmd & 0x1f) | 0x20;
        req->cdb[1] = req->cdb1;
        stl_be_p(&req->cdb[2], lba);
        req->cdb[6] = req->group_number;
        stw_be_p(&req->cdb[7], nb_logical_blocks);
        req->cdb[9] = 0;
        io_header->cmd_len = 10;
    } else if ((req->cmd >> 5) != 4 && lba <= 0xffffffffULL) {
        /* 12-byte CDB */
        req->cdb[0] = (req->cmd & 0x1f) | 0xA0;
        req->cdb[1] = req->cdb1;
        stl_be_p(&req->cdb[2], lba);
        stl_be_p(&req->cdb[6], nb_logical_blocks);
        req->cdb[10] = req->group_number;
        req->cdb[11] = 0;
        io_header->cmd_len = 12;
    } else {
        /* 16-byte CDB */
        req->cdb[0] = (req->cmd & 0x1f) | 0x80;
        req->cdb[1] = req->cdb1;
        stq_be_p(&req->cdb[2], lba);
        stl_be_p(&req->cdb[10], nb_logical_blocks);
        req->cdb[14] = req->group_number;
        req->cdb[15] = 0;
        io_header->cmd_len = 16;
    }

    /* The rest is as in scsi-generic.c.
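     * Sense data from the host device is returned directly into r->req.sense.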
*/ 2658 io_header->mx_sb_len = sizeof(r->req.sense); 2659 io_header->sbp = r->req.sense; 2660 io_header->timeout = UINT_MAX; 2661 io_header->usr_ptr = r; 2662 io_header->flags |= SG_FLAG_DIRECT_IO; 2663 2664 aiocb = blk_aio_ioctl(s->qdev.conf.blk, SG_IO, io_header, cb, opaque); 2665 assert(aiocb != NULL); 2666 return aiocb; 2667 } 2668 2669 static bool scsi_block_no_fua(SCSICommand *cmd) 2670 { 2671 return false; 2672 } 2673 2674 static BlockAIOCB *scsi_block_dma_readv(int64_t offset, 2675 QEMUIOVector *iov, 2676 BlockCompletionFunc *cb, void *cb_opaque, 2677 void *opaque) 2678 { 2679 SCSIBlockReq *r = opaque; 2680 return scsi_block_do_sgio(r, offset, iov, 2681 SG_DXFER_FROM_DEV, cb, cb_opaque); 2682 } 2683 2684 static BlockAIOCB *scsi_block_dma_writev(int64_t offset, 2685 QEMUIOVector *iov, 2686 BlockCompletionFunc *cb, void *cb_opaque, 2687 void *opaque) 2688 { 2689 SCSIBlockReq *r = opaque; 2690 return scsi_block_do_sgio(r, offset, iov, 2691 SG_DXFER_TO_DEV, cb, cb_opaque); 2692 } 2693 2694 static bool scsi_block_is_passthrough(SCSIDiskState *s, uint8_t *buf) 2695 { 2696 switch (buf[0]) { 2697 case VERIFY_10: 2698 case VERIFY_12: 2699 case VERIFY_16: 2700 /* Check if BYTCHK == 0x01 (data-out buffer contains data 2701 * for the number of logical blocks specified in the length 2702 * field). For other modes, do not use scatter/gather operation. 2703 */ 2704 if ((buf[1] & 6) == 2) { 2705 return false; 2706 } 2707 break; 2708 2709 case READ_6: 2710 case READ_10: 2711 case READ_12: 2712 case READ_16: 2713 case WRITE_6: 2714 case WRITE_10: 2715 case WRITE_12: 2716 case WRITE_16: 2717 case WRITE_VERIFY_10: 2718 case WRITE_VERIFY_12: 2719 case WRITE_VERIFY_16: 2720 /* MMC writing cannot be done via DMA helpers, because it sometimes 2721 * involves writing beyond the maximum LBA or to negative LBA (lead-in). 2722 * We might use scsi_block_dma_reqops as long as no writing commands are 2723 * seen, but performance usually isn't paramount on optical media. So, 2724 * just make scsi-block operate the same as scsi-generic for them. 2725 */ 2726 if (s->qdev.type != TYPE_ROM) { 2727 return false; 2728 } 2729 break; 2730 2731 default: 2732 break; 2733 } 2734 2735 return true; 2736 } 2737 2738 2739 static int32_t scsi_block_dma_command(SCSIRequest *req, uint8_t *buf) 2740 { 2741 SCSIBlockReq *r = (SCSIBlockReq *)req; 2742 r->cmd = req->cmd.buf[0]; 2743 switch (r->cmd >> 5) { 2744 case 0: 2745 /* 6-byte CDB. */ 2746 r->cdb1 = r->group_number = 0; 2747 break; 2748 case 1: 2749 /* 10-byte CDB. */ 2750 r->cdb1 = req->cmd.buf[1]; 2751 r->group_number = req->cmd.buf[6]; 2752 break; 2753 case 4: 2754 /* 12-byte CDB. */ 2755 r->cdb1 = req->cmd.buf[1]; 2756 r->group_number = req->cmd.buf[10]; 2757 break; 2758 case 5: 2759 /* 16-byte CDB. */ 2760 r->cdb1 = req->cmd.buf[1]; 2761 r->group_number = req->cmd.buf[14]; 2762 break; 2763 default: 2764 abort(); 2765 } 2766 2767 if (r->cdb1 & 0xe0) { 2768 /* Protection information is not supported. 
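The 0xe0 mask covers the RDPROTECT/WRPROTECT/VRPROTECT field in byte 1 of the READ/WRITE/VERIFY CDBs; a request that asks for protection checking is rejected with INVALID FIELD IN CDB.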
*/ 2769 scsi_check_condition(&r->req, SENSE_CODE(INVALID_FIELD)); 2770 return 0; 2771 } 2772 2773 r->req.status = &r->io_header.status; 2774 return scsi_disk_dma_command(req, buf); 2775 } 2776 2777 static const SCSIReqOps scsi_block_dma_reqops = { 2778 .size = sizeof(SCSIBlockReq), 2779 .free_req = scsi_free_request, 2780 .send_command = scsi_block_dma_command, 2781 .read_data = scsi_read_data, 2782 .write_data = scsi_write_data, 2783 .get_buf = scsi_get_buf, 2784 .load_request = scsi_disk_load_request, 2785 .save_request = scsi_disk_save_request, 2786 }; 2787 2788 static SCSIRequest *scsi_block_new_request(SCSIDevice *d, uint32_t tag, 2789 uint32_t lun, uint8_t *buf, 2790 void *hba_private) 2791 { 2792 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d); 2793 2794 if (scsi_block_is_passthrough(s, buf)) { 2795 return scsi_req_alloc(&scsi_generic_req_ops, &s->qdev, tag, lun, 2796 hba_private); 2797 } else { 2798 return scsi_req_alloc(&scsi_block_dma_reqops, &s->qdev, tag, lun, 2799 hba_private); 2800 } 2801 } 2802 2803 static int scsi_block_parse_cdb(SCSIDevice *d, SCSICommand *cmd, 2804 uint8_t *buf, void *hba_private) 2805 { 2806 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d); 2807 2808 if (scsi_block_is_passthrough(s, buf)) { 2809 return scsi_bus_parse_cdb(&s->qdev, cmd, buf, hba_private); 2810 } else { 2811 return scsi_req_parse_cdb(&s->qdev, cmd, buf); 2812 } 2813 } 2814 2815 #endif 2816 2817 static 2818 BlockAIOCB *scsi_dma_readv(int64_t offset, QEMUIOVector *iov, 2819 BlockCompletionFunc *cb, void *cb_opaque, 2820 void *opaque) 2821 { 2822 SCSIDiskReq *r = opaque; 2823 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 2824 return blk_aio_preadv(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque); 2825 } 2826 2827 static 2828 BlockAIOCB *scsi_dma_writev(int64_t offset, QEMUIOVector *iov, 2829 BlockCompletionFunc *cb, void *cb_opaque, 2830 void *opaque) 2831 { 2832 SCSIDiskReq *r = opaque; 2833 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 2834 return blk_aio_pwritev(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque); 2835 } 2836 2837 static void scsi_disk_base_class_initfn(ObjectClass *klass, void *data) 2838 { 2839 DeviceClass *dc = DEVICE_CLASS(klass); 2840 SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass); 2841 2842 dc->fw_name = "disk"; 2843 dc->reset = scsi_disk_reset; 2844 sdc->dma_readv = scsi_dma_readv; 2845 sdc->dma_writev = scsi_dma_writev; 2846 sdc->need_fua_emulation = scsi_is_cmd_fua; 2847 } 2848 2849 static const TypeInfo scsi_disk_base_info = { 2850 .name = TYPE_SCSI_DISK_BASE, 2851 .parent = TYPE_SCSI_DEVICE, 2852 .class_init = scsi_disk_base_class_initfn, 2853 .instance_size = sizeof(SCSIDiskState), 2854 .class_size = sizeof(SCSIDiskClass), 2855 .abstract = true, 2856 }; 2857 2858 #define DEFINE_SCSI_DISK_PROPERTIES() \ 2859 DEFINE_BLOCK_PROPERTIES(SCSIDiskState, qdev.conf), \ 2860 DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf), \ 2861 DEFINE_PROP_STRING("ver", SCSIDiskState, version), \ 2862 DEFINE_PROP_STRING("serial", SCSIDiskState, serial), \ 2863 DEFINE_PROP_STRING("vendor", SCSIDiskState, vendor), \ 2864 DEFINE_PROP_STRING("product", SCSIDiskState, product) 2865 2866 static Property scsi_hd_properties[] = { 2867 DEFINE_SCSI_DISK_PROPERTIES(), 2868 DEFINE_PROP_BIT("removable", SCSIDiskState, features, 2869 SCSI_DISK_F_REMOVABLE, false), 2870 DEFINE_PROP_BIT("dpofua", SCSIDiskState, features, 2871 SCSI_DISK_F_DPOFUA, false), 2872 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0), 2873 DEFINE_PROP_UINT64("port_wwn", 
SCSIDiskState, qdev.port_wwn, 0), 2874 DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0), 2875 DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size, 2876 DEFAULT_MAX_UNMAP_SIZE), 2877 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size, 2878 DEFAULT_MAX_IO_SIZE), 2879 DEFINE_BLOCK_CHS_PROPERTIES(SCSIDiskState, qdev.conf), 2880 DEFINE_PROP_END_OF_LIST(), 2881 }; 2882 2883 static const VMStateDescription vmstate_scsi_disk_state = { 2884 .name = "scsi-disk", 2885 .version_id = 1, 2886 .minimum_version_id = 1, 2887 .fields = (VMStateField[]) { 2888 VMSTATE_SCSI_DEVICE(qdev, SCSIDiskState), 2889 VMSTATE_BOOL(media_changed, SCSIDiskState), 2890 VMSTATE_BOOL(media_event, SCSIDiskState), 2891 VMSTATE_BOOL(eject_request, SCSIDiskState), 2892 VMSTATE_BOOL(tray_open, SCSIDiskState), 2893 VMSTATE_BOOL(tray_locked, SCSIDiskState), 2894 VMSTATE_END_OF_LIST() 2895 } 2896 }; 2897 2898 static void scsi_hd_class_initfn(ObjectClass *klass, void *data) 2899 { 2900 DeviceClass *dc = DEVICE_CLASS(klass); 2901 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); 2902 2903 sc->realize = scsi_hd_realize; 2904 sc->alloc_req = scsi_new_request; 2905 sc->unit_attention_reported = scsi_disk_unit_attention_reported; 2906 dc->desc = "virtual SCSI disk"; 2907 dc->props = scsi_hd_properties; 2908 dc->vmsd = &vmstate_scsi_disk_state; 2909 } 2910 2911 static const TypeInfo scsi_hd_info = { 2912 .name = "scsi-hd", 2913 .parent = TYPE_SCSI_DISK_BASE, 2914 .class_init = scsi_hd_class_initfn, 2915 }; 2916 2917 static Property scsi_cd_properties[] = { 2918 DEFINE_SCSI_DISK_PROPERTIES(), 2919 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0), 2920 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0), 2921 DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0), 2922 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size, 2923 DEFAULT_MAX_IO_SIZE), 2924 DEFINE_PROP_END_OF_LIST(), 2925 }; 2926 2927 static void scsi_cd_class_initfn(ObjectClass *klass, void *data) 2928 { 2929 DeviceClass *dc = DEVICE_CLASS(klass); 2930 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); 2931 2932 sc->realize = scsi_cd_realize; 2933 sc->alloc_req = scsi_new_request; 2934 sc->unit_attention_reported = scsi_disk_unit_attention_reported; 2935 dc->desc = "virtual SCSI CD-ROM"; 2936 dc->props = scsi_cd_properties; 2937 dc->vmsd = &vmstate_scsi_disk_state; 2938 } 2939 2940 static const TypeInfo scsi_cd_info = { 2941 .name = "scsi-cd", 2942 .parent = TYPE_SCSI_DISK_BASE, 2943 .class_init = scsi_cd_class_initfn, 2944 }; 2945 2946 #ifdef __linux__ 2947 static Property scsi_block_properties[] = { 2948 DEFINE_PROP_DRIVE("drive", SCSIDiskState, qdev.conf.blk), 2949 DEFINE_PROP_END_OF_LIST(), 2950 }; 2951 2952 static void scsi_block_class_initfn(ObjectClass *klass, void *data) 2953 { 2954 DeviceClass *dc = DEVICE_CLASS(klass); 2955 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); 2956 SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass); 2957 2958 sc->realize = scsi_block_realize; 2959 sc->alloc_req = scsi_block_new_request; 2960 sc->parse_cdb = scsi_block_parse_cdb; 2961 sdc->dma_readv = scsi_block_dma_readv; 2962 sdc->dma_writev = scsi_block_dma_writev; 2963 sdc->need_fua_emulation = scsi_block_no_fua; 2964 dc->desc = "SCSI block device passthrough"; 2965 dc->props = scsi_block_properties; 2966 dc->vmsd = &vmstate_scsi_disk_state; 2967 } 2968 2969 static const TypeInfo scsi_block_info = { 2970 .name = "scsi-block", 2971 .parent = TYPE_SCSI_DISK_BASE, 2972 .class_init = scsi_block_class_initfn, 2973 }; 2974 #endif 
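
/*
 * Illustrative sketch, kept compiled out: the kind of CDB-size selection that
 * scsi_block_do_sgio() performs when patching the LBA and transfer length into
 * a passthrough CDB.  This standalone version simply picks the smallest READ
 * CDB that can represent the request (READ(6): 21-bit LBA, 1-255 blocks;
 * READ(10): 32-bit LBA, 16-bit length; READ(16): 64-bit LBA, 32-bit length);
 * the driver above applies its own, slightly more conservative checks.  All
 * names prefixed "example_" are hypothetical and not part of this driver;
 * QEMU's own code uses stw_be_p()/stl_be_p()/stq_be_p().
 */
#if 0
#include <stdint.h>
#include <string.h>

static void example_store_be16(uint8_t *p, uint16_t v)
{
    p[0] = v >> 8;
    p[1] = v;
}

static void example_store_be32(uint8_t *p, uint32_t v)
{
    p[0] = v >> 24;
    p[1] = v >> 16;
    p[2] = v >> 8;
    p[3] = v;
}

static void example_store_be64(uint8_t *p, uint64_t v)
{
    example_store_be32(p, v >> 32);
    example_store_be32(p + 4, (uint32_t)v);
}

/* Returns the CDB length chosen for a READ of 'blocks' logical blocks
 * starting at 'lba'; the CDB itself is written into cdb[0..15]. */
static int example_build_read_cdb(uint8_t cdb[16], uint64_t lba, uint32_t blocks)
{
    memset(cdb, 0, 16);
    if (lba <= 0x1fffff && blocks >= 1 && blocks <= 0xff) {
        cdb[0] = 0x08;                        /* READ(6) */
        cdb[1] = (lba >> 16) & 0x1f;          /* LBA bits 20..16 */
        example_store_be16(&cdb[2], (uint16_t)lba);
        cdb[4] = (uint8_t)blocks;             /* 0 would mean 256 blocks */
        return 6;
    } else if (lba <= 0xffffffffULL && blocks <= 0xffff) {
        cdb[0] = 0x28;                        /* READ(10) */
        example_store_be32(&cdb[2], (uint32_t)lba);
        example_store_be16(&cdb[7], (uint16_t)blocks);
        return 10;
    } else {
        cdb[0] = 0x88;                        /* READ(16) */
        example_store_be64(&cdb[2], lba);
        example_store_be32(&cdb[10], blocks);
        return 16;
    }
}
#endif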
2975 2976 static Property scsi_disk_properties[] = { 2977 DEFINE_SCSI_DISK_PROPERTIES(), 2978 DEFINE_PROP_BIT("removable", SCSIDiskState, features, 2979 SCSI_DISK_F_REMOVABLE, false), 2980 DEFINE_PROP_BIT("dpofua", SCSIDiskState, features, 2981 SCSI_DISK_F_DPOFUA, false), 2982 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0), 2983 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0), 2984 DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0), 2985 DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size, 2986 DEFAULT_MAX_UNMAP_SIZE), 2987 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size, 2988 DEFAULT_MAX_IO_SIZE), 2989 DEFINE_PROP_END_OF_LIST(), 2990 }; 2991 2992 static void scsi_disk_class_initfn(ObjectClass *klass, void *data) 2993 { 2994 DeviceClass *dc = DEVICE_CLASS(klass); 2995 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); 2996 2997 sc->realize = scsi_disk_realize; 2998 sc->alloc_req = scsi_new_request; 2999 sc->unit_attention_reported = scsi_disk_unit_attention_reported; 3000 dc->fw_name = "disk"; 3001 dc->desc = "virtual SCSI disk or CD-ROM (legacy)"; 3002 dc->reset = scsi_disk_reset; 3003 dc->props = scsi_disk_properties; 3004 dc->vmsd = &vmstate_scsi_disk_state; 3005 } 3006 3007 static const TypeInfo scsi_disk_info = { 3008 .name = "scsi-disk", 3009 .parent = TYPE_SCSI_DISK_BASE, 3010 .class_init = scsi_disk_class_initfn, 3011 }; 3012 3013 static void scsi_disk_register_types(void) 3014 { 3015 type_register_static(&scsi_disk_base_info); 3016 type_register_static(&scsi_hd_info); 3017 type_register_static(&scsi_cd_info); 3018 #ifdef __linux__ 3019 type_register_static(&scsi_block_info); 3020 #endif 3021 type_register_static(&scsi_disk_info); 3022 } 3023 3024 type_init(scsi_disk_register_types) 3025
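
/*
 * Illustrative sketch, kept compiled out: how an initiator decodes the 8-byte
 * READ CAPACITY(10) parameter data that scsi_disk_emulate_command() builds
 * (bytes 0-3: last LBA, bytes 4-7: block length, both big-endian).  A last LBA
 * of 0xffffffff is the clipped value returned when the capacity does not fit
 * in 32 bits ("clip to 2TB") and tells the initiator to retry with
 * READ CAPACITY(16).  The type and function names are hypothetical and not
 * part of this driver.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

typedef struct ExampleCapacity {
    uint64_t num_blocks;     /* last LBA + 1, or 0 if not yet known */
    uint32_t block_size;     /* bytes per logical block */
    bool need_readcap16;     /* capacity did not fit in 32 bits */
} ExampleCapacity;

static uint32_t example_load_be32(const uint8_t *p)
{
    return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
           ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}

static ExampleCapacity example_parse_readcap10(const uint8_t buf[8])
{
    ExampleCapacity cap = { 0, 0, false };
    uint32_t last_lba = example_load_be32(&buf[0]);

    cap.block_size = example_load_be32(&buf[4]);
    if (last_lba == UINT32_MAX) {
        /* Ask again with SERVICE ACTION IN(16) / READ CAPACITY(16). */
        cap.need_readcap16 = true;
    } else {
        cap.num_blocks = (uint64_t)last_lba + 1;
    }
    return cap;
}
#endif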