/*
 * SCSI Device emulation
 *
 * Copyright (c) 2006 CodeSourcery.
 * Based on code by Fabrice Bellard
 *
 * Written by Paul Brook
 * Modifications:
 *  2009-Dec-12 Artyom Tarasenko : implemented standard inquiry for the case
 *                                 when the allocation length of CDB is smaller
 *                                 than 36.
 *  2009-Oct-13 Artyom Tarasenko : implemented the block descriptor in the
 *                                 MODE SENSE response.
 *
 * This code is licensed under the LGPL.
 *
 * Note that this file only handles the SCSI architecture model and device
 * commands.  Emulation of interface/link layer protocols is handled by
 * the host adapter emulator.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "hw/scsi/scsi.h"
#include "hw/scsi/emulation.h"
#include "scsi/constants.h"
#include "sysemu/sysemu.h"
#include "sysemu/block-backend.h"
#include "sysemu/blockdev.h"
#include "hw/block/block.h"
#include "sysemu/dma.h"
#include "qemu/cutils.h"
#include "trace.h"

#ifdef __linux__
#include <scsi/sg.h>
#endif

#define SCSI_WRITE_SAME_MAX (512 * KiB)
#define SCSI_DMA_BUF_SIZE (128 * KiB)
#define SCSI_MAX_INQUIRY_LEN 256
#define SCSI_MAX_MODE_LEN 256

#define DEFAULT_DISCARD_GRANULARITY (4 * KiB)
#define DEFAULT_MAX_UNMAP_SIZE (1 * GiB)
#define DEFAULT_MAX_IO_SIZE INT_MAX /* 2 GB - 1 block */

#define TYPE_SCSI_DISK_BASE "scsi-disk-base"

#define SCSI_DISK_BASE(obj) \
     OBJECT_CHECK(SCSIDiskState, (obj), TYPE_SCSI_DISK_BASE)
#define SCSI_DISK_BASE_CLASS(klass) \
     OBJECT_CLASS_CHECK(SCSIDiskClass, (klass), TYPE_SCSI_DISK_BASE)
#define SCSI_DISK_BASE_GET_CLASS(obj) \
     OBJECT_GET_CLASS(SCSIDiskClass, (obj), TYPE_SCSI_DISK_BASE)

typedef struct SCSIDiskClass {
    SCSIDeviceClass parent_class;
    DMAIOFunc *dma_readv;
    DMAIOFunc *dma_writev;
    bool (*need_fua_emulation)(SCSICommand *cmd);
    void (*update_sense)(SCSIRequest *r);
} SCSIDiskClass;

typedef struct SCSIDiskReq {
    SCSIRequest req;
    /* Both sector and sector_count are in terms of QEMU 512-byte blocks. */
    uint64_t sector;
    uint32_t sector_count;
    uint32_t buflen;
    bool started;
    bool need_fua_emulation;
    struct iovec iov;
    QEMUIOVector qiov;
    BlockAcctCookie acct;
    unsigned char *status;
} SCSIDiskReq;

#define SCSI_DISK_F_REMOVABLE             0
#define SCSI_DISK_F_DPOFUA                1
#define SCSI_DISK_F_NO_REMOVABLE_DEVOPS   2

typedef struct SCSIDiskState
{
    SCSIDevice qdev;
    uint32_t features;
    bool media_changed;
    bool media_event;
    bool eject_request;
    uint16_t port_index;
    uint64_t max_unmap_size;
    uint64_t max_io_size;
    QEMUBH *bh;
    char *version;
    char *serial;
    char *vendor;
    char *product;
    char *device_id;
    bool tray_open;
    bool tray_locked;
    /*
     * 0x0000        - rotation rate not reported
     * 0x0001        - non-rotating medium (SSD)
     * 0x0002-0x0400 - reserved
     * 0x0401-0xfffe - rotations per minute
     * 0xffff        - reserved
     */
    uint16_t rotation_rate;
} SCSIDiskState;

static bool scsi_handle_rw_error(SCSIDiskReq *r, int error, bool acct_failed);

static void scsi_free_request(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    qemu_vfree(r->iov.iov_base);
}

/* Helper function for command completion with sense.  */
static void scsi_check_condition(SCSIDiskReq *r, SCSISense sense)
{
    trace_scsi_disk_check_condition(r->req.tag, sense.key, sense.asc,
                                    sense.ascq);
    scsi_req_build_sense(&r->req, sense);
    scsi_req_complete(&r->req, CHECK_CONDITION);
}

static void scsi_init_iovec(SCSIDiskReq *r, size_t size)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    if (!r->iov.iov_base) {
        r->buflen = size;
        r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen);
    }
    r->iov.iov_len = MIN(r->sector_count * 512, r->buflen);
    qemu_iovec_init_external(&r->qiov, &r->iov, 1);
}

static void scsi_disk_save_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    qemu_put_be64s(f, &r->sector);
    qemu_put_be32s(f, &r->sector_count);
    qemu_put_be32s(f, &r->buflen);
    if (r->buflen) {
        if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
            qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len);
        } else if (!req->retry) {
            uint32_t len = r->iov.iov_len;
            qemu_put_be32s(f, &len);
            qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len);
        }
    }
}

static void scsi_disk_load_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    qemu_get_be64s(f, &r->sector);
    qemu_get_be32s(f, &r->sector_count);
    qemu_get_be32s(f, &r->buflen);
    if (r->buflen) {
        scsi_init_iovec(r, r->buflen);
        if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
            qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len);
        } else if (!r->req.retry) {
            uint32_t len;
            qemu_get_be32s(f, &len);
            r->iov.iov_len = len;
            assert(r->iov.iov_len <= r->buflen);
            qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len);
        }
    }

    qemu_iovec_init_external(&r->qiov, &r->iov, 1);
}

static bool scsi_disk_req_check_error(SCSIDiskReq *r, int ret, bool acct_failed)
{
    if (r->req.io_canceled) {
        scsi_req_cancel_complete(&r->req);
        return true;
    }

    if (ret < 0 || (r->status && *r->status)) {
        return scsi_handle_rw_error(r, -ret, acct_failed);
    }

    return false;
}

static void scsi_aio_complete(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;
    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (scsi_disk_req_check_error(r, ret, true)) {
        goto done;
    }

    block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    scsi_req_complete(&r->req, GOOD);

done:
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
    scsi_req_unref(&r->req);
}

static bool scsi_is_cmd_fua(SCSICommand *cmd)
{
    switch (cmd->buf[0]) {
    case READ_10:
    case READ_12:
    case READ_16:
    case WRITE_10:
    case WRITE_12:
    case WRITE_16:
        return (cmd->buf[1] & 8) != 0;

    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
    case WRITE_VERIFY_10:
    case WRITE_VERIFY_12:
    case WRITE_VERIFY_16:
        return true;

    case READ_6:
    case WRITE_6:
    default:
        return false;
    }
}

static void scsi_write_do_fua(SCSIDiskReq *r)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb == NULL);
    assert(!r->req.io_canceled);

    if (r->need_fua_emulation) {
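        /*
         * Descriptive note (not in the original source): Force Unit Access
         * is emulated by issuing an explicit flush once the data has been
         * written; scsi_aio_complete() finishes the request when the flush
         * completes.
         */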
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
        return;
    }

    scsi_req_complete(&r->req, GOOD);
    scsi_req_unref(&r->req);
}

static void scsi_dma_complete_noio(SCSIDiskReq *r, int ret)
{
    assert(r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    r->sector += r->sector_count;
    r->sector_count = 0;
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        scsi_write_do_fua(r);
        return;
    } else {
        scsi_req_complete(&r->req, GOOD);
    }

done:
    scsi_req_unref(&r->req);
}

static void scsi_dma_complete(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    }
    scsi_dma_complete_noio(r, ret);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}

static void scsi_read_complete_noio(SCSIDiskReq *r, int ret)
{
    uint32_t n;

    assert(r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    n = r->qiov.size / 512;
    r->sector += n;
    r->sector_count -= n;
    scsi_req_data(&r->req, r->qiov.size);

done:
    scsi_req_unref(&r->req);
}

static void scsi_read_complete(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
        trace_scsi_disk_read_complete(r->req.tag, r->qiov.size);
    }
    scsi_read_complete_noio(r, ret);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}

/* Actually issue a read to the block device.  */
static void scsi_do_read(SCSIDiskReq *r, int ret)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));

    assert(r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }
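    /*
     * Descriptive note (not in the original source): two submission paths
     * follow.  When the HBA supplied a scatter/gather list, DMA directly
     * into guest memory via dma_blk_io(); otherwise read into the bounce
     * buffer set up by scsi_init_iovec() and hand the data to the HBA chunk
     * by chunk through scsi_req_data().
     */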
    /* The request is used as the AIO opaque value, so add a ref.  */
    scsi_req_ref(&r->req);

    if (r->req.sg) {
        dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_READ);
        r->req.resid -= r->req.sg->size;
        r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
                                  r->req.sg, r->sector << BDRV_SECTOR_BITS,
                                  BDRV_SECTOR_SIZE,
                                  sdc->dma_readv, r, scsi_dma_complete, r,
                                  DMA_DIRECTION_FROM_DEVICE);
    } else {
        scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         r->qiov.size, BLOCK_ACCT_READ);
        r->req.aiocb = sdc->dma_readv(r->sector << BDRV_SECTOR_BITS, &r->qiov,
                                      scsi_read_complete, r, r);
    }

done:
    scsi_req_unref(&r->req);
}

static void scsi_do_read_cb(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    }
    scsi_do_read(opaque, ret);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}

/* Read more data from scsi device into buffer.  */
static void scsi_read_data(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    bool first;

    trace_scsi_disk_read_data_count(r->sector_count);
    if (r->sector_count == 0) {
        /* This also clears the sense buffer for REQUEST SENSE.  */
        scsi_req_complete(&r->req, GOOD);
        return;
    }

    /* No data transfer may already be in progress */
    assert(r->req.aiocb == NULL);

    /* The request is used as the AIO opaque value, so add a ref.  */
    scsi_req_ref(&r->req);
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        trace_scsi_disk_read_data_invalid();
        scsi_read_complete_noio(r, -EINVAL);
        return;
    }

    if (!blk_is_available(req->dev->conf.blk)) {
        scsi_read_complete_noio(r, -ENOMEDIUM);
        return;
    }

    first = !r->started;
    r->started = true;
    if (first && r->need_fua_emulation) {
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_do_read_cb, r);
    } else {
        scsi_do_read(r, 0);
    }
}

/*
 * scsi_handle_rw_error has two return values.  False means that the error
 * must be ignored, true means that the error has been processed and the
 * caller should not do anything else for this request.  Note that
 * scsi_handle_rw_error always manages its reference counts, independent
 * of the return value.
 */
static bool scsi_handle_rw_error(SCSIDiskReq *r, int error, bool acct_failed)
{
    bool is_read = (r->req.cmd.mode == SCSI_XFER_FROM_DEV);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
    BlockErrorAction action = blk_get_error_action(s->qdev.conf.blk,
                                                   is_read, error);

    if (action == BLOCK_ERROR_ACTION_REPORT) {
        if (acct_failed) {
            block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
        }
        switch (error) {
        case 0:
            /* A passthrough command has run and has produced sense data;
             * check whether the error has to be handled by the guest or
             * should rather pause the host.
             */
            assert(r->status && *r->status);
            if (scsi_sense_buf_is_guest_recoverable(r->req.sense,
                                                    sizeof(r->req.sense))) {
                /* These errors are handled by guest. */
                sdc->update_sense(&r->req);
                scsi_req_complete(&r->req, *r->status);
                return true;
            }
            error = scsi_sense_buf_to_errno(r->req.sense, sizeof(r->req.sense));
            break;
        case ENOMEDIUM:
            scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
            break;
        case ENOMEM:
            scsi_check_condition(r, SENSE_CODE(TARGET_FAILURE));
            break;
        case EINVAL:
            scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
            break;
        case ENOSPC:
            scsi_check_condition(r, SENSE_CODE(SPACE_ALLOC_FAILED));
            break;
        default:
            scsi_check_condition(r, SENSE_CODE(IO_ERROR));
            break;
        }
    }

    blk_error_action(s->qdev.conf.blk, action, is_read, error);
    if (action == BLOCK_ERROR_ACTION_IGNORE) {
        scsi_req_complete(&r->req, 0);
        return true;
    }

    if (action == BLOCK_ERROR_ACTION_STOP) {
        scsi_req_retry(&r->req);
    }
    return true;
}

static void scsi_write_complete_noio(SCSIDiskReq *r, int ret)
{
    uint32_t n;

    assert(r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    n = r->qiov.size / 512;
    r->sector += n;
    r->sector_count -= n;
    if (r->sector_count == 0) {
        scsi_write_do_fua(r);
        return;
    } else {
        scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
        trace_scsi_disk_write_complete_noio(r->req.tag, r->qiov.size);
        scsi_req_data(&r->req, r->qiov.size);
    }

done:
    scsi_req_unref(&r->req);
}

static void scsi_write_complete(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    }
    scsi_write_complete_noio(r, ret);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}

static void scsi_write_data(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));

    /* No data transfer may already be in progress */
    assert(r->req.aiocb == NULL);
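    /*
     * Descriptive note (not in the original source): data arrives either
     * through a guest scatter/gather list supplied by the HBA or through
     * the bounce buffer filled via scsi_req_data(); VERIFY commands are
     * completed below without issuing any I/O.
     */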
    /* The request is used as the AIO opaque value, so add a ref.  */
    scsi_req_ref(&r->req);
    if (r->req.cmd.mode != SCSI_XFER_TO_DEV) {
        trace_scsi_disk_write_data_invalid();
        scsi_write_complete_noio(r, -EINVAL);
        return;
    }

    if (!r->req.sg && !r->qiov.size) {
        /* Called for the first time.  Ask the driver to send us more data. */
        r->started = true;
        scsi_write_complete_noio(r, 0);
        return;
    }
    if (!blk_is_available(req->dev->conf.blk)) {
        scsi_write_complete_noio(r, -ENOMEDIUM);
        return;
    }

    if (r->req.cmd.buf[0] == VERIFY_10 || r->req.cmd.buf[0] == VERIFY_12 ||
        r->req.cmd.buf[0] == VERIFY_16) {
        if (r->req.sg) {
            scsi_dma_complete_noio(r, 0);
        } else {
            scsi_write_complete_noio(r, 0);
        }
        return;
    }

    if (r->req.sg) {
        dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_WRITE);
        r->req.resid -= r->req.sg->size;
        r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
                                  r->req.sg, r->sector << BDRV_SECTOR_BITS,
                                  BDRV_SECTOR_SIZE,
                                  sdc->dma_writev, r, scsi_dma_complete, r,
                                  DMA_DIRECTION_TO_DEVICE);
    } else {
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         r->qiov.size, BLOCK_ACCT_WRITE);
        r->req.aiocb = sdc->dma_writev(r->sector << BDRV_SECTOR_BITS, &r->qiov,
                                       scsi_write_complete, r, r);
    }
}

/* Return a pointer to the data buffer.  */
static uint8_t *scsi_get_buf(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    return (uint8_t *)r->iov.iov_base;
}

static int scsi_disk_emulate_vpd_page(SCSIRequest *req, uint8_t *outbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    uint8_t page_code = req->cmd.buf[2];
    int start, buflen = 0;

    outbuf[buflen++] = s->qdev.type & 0x1f;
    outbuf[buflen++] = page_code;
    outbuf[buflen++] = 0x00;
    outbuf[buflen++] = 0x00;
    start = buflen;

    switch (page_code) {
    case 0x00: /* Supported page codes, mandatory */
    {
        trace_scsi_disk_emulate_vpd_page_00(req->cmd.xfer);
        outbuf[buflen++] = 0x00; /* list of supported pages (this page) */
        if (s->serial) {
            outbuf[buflen++] = 0x80; /* unit serial number */
        }
        outbuf[buflen++] = 0x83; /* device identification */
        if (s->qdev.type == TYPE_DISK) {
            outbuf[buflen++] = 0xb0; /* block limits */
            outbuf[buflen++] = 0xb1; /* block device characteristics */
            outbuf[buflen++] = 0xb2; /* thin provisioning */
        }
        break;
    }
    case 0x80: /* Device serial number, optional */
    {
        int l;

        if (!s->serial) {
            trace_scsi_disk_emulate_vpd_page_80_not_supported();
            return -1;
        }

        l = strlen(s->serial);
        if (l > 36) {
            l = 36;
        }

        trace_scsi_disk_emulate_vpd_page_80(req->cmd.xfer);
        memcpy(outbuf + buflen, s->serial, l);
        buflen += l;
        break;
    }

    case 0x83: /* Device identification page, mandatory */
    {
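        /*
         * Descriptive note (not in the original source): each designation
         * descriptor emitted below is a 4-byte header (code set,
         * PIV/association/designator type, reserved, designator length)
         * followed by the designator itself.
         */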
        int id_len = s->device_id ? MIN(strlen(s->device_id), 255 - 8) : 0;

        trace_scsi_disk_emulate_vpd_page_83(req->cmd.xfer);

        if (id_len) {
            outbuf[buflen++] = 0x2;    /* ASCII */
            outbuf[buflen++] = 0;      /* not officially assigned */
            outbuf[buflen++] = 0;      /* reserved */
            outbuf[buflen++] = id_len; /* length of data following */
            memcpy(outbuf + buflen, s->device_id, id_len);
            buflen += id_len;
        }

        if (s->qdev.wwn) {
            outbuf[buflen++] = 0x1; /* Binary */
            outbuf[buflen++] = 0x3; /* NAA */
            outbuf[buflen++] = 0;   /* reserved */
            outbuf[buflen++] = 8;
            stq_be_p(&outbuf[buflen], s->qdev.wwn);
            buflen += 8;
        }

        if (s->qdev.port_wwn) {
            outbuf[buflen++] = 0x61; /* SAS / Binary */
            outbuf[buflen++] = 0x93; /* PIV / Target port / NAA */
            outbuf[buflen++] = 0;    /* reserved */
            outbuf[buflen++] = 8;
            stq_be_p(&outbuf[buflen], s->qdev.port_wwn);
            buflen += 8;
        }

        if (s->port_index) {
            outbuf[buflen++] = 0x61; /* SAS / Binary */

            /* PIV/Target port/relative target port */
            outbuf[buflen++] = 0x94;

            outbuf[buflen++] = 0;    /* reserved */
            outbuf[buflen++] = 4;
            stw_be_p(&outbuf[buflen + 2], s->port_index);
            buflen += 4;
        }
        break;
    }
    case 0xb0: /* block limits */
    {
        SCSIBlockLimits bl = {};

        if (s->qdev.type == TYPE_ROM) {
            trace_scsi_disk_emulate_vpd_page_b0_not_supported();
            return -1;
        }
        bl.wsnz = 1;
        bl.unmap_sectors =
            s->qdev.conf.discard_granularity / s->qdev.blocksize;
        bl.min_io_size =
            s->qdev.conf.min_io_size / s->qdev.blocksize;
        bl.opt_io_size =
            s->qdev.conf.opt_io_size / s->qdev.blocksize;
        bl.max_unmap_sectors =
            s->max_unmap_size / s->qdev.blocksize;
        bl.max_io_sectors =
            s->max_io_size / s->qdev.blocksize;
        /* 255 descriptors fit in 4 KiB with an 8-byte header */
        bl.max_unmap_descr = 255;

        if (s->qdev.type == TYPE_DISK) {
            int max_transfer_blk = blk_get_max_transfer(s->qdev.conf.blk);
            int max_io_sectors_blk =
                max_transfer_blk / s->qdev.blocksize;

            bl.max_io_sectors =
                MIN_NON_ZERO(max_io_sectors_blk, bl.max_io_sectors);
        }
        buflen += scsi_emulate_block_limits(outbuf + buflen, &bl);
        break;
    }
    case 0xb1: /* block device characteristics */
    {
        buflen = 0x40;
        outbuf[4] = (s->rotation_rate >> 8) & 0xff;
        outbuf[5] = s->rotation_rate & 0xff;
        outbuf[6] = 0; /* PRODUCT TYPE */
        outbuf[7] = 0; /* WABEREQ | WACEREQ | NOMINAL FORM FACTOR */
        outbuf[8] = 0; /* VBULS */
        break;
    }
    case 0xb2: /* thin provisioning */
    {
        buflen = 8;
        outbuf[4] = 0;
        outbuf[5] = 0xe0; /* unmap & write_same 10/16 all supported */
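        /*
         * Descriptive note (not in the original source): byte 6 carries the
         * PROVISIONING TYPE field; per SBC, 2 means thin provisioned (when
         * discard is enabled) and 1 means resource provisioned.
         */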
        outbuf[6] = s->qdev.conf.discard_granularity ? 2 : 1;
        outbuf[7] = 0;
        break;
    }
    default:
        return -1;
    }
    /* done with EVPD */
    assert(buflen - start <= 255);
    outbuf[start - 1] = buflen - start;
    return buflen;
}

static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    int buflen = 0;

    if (req->cmd.buf[1] & 0x1) {
        /* Vital product data */
        return scsi_disk_emulate_vpd_page(req, outbuf);
    }

    /* Standard INQUIRY data */
    if (req->cmd.buf[2] != 0) {
        return -1;
    }

    /* PAGE CODE == 0 */
    buflen = req->cmd.xfer;
    if (buflen > SCSI_MAX_INQUIRY_LEN) {
        buflen = SCSI_MAX_INQUIRY_LEN;
    }

    outbuf[0] = s->qdev.type & 0x1f;
    outbuf[1] = (s->features & (1 << SCSI_DISK_F_REMOVABLE)) ? 0x80 : 0;

    strpadcpy((char *) &outbuf[16], 16, s->product, ' ');
    strpadcpy((char *) &outbuf[8], 8, s->vendor, ' ');

    memset(&outbuf[32], 0, 4);
    memcpy(&outbuf[32], s->version, MIN(4, strlen(s->version)));
    /*
     * We claim conformance to SPC-3, which is required for guests
     * to ask for modern features like READ CAPACITY(16) or the
     * block characteristics VPD page by default.  Not all of SPC-3
     * is actually implemented, but we're good enough.
     */
    outbuf[2] = s->qdev.default_scsi_version;
    outbuf[3] = 2 | 0x10; /* Format 2, HiSup */

    if (buflen > 36) {
        outbuf[4] = buflen - 5; /* Additional Length = (Len - 1) - 4 */
    } else {
        /* If the allocation length of CDB is too small,
           the additional length is not adjusted */
        outbuf[4] = 36 - 5;
    }

    /* Sync data transfer and TCQ.  */
    outbuf[7] = 0x10 | (req->bus->info->tcq ? 0x02 : 0);
    return buflen;
}

static inline bool media_is_dvd(SCSIDiskState *s)
{
    uint64_t nb_sectors;
    if (s->qdev.type != TYPE_ROM) {
        return false;
    }
    if (!blk_is_available(s->qdev.conf.blk)) {
        return false;
    }
    blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
    return nb_sectors > CD_MAX_SECTORS;
}

static inline bool media_is_cd(SCSIDiskState *s)
{
    uint64_t nb_sectors;
    if (s->qdev.type != TYPE_ROM) {
        return false;
    }
    if (!blk_is_available(s->qdev.conf.blk)) {
        return false;
    }
    blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
    return nb_sectors <= CD_MAX_SECTORS;
}

static int scsi_read_disc_information(SCSIDiskState *s, SCSIDiskReq *r,
                                      uint8_t *outbuf)
{
    uint8_t type = r->req.cmd.buf[1] & 7;

    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }

    /* Types 1/2 are only defined for Blu-Ray.  */
    if (type != 0) {
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        return -1;
    }

    memset(outbuf, 0, 34);
    outbuf[1] = 32;
    outbuf[2] = 0xe; /* last session complete, disc finalized */
    outbuf[3] = 1;   /* first track on disc */
    outbuf[4] = 1;   /* # of sessions */
    outbuf[5] = 1;   /* first track of last session */
    outbuf[6] = 1;   /* last track of last session */
    outbuf[7] = 0x20; /* unrestricted use */
    outbuf[8] = 0x00; /* CD-ROM or DVD-ROM */
    /* 9-10-11: most significant byte corresponding bytes 4-5-6 */
    /* 12-23: not meaningful for CD-ROM or DVD-ROM */
    /* 24-31: disc bar code */
    /* 32: disc application code */
    /* 33: number of OPC tables */

    return 34;
}

static int scsi_read_dvd_structure(SCSIDiskState *s, SCSIDiskReq *r,
                                   uint8_t *outbuf)
{
    static const int rds_caps_size[5] = {
        [0] = 2048 + 4,
        [1] = 4 + 4,
        [3] = 188 + 4,
        [4] = 2048 + 4,
    };

    uint8_t media = r->req.cmd.buf[1];
    uint8_t layer = r->req.cmd.buf[6];
    uint8_t format = r->req.cmd.buf[7];
    int size = -1;

    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }
    if (media != 0) {
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        return -1;
    }

    if (format != 0xff) {
        if (!blk_is_available(s->qdev.conf.blk)) {
            scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
            return -1;
        }
        if (media_is_cd(s)) {
            scsi_check_condition(r, SENSE_CODE(INCOMPATIBLE_FORMAT));
            return -1;
        }
        if (format >= ARRAY_SIZE(rds_caps_size)) {
            return -1;
        }
        size = rds_caps_size[format];
        memset(outbuf, 0, size);
    }

    switch (format) {
    case 0x00: {
        /* Physical format information */
        uint64_t nb_sectors;
        if (layer != 0) {
            goto fail;
        }
        blk_get_geometry(s->qdev.conf.blk, &nb_sectors);

        outbuf[4] = 1;   /* DVD-ROM, part version 1 */
        outbuf[5] = 0xf; /* 120mm disc, minimum rate unspecified */
        outbuf[6] = 1;   /* one layer, read-only (per MMC-2 spec) */
        outbuf[7] = 0;   /* default densities */

        stl_be_p(&outbuf[12], (nb_sectors >> 2) - 1); /* end sector */
        stl_be_p(&outbuf[16], (nb_sectors >> 2) - 1); /* l0 end sector */
        break;
    }

    case 0x01: /* DVD copyright information, all zeros */
        break;

    case 0x03: /* BCA information - invalid field for no BCA info */
        return -1;

    case 0x04: /* DVD disc manufacturing information, all zeros */
        break;

    case 0xff: { /* List capabilities */
        int i;
        size = 4;
        for (i = 0; i < ARRAY_SIZE(rds_caps_size); i++) {
            if (!rds_caps_size[i]) {
                continue;
            }
            outbuf[size] = i;
            outbuf[size + 1] = 0x40; /* Not writable, readable */
            stw_be_p(&outbuf[size + 2], rds_caps_size[i]);
            size += 4;
        }
        break;
    }

    default:
        return -1;
    }

    /* Size of buffer, not including 2 byte size field */
    stw_be_p(outbuf, size - 2);
    return size;

fail:
    return -1;
}

static int scsi_event_status_media(SCSIDiskState *s, uint8_t *outbuf)
{
    uint8_t event_code, media_status;

    media_status = 0;
    if (s->tray_open) {
        media_status = MS_TRAY_OPEN;
    } else if (blk_is_inserted(s->qdev.conf.blk)) {
        media_status = MS_MEDIA_PRESENT;
    }

    /* Event notification descriptor */
    event_code = MEC_NO_CHANGE;
    if (media_status != MS_TRAY_OPEN) {
        if (s->media_event) {
            event_code = MEC_NEW_MEDIA;
            s->media_event = false;
        } else if (s->eject_request) {
            event_code = MEC_EJECT_REQUESTED;
            s->eject_request = false;
        }
    }

    outbuf[0] = event_code;
    outbuf[1] = media_status;

    /* These fields are reserved, just clear them. */
    outbuf[2] = 0;
    outbuf[3] = 0;
    return 4;
}

static int scsi_get_event_status_notification(SCSIDiskState *s, SCSIDiskReq *r,
                                              uint8_t *outbuf)
{
    int size;
    uint8_t *buf = r->req.cmd.buf;
    uint8_t notification_class_request = buf[4];
    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }
    if ((buf[1] & 1) == 0) {
        /* asynchronous */
        return -1;
    }

    size = 4;
    outbuf[0] = outbuf[1] = 0;
    outbuf[3] = 1 << GESN_MEDIA; /* supported events */
    if (notification_class_request & (1 << GESN_MEDIA)) {
        outbuf[2] = GESN_MEDIA;
        size += scsi_event_status_media(s, &outbuf[size]);
    } else {
        outbuf[2] = 0x80;
    }
    stw_be_p(outbuf, size - 4);
    return size;
}

static int scsi_get_configuration(SCSIDiskState *s, uint8_t *outbuf)
{
    int current;

    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }

    if (media_is_dvd(s)) {
        current = MMC_PROFILE_DVD_ROM;
    } else if (media_is_cd(s)) {
        current = MMC_PROFILE_CD_ROM;
    } else {
        current = MMC_PROFILE_NONE;
    }

    memset(outbuf, 0, 40);
    stl_be_p(&outbuf[0], 36); /* Bytes after the data length field */
    stw_be_p(&outbuf[6], current);
    /* outbuf[8] - outbuf[19]: Feature 0 - Profile list */
    outbuf[10] = 0x03; /* persistent, current */
    outbuf[11] = 8;    /* two profiles */
    stw_be_p(&outbuf[12], MMC_PROFILE_DVD_ROM);
    outbuf[14] = (current == MMC_PROFILE_DVD_ROM);
    stw_be_p(&outbuf[16], MMC_PROFILE_CD_ROM);
    outbuf[18] = (current == MMC_PROFILE_CD_ROM);
    /* outbuf[20] - outbuf[31]: Feature 1 - Core feature */
    stw_be_p(&outbuf[20], 1);
    outbuf[22] = 0x08 | 0x03; /* version 2, persistent, current */
    outbuf[23] = 8;
    stl_be_p(&outbuf[24], 1); /* SCSI */
    outbuf[28] = 1;           /* DBE = 1, mandatory */
    /* outbuf[32] - outbuf[39]: Feature 3 - Removable media feature */
    stw_be_p(&outbuf[32], 3);
    outbuf[34] = 0x08 | 0x03; /* version 2, persistent, current */
    outbuf[35] = 4;
    outbuf[36] = 0x39; /* tray, load=1, eject=1, unlocked at powerup, lock=1 */
    /* TODO: Random readable, CD read, DVD read, drive serial number,
       power management */
    return 40;
}

static int scsi_emulate_mechanism_status(SCSIDiskState *s, uint8_t *outbuf)
{
    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }
    memset(outbuf, 0, 8);
    outbuf[5] = 1; /* CD-ROM */
    return 8;
}

static int mode_sense_page(SCSIDiskState *s, int page, uint8_t **p_outbuf,
                           int page_control)
{
    static const int mode_sense_valid[0x3f] = {
        [MODE_PAGE_HD_GEOMETRY] = (1 << TYPE_DISK),
        [MODE_PAGE_FLEXIBLE_DISK_GEOMETRY] = (1 << TYPE_DISK),
        [MODE_PAGE_CACHING] = (1 << TYPE_DISK) | (1 << TYPE_ROM),
        [MODE_PAGE_R_W_ERROR] = (1 << TYPE_DISK) | (1 << TYPE_ROM),
        [MODE_PAGE_AUDIO_CTL] = (1 << TYPE_ROM),
        [MODE_PAGE_CAPABILITIES] = (1 << TYPE_ROM),
    };

    uint8_t *p = *p_outbuf + 2;
    int length;

    if ((mode_sense_valid[page] & (1 << s->qdev.type)) == 0) {
        return -1;
    }
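    /*
     * Descriptive note (not in the original source): the PAGE CONTROL field
     * (page_control) selects which values are reported: 0 = current,
     * 1 = changeable, 2 = default; 3 (saved values) is rejected by the
     * caller with SAVING PARAMETERS NOT SUPPORTED.
     */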
    /*
     * If Changeable Values are requested, a mask denoting those mode
     * parameters that are changeable shall be returned.  As we currently
     * don't support parameter changes via MODE_SELECT all bits are returned
     * set to zero.  The buffer was already memset to zero by the caller of
     * this function.
     *
     * The offsets here are off by two compared to the descriptions in the
     * SCSI specs, because those include a 2-byte header.  This is
     * unfortunate, but it is done so that offsets are consistent within our
     * implementation of MODE SENSE and MODE SELECT.  MODE SELECT has to deal
     * with both 2-byte and 4-byte headers.
     */
    switch (page) {
    case MODE_PAGE_HD_GEOMETRY:
        length = 0x16;
        if (page_control == 1) { /* Changeable Values */
            break;
        }
        /* if a geometry hint is available, use it */
        p[0] = (s->qdev.conf.cyls >> 16) & 0xff;
        p[1] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[2] = s->qdev.conf.cyls & 0xff;
        p[3] = s->qdev.conf.heads & 0xff;
        /* Write precomp start cylinder, disabled */
        p[4] = (s->qdev.conf.cyls >> 16) & 0xff;
        p[5] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[6] = s->qdev.conf.cyls & 0xff;
        /* Reduced current start cylinder, disabled */
        p[7] = (s->qdev.conf.cyls >> 16) & 0xff;
        p[8] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[9] = s->qdev.conf.cyls & 0xff;
        /* Device step rate [ns], 200ns */
        p[10] = 0;
        p[11] = 200;
        /* Landing zone cylinder */
        p[12] = 0xff;
        p[13] = 0xff;
        p[14] = 0xff;
        /* Medium rotation rate [rpm], 5400 rpm */
        p[18] = (5400 >> 8) & 0xff;
        p[19] = 5400 & 0xff;
        break;

    case MODE_PAGE_FLEXIBLE_DISK_GEOMETRY:
        length = 0x1e;
        if (page_control == 1) { /* Changeable Values */
            break;
        }
        /* Transfer rate [kbit/s], 5Mbit/s */
        p[0] = 5000 >> 8;
        p[1] = 5000 & 0xff;
        /* if a geometry hint is available, use it */
        p[2] = s->qdev.conf.heads & 0xff;
        p[3] = s->qdev.conf.secs & 0xff;
        p[4] = s->qdev.blocksize >> 8;
        p[6] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[7] = s->qdev.conf.cyls & 0xff;
        /* Write precomp start cylinder, disabled */
        p[8] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[9] = s->qdev.conf.cyls & 0xff;
        /* Reduced current start cylinder, disabled */
        p[10] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[11] = s->qdev.conf.cyls & 0xff;
        /* Device step rate [100us], 100us */
        p[12] = 0;
        p[13] = 1;
        /* Device step pulse width [us], 1us */
        p[14] = 1;
        /* Device head settle delay [100us], 100us */
        p[15] = 0;
        p[16] = 1;
        /* Motor on delay [0.1s], 0.1s */
        p[17] = 1;
        /* Motor off delay [0.1s], 0.1s */
        p[18] = 1;
        /* Medium rotation rate [rpm], 5400 rpm */
        p[26] = (5400 >> 8) & 0xff;
        p[27] = 5400 & 0xff;
        break;

    case MODE_PAGE_CACHING:
        length = 0x12;
        if (page_control == 1 || /* Changeable Values */
            blk_enable_write_cache(s->qdev.conf.blk)) {
            p[0] = 4; /* WCE */
        }
        break;

    case MODE_PAGE_R_W_ERROR:
        length = 10;
        if (page_control == 1) { /* Changeable Values */
            break;
        }
        p[0] = 0x80; /* Automatic Write Reallocation Enabled */
        if (s->qdev.type == TYPE_ROM) {
            p[1] = 0x20; /* Read Retry Count */
        }
        break;

    case MODE_PAGE_AUDIO_CTL:
        length = 14;
        break;

    case MODE_PAGE_CAPABILITIES:
        length = 0x14;
        if (page_control == 1) { /* Changeable Values */
            break;
        }

        p[0] = 0x3b; /* CD-R & CD-RW read */
        p[1] = 0;    /* Writing not supported */
        p[2] = 0x7f; /* Audio, composite, digital out,
                        mode 2 form 1&2, multi session */
        p[3] = 0xff; /* CD DA, DA accurate, RW supported,
                        RW corrected, C2 errors, ISRC,
                        UPC, Bar code */
        p[4] = 0x2d | (s->tray_locked ? 2 : 0);
        /* Locking supported, jumper present, eject, tray */
        p[5] = 0; /* no volume & mute control, no changer */
        p[6] = (50 * 176) >> 8; /* 50x read speed */
        p[7] = (50 * 176) & 0xff;
        p[8] = 2 >> 8;     /* Two volume levels */
        p[9] = 2 & 0xff;
        p[10] = 2048 >> 8; /* 2M buffer */
        p[11] = 2048 & 0xff;
        p[12] = (16 * 176) >> 8; /* 16x read speed current */
        p[13] = (16 * 176) & 0xff;
        p[16] = (16 * 176) >> 8; /* 16x write speed */
        p[17] = (16 * 176) & 0xff;
        p[18] = (16 * 176) >> 8; /* 16x write speed current */
        p[19] = (16 * 176) & 0xff;
        break;

    default:
        return -1;
    }

    assert(length < 256);
    (*p_outbuf)[0] = page;
    (*p_outbuf)[1] = length;
    *p_outbuf += length + 2;
    return length + 2;
}

static int scsi_disk_emulate_mode_sense(SCSIDiskReq *r, uint8_t *outbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    uint64_t nb_sectors;
    bool dbd;
    int page, buflen, ret, page_control;
    uint8_t *p;
    uint8_t dev_specific_param;

    dbd = (r->req.cmd.buf[1] & 0x8) != 0;
    page = r->req.cmd.buf[2] & 0x3f;
    page_control = (r->req.cmd.buf[2] & 0xc0) >> 6;

    trace_scsi_disk_emulate_mode_sense((r->req.cmd.buf[0] == MODE_SENSE) ? 6 :
                                       10, page, r->req.cmd.xfer, page_control);
    memset(outbuf, 0, r->req.cmd.xfer);
    p = outbuf;

    if (s->qdev.type == TYPE_DISK) {
        dev_specific_param = s->features & (1 << SCSI_DISK_F_DPOFUA) ? 0x10 : 0;
        if (blk_is_read_only(s->qdev.conf.blk)) {
            dev_specific_param |= 0x80; /* Readonly.  */
        }
    } else {
        /* MMC prescribes that CD/DVD drives have no block descriptors,
         * and defines no device-specific parameter.  */
        dev_specific_param = 0x00;
        dbd = true;
    }

    if (r->req.cmd.buf[0] == MODE_SENSE) {
        p[1] = 0; /* Default media type.  */
        p[2] = dev_specific_param;
        p[3] = 0; /* Block descriptor length.  */
        p += 4;
    } else { /* MODE_SENSE_10 */
        p[2] = 0; /* Default media type.  */
        p[3] = dev_specific_param;
        p[6] = p[7] = 0; /* Block descriptor length.  */
        p += 8;
    }
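    /*
     * Descriptive note (not in the original source): unless DBD is set (or
     * the device is a CD/DVD drive), a single 8-byte block descriptor is
     * appended after the header: a 3-byte number of blocks (zero when the
     * capacity does not fit) and a 3-byte block length.
     */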
    blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
    if (!dbd && nb_sectors) {
        if (r->req.cmd.buf[0] == MODE_SENSE) {
            outbuf[3] = 8; /* Block descriptor length  */
        } else { /* MODE_SENSE_10 */
            outbuf[7] = 8; /* Block descriptor length  */
        }
        nb_sectors /= (s->qdev.blocksize / 512);
        if (nb_sectors > 0xffffff) {
            nb_sectors = 0;
        }
        p[0] = 0; /* media density code */
        p[1] = (nb_sectors >> 16) & 0xff;
        p[2] = (nb_sectors >> 8) & 0xff;
        p[3] = nb_sectors & 0xff;
        p[4] = 0; /* reserved */
        p[5] = 0; /* bytes 5-7 are the sector size in bytes */
        p[6] = s->qdev.blocksize >> 8;
        p[7] = 0;
        p += 8;
    }

    if (page_control == 3) {
        /* Saved Values */
        scsi_check_condition(r, SENSE_CODE(SAVING_PARAMS_NOT_SUPPORTED));
        return -1;
    }

    if (page == 0x3f) {
        for (page = 0; page <= 0x3e; page++) {
            mode_sense_page(s, page, &p, page_control);
        }
    } else {
        ret = mode_sense_page(s, page, &p, page_control);
        if (ret == -1) {
            return -1;
        }
    }

    buflen = p - outbuf;
    /*
     * The mode data length field specifies the length in bytes of the
     * following data that is available to be transferred.  The mode data
     * length does not include itself.
     */
    if (r->req.cmd.buf[0] == MODE_SENSE) {
        outbuf[0] = buflen - 1;
    } else { /* MODE_SENSE_10 */
        outbuf[0] = ((buflen - 2) >> 8) & 0xff;
        outbuf[1] = (buflen - 2) & 0xff;
    }
    return buflen;
}

static int scsi_disk_emulate_read_toc(SCSIRequest *req, uint8_t *outbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    int start_track, format, msf, toclen;
    uint64_t nb_sectors;

    msf = req->cmd.buf[1] & 2;
    format = req->cmd.buf[2] & 0xf;
    start_track = req->cmd.buf[6];
    blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
    trace_scsi_disk_emulate_read_toc(start_track, format, msf >> 1);
    nb_sectors /= s->qdev.blocksize / 512;
    switch (format) {
    case 0:
        toclen = cdrom_read_toc(nb_sectors, outbuf, msf, start_track);
        break;
    case 1:
        /* multi session: only a single session defined */
        toclen = 12;
        memset(outbuf, 0, 12);
        outbuf[1] = 0x0a;
        outbuf[2] = 0x01;
        outbuf[3] = 0x01;
        break;
    case 2:
        toclen = cdrom_read_toc_raw(nb_sectors, outbuf, msf, start_track);
        break;
    default:
        return -1;
    }
    return toclen;
}

static int scsi_disk_emulate_start_stop(SCSIDiskReq *r)
{
    SCSIRequest *req = &r->req;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    bool start = req->cmd.buf[4] & 1;
    bool loej = req->cmd.buf[4] & 2; /* load on start, eject on !start */
    int pwrcnd = req->cmd.buf[4] & 0xf0;

    if (pwrcnd) {
        /* eject/load only happens for power condition == 0 */
        return 0;
    }

    if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) && loej) {
        if (!start && !s->tray_open && s->tray_locked) {
            scsi_check_condition(r,
                                 blk_is_inserted(s->qdev.conf.blk)
                                 ? SENSE_CODE(ILLEGAL_REQ_REMOVAL_PREVENTED)
                                 : SENSE_CODE(NOT_READY_REMOVAL_PREVENTED));
            return -1;
        }

        if (s->tray_open != !start) {
            blk_eject(s->qdev.conf.blk, !start);
            s->tray_open = !start;
        }
    }
    return 0;
}

static void scsi_disk_emulate_read_data(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    int buflen = r->iov.iov_len;

    if (buflen) {
        trace_scsi_disk_emulate_read_data(buflen);
        r->iov.iov_len = 0;
        r->started = true;
        scsi_req_data(&r->req, buflen);
        return;
    }

    /* This also clears the sense buffer for REQUEST SENSE.  */
    scsi_req_complete(&r->req, GOOD);
}

static int scsi_disk_check_mode_select(SCSIDiskState *s, int page,
                                       uint8_t *inbuf, int inlen)
{
    uint8_t mode_current[SCSI_MAX_MODE_LEN];
    uint8_t mode_changeable[SCSI_MAX_MODE_LEN];
    uint8_t *p;
    int len, expected_len, changeable_len, i;

    /* The input buffer does not include the page header, so it is
     * off by 2 bytes.
     */
    expected_len = inlen + 2;
    if (expected_len > SCSI_MAX_MODE_LEN) {
        return -1;
    }

    p = mode_current;
    memset(mode_current, 0, inlen + 2);
    len = mode_sense_page(s, page, &p, 0);
    if (len < 0 || len != expected_len) {
        return -1;
    }

    p = mode_changeable;
    memset(mode_changeable, 0, inlen + 2);
    changeable_len = mode_sense_page(s, page, &p, 1);
    assert(changeable_len == len);

    /* Check that unchangeable bits are the same as what MODE SENSE
     * would return.
     */
    for (i = 2; i < len; i++) {
        if (((mode_current[i] ^ inbuf[i - 2]) & ~mode_changeable[i]) != 0) {
            return -1;
        }
    }
    return 0;
}

static void scsi_disk_apply_mode_select(SCSIDiskState *s, int page, uint8_t *p)
{
    switch (page) {
    case MODE_PAGE_CACHING:
        blk_set_enable_write_cache(s->qdev.conf.blk, (p[0] & 4) != 0);
        break;

    default:
        break;
    }
}

static int mode_select_pages(SCSIDiskReq *r, uint8_t *p, int len, bool change)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    while (len > 0) {
        int page, subpage, page_len;

        /* Parse both possible formats for the mode page headers.  */
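        /*
         * Descriptive note (not in the original source): bit 6 (SPF) of the
         * first byte selects between the 2-byte page_0 header (page code,
         * page length) and the 4-byte sub-page header (page code, sub-page
         * code, 2-byte page length).
         */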
        page = p[0] & 0x3f;
        if (p[0] & 0x40) {
            if (len < 4) {
                goto invalid_param_len;
            }
            subpage = p[1];
            page_len = lduw_be_p(&p[2]);
            p += 4;
            len -= 4;
        } else {
            if (len < 2) {
                goto invalid_param_len;
            }
            subpage = 0;
            page_len = p[1];
            p += 2;
            len -= 2;
        }

        if (subpage) {
            goto invalid_param;
        }
        if (page_len > len) {
            goto invalid_param_len;
        }

        if (!change) {
            if (scsi_disk_check_mode_select(s, page, p, page_len) < 0) {
                goto invalid_param;
            }
        } else {
            scsi_disk_apply_mode_select(s, page, p);
        }

        p += page_len;
        len -= page_len;
    }
    return 0;

invalid_param:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM));
    return -1;

invalid_param_len:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
    return -1;
}

static void scsi_disk_emulate_mode_select(SCSIDiskReq *r, uint8_t *inbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    uint8_t *p = inbuf;
    int cmd = r->req.cmd.buf[0];
    int len = r->req.cmd.xfer;
    int hdr_len = (cmd == MODE_SELECT ? 4 : 8);
    int bd_len;
    int pass;

    /* We only support PF=1, SP=0.  */
    if ((r->req.cmd.buf[1] & 0x11) != 0x10) {
        goto invalid_field;
    }

    if (len < hdr_len) {
        goto invalid_param_len;
    }

    bd_len = (cmd == MODE_SELECT ? p[3] : lduw_be_p(&p[6]));
    len -= hdr_len;
    p += hdr_len;
    if (len < bd_len) {
        goto invalid_param_len;
    }
    if (bd_len != 0 && bd_len != 8) {
        goto invalid_param;
    }

    len -= bd_len;
    p += bd_len;

    /* Ensure no change is made if there is an error!  */
    for (pass = 0; pass < 2; pass++) {
        if (mode_select_pages(r, p, len, pass == 1) < 0) {
            assert(pass == 0);
            return;
        }
    }
    if (!blk_enable_write_cache(s->qdev.conf.blk)) {
        /* The request is used as the AIO opaque value, so add a ref.  */
        scsi_req_ref(&r->req);
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
        return;
    }

    scsi_req_complete(&r->req, GOOD);
    return;

invalid_param:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM));
    return;

invalid_param_len:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
    return;

invalid_field:
    scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
}

static inline bool check_lba_range(SCSIDiskState *s,
                                   uint64_t sector_num, uint32_t nb_sectors)
{
    /*
     * The first line tests that no overflow happens when computing the last
     * sector.  The second line tests that the last accessed sector is in
     * range.
     *
     * Careful, the computations should not underflow for nb_sectors == 0,
     * and a 0-block read to the first LBA beyond the end of device is
     * valid.
     */
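    /*
     * Worked example (not in the original source): with max_lba == 99,
     * (sector_num 100, nb_sectors 0) and (99, 1) are accepted, while
     * (100, 1) and (99, 2) are rejected.
     */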
    return (sector_num <= sector_num + nb_sectors &&
            sector_num + nb_sectors <= s->qdev.max_lba + 1);
}

typedef struct UnmapCBData {
    SCSIDiskReq *r;
    uint8_t *inbuf;
    int count;
} UnmapCBData;

static void scsi_unmap_complete(void *opaque, int ret);

static void scsi_unmap_complete_noio(UnmapCBData *data, int ret)
{
    SCSIDiskReq *r = data->r;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    uint64_t sector_num;
    uint32_t nb_sectors;

    assert(r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    if (data->count > 0) {
        sector_num = ldq_be_p(&data->inbuf[0]);
        nb_sectors = ldl_be_p(&data->inbuf[8]) & 0xffffffffULL;
        if (!check_lba_range(s, sector_num, nb_sectors)) {
            scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
            goto done;
        }

        r->req.aiocb = blk_aio_pdiscard(s->qdev.conf.blk,
                                        sector_num * s->qdev.blocksize,
                                        nb_sectors * s->qdev.blocksize,
                                        scsi_unmap_complete, data);
        data->count--;
        data->inbuf += 16;
        return;
    }

    scsi_req_complete(&r->req, GOOD);

done:
    scsi_req_unref(&r->req);
    g_free(data);
}

static void scsi_unmap_complete(void *opaque, int ret)
{
    UnmapCBData *data = opaque;
    SCSIDiskReq *r = data->r;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    scsi_unmap_complete_noio(data, ret);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}

static void scsi_disk_emulate_unmap(SCSIDiskReq *r, uint8_t *inbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    uint8_t *p = inbuf;
    int len = r->req.cmd.xfer;
    UnmapCBData *data;

    /* Reject ANCHOR=1.  */
    if (r->req.cmd.buf[1] & 0x1) {
        goto invalid_field;
    }

    if (len < 8) {
        goto invalid_param_len;
    }
    if (len < lduw_be_p(&p[0]) + 2) {
        goto invalid_param_len;
    }
    if (len < lduw_be_p(&p[2]) + 8) {
        goto invalid_param_len;
    }
    if (lduw_be_p(&p[2]) & 15) {
        goto invalid_param_len;
    }

    if (blk_is_read_only(s->qdev.conf.blk)) {
        scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
        return;
    }

    data = g_new0(UnmapCBData, 1);
    data->r = r;
    data->inbuf = &p[8];
    data->count = lduw_be_p(&p[2]) >> 4;

    /* The matching unref is in scsi_unmap_complete, before data is freed.  */
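    /*
     * Descriptive note (not in the original source): each 16-byte
     * descriptor (8-byte LBA, 4-byte block count, 4 reserved bytes) is
     * discarded one at a time; every blk_aio_pdiscard completion re-enters
     * scsi_unmap_complete_noio until data->count reaches zero.
     */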
    scsi_req_ref(&r->req);
    scsi_unmap_complete_noio(data, 0);
    return;

invalid_param_len:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
    return;

invalid_field:
    scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
}

typedef struct WriteSameCBData {
    SCSIDiskReq *r;
    int64_t sector;
    int nb_sectors;
    QEMUIOVector qiov;
    struct iovec iov;
} WriteSameCBData;

static void scsi_write_same_complete(void *opaque, int ret)
{
    WriteSameCBData *data = opaque;
    SCSIDiskReq *r = data->r;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;
    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (scsi_disk_req_check_error(r, ret, true)) {
        goto done;
    }

    block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);

    data->nb_sectors -= data->iov.iov_len / 512;
    data->sector += data->iov.iov_len / 512;
    data->iov.iov_len = MIN(data->nb_sectors * 512, data->iov.iov_len);
    if (data->iov.iov_len) {
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         data->iov.iov_len, BLOCK_ACCT_WRITE);
        /* Reinitialize qiov, to handle unaligned WRITE SAME request
         * where final qiov may need smaller size */
        qemu_iovec_init_external(&data->qiov, &data->iov, 1);
        r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk,
                                       data->sector << BDRV_SECTOR_BITS,
                                       &data->qiov, 0,
                                       scsi_write_same_complete, data);
        aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
        return;
    }

    scsi_req_complete(&r->req, GOOD);

done:
    scsi_req_unref(&r->req);
    qemu_vfree(data->iov.iov_base);
    g_free(data);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}

static void scsi_disk_emulate_write_same(SCSIDiskReq *r, uint8_t *inbuf)
{
    SCSIRequest *req = &r->req;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    uint32_t nb_sectors = scsi_data_cdb_xfer(r->req.cmd.buf);
    WriteSameCBData *data;
    uint8_t *buf;
    int i;

    /* Fail if PBDATA=1 or LBDATA=1 or ANCHOR=1.  */
    if (nb_sectors == 0 || (req->cmd.buf[1] & 0x16)) {
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        return;
    }

    if (blk_is_read_only(s->qdev.conf.blk)) {
        scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
        return;
    }
    if (!check_lba_range(s, r->req.cmd.lba, nb_sectors)) {
        scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
        return;
    }

    if ((req->cmd.buf[1] & 0x1) || buffer_is_zero(inbuf, s->qdev.blocksize)) {
        int flags = (req->cmd.buf[1] & 0x8) ? BDRV_REQ_MAY_UNMAP : 0;
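        /*
         * Descriptive note (not in the original source): the supplied block
         * is all zeroes (or bit 0 of CDB byte 1 is set), so the whole range
         * can be written with a single pwrite_zeroes call, letting the
         * block layer unmap it when the UNMAP bit allows.
         */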
        /* The request is used as the AIO opaque value, so add a ref.  */
        scsi_req_ref(&r->req);
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         nb_sectors * s->qdev.blocksize,
                         BLOCK_ACCT_WRITE);
        r->req.aiocb = blk_aio_pwrite_zeroes(s->qdev.conf.blk,
                                             r->req.cmd.lba * s->qdev.blocksize,
                                             nb_sectors * s->qdev.blocksize,
                                             flags, scsi_aio_complete, r);
        return;
    }

    data = g_new0(WriteSameCBData, 1);
    data->r = r;
    data->sector = r->req.cmd.lba * (s->qdev.blocksize / 512);
    data->nb_sectors = nb_sectors * (s->qdev.blocksize / 512);
    data->iov.iov_len = MIN(data->nb_sectors * 512, SCSI_WRITE_SAME_MAX);
    data->iov.iov_base = buf = blk_blockalign(s->qdev.conf.blk,
                                              data->iov.iov_len);
    qemu_iovec_init_external(&data->qiov, &data->iov, 1);

    for (i = 0; i < data->iov.iov_len; i += s->qdev.blocksize) {
        memcpy(&buf[i], inbuf, s->qdev.blocksize);
    }

    scsi_req_ref(&r->req);
    block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                     data->iov.iov_len, BLOCK_ACCT_WRITE);
    r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk,
                                   data->sector << BDRV_SECTOR_BITS,
                                   &data->qiov, 0,
                                   scsi_write_same_complete, data);
}

static void scsi_disk_emulate_write_data(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    if (r->iov.iov_len) {
        int buflen = r->iov.iov_len;
        trace_scsi_disk_emulate_write_data(buflen);
        r->iov.iov_len = 0;
        scsi_req_data(&r->req, buflen);
        return;
    }

    switch (req->cmd.buf[0]) {
    case MODE_SELECT:
    case MODE_SELECT_10:
        /* This also clears the sense buffer for REQUEST SENSE.  */
        scsi_disk_emulate_mode_select(r, r->iov.iov_base);
        break;

    case UNMAP:
        scsi_disk_emulate_unmap(r, r->iov.iov_base);
        break;

    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
        if (r->req.status == -1) {
            scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        }
        break;

    case WRITE_SAME_10:
    case WRITE_SAME_16:
        scsi_disk_emulate_write_same(r, r->iov.iov_base);
        break;

    default:
        abort();
    }
}

static int32_t scsi_disk_emulate_command(SCSIRequest *req, uint8_t *buf)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    uint64_t nb_sectors;
    uint8_t *outbuf;
    int buflen;

    switch (req->cmd.buf[0]) {
    case INQUIRY:
    case MODE_SENSE:
    case MODE_SENSE_10:
    case RESERVE:
    case RESERVE_10:
    case RELEASE:
    case RELEASE_10:
    case START_STOP:
    case ALLOW_MEDIUM_REMOVAL:
    case GET_CONFIGURATION:
    case GET_EVENT_STATUS_NOTIFICATION:
    case MECHANISM_STATUS:
    case REQUEST_SENSE:
        break;

    default:
        if (!blk_is_available(s->qdev.conf.blk)) {
            scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
            return 0;
        }
        break;
    }

    /*
     * FIXME: we shouldn't return anything bigger than 4k, but the code
     * requires the buffer to be as big as req->cmd.xfer in several
     * places.  So, do not allow CDBs with a very large ALLOCATION
     * LENGTH.  The real fix would be to modify scsi_read_data and
     * dma_buf_read, so that they return data beyond the buflen
     * as all zeros.
     */
    if (req->cmd.xfer > 65536) {
        goto illegal_request;
    }
    r->buflen = MAX(4096, req->cmd.xfer);

    if (!r->iov.iov_base) {
        r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen);
    }

    buflen = req->cmd.xfer;
    outbuf = r->iov.iov_base;
    memset(outbuf, 0, r->buflen);
    switch (req->cmd.buf[0]) {
    case TEST_UNIT_READY:
        assert(blk_is_available(s->qdev.conf.blk));
        break;
    case INQUIRY:
        buflen = scsi_disk_emulate_inquiry(req, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case MODE_SENSE:
    case MODE_SENSE_10:
        buflen = scsi_disk_emulate_mode_sense(r, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case READ_TOC:
        buflen = scsi_disk_emulate_read_toc(req, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case RESERVE:
        if (req->cmd.buf[1] & 1) {
            goto illegal_request;
        }
        break;
    case RESERVE_10:
        if (req->cmd.buf[1] & 3) {
            goto illegal_request;
        }
        break;
    case RELEASE:
        if (req->cmd.buf[1] & 1) {
            goto illegal_request;
        }
        break;
    case RELEASE_10:
        if (req->cmd.buf[1] & 3) {
            goto illegal_request;
        }
        break;
    case START_STOP:
        if (scsi_disk_emulate_start_stop(r) < 0) {
            return 0;
        }
        break;
    case ALLOW_MEDIUM_REMOVAL:
        s->tray_locked = req->cmd.buf[4] & 1;
        blk_lock_medium(s->qdev.conf.blk, req->cmd.buf[4] & 1);
        break;
    case READ_CAPACITY_10:
        /* The normal LEN field for this command is zero.  */
        memset(outbuf, 0, 8);
        blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
        if (!nb_sectors) {
            scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY));
            return 0;
        }
        if ((req->cmd.buf[8] & 1) == 0 && req->cmd.lba) {
            goto illegal_request;
        }
        nb_sectors /= s->qdev.blocksize / 512;
        /* Returned value is the address of the last sector.  */
        nb_sectors--;
        /* Remember the new size for read/write sanity checking.  */
        s->qdev.max_lba = nb_sectors;
        /* Clip to 2TB, instead of returning capacity modulo 2TB.  */
        if (nb_sectors > UINT32_MAX) {
            nb_sectors = UINT32_MAX;
        }
        outbuf[0] = (nb_sectors >> 24) & 0xff;
        outbuf[1] = (nb_sectors >> 16) & 0xff;
        outbuf[2] = (nb_sectors >> 8) & 0xff;
        outbuf[3] = nb_sectors & 0xff;
        outbuf[4] = 0;
        outbuf[5] = 0;
        outbuf[6] = s->qdev.blocksize >> 8;
        outbuf[7] = 0;
        break;
    case REQUEST_SENSE:
        /* Just return "NO SENSE".  */
*/ 1988 buflen = scsi_convert_sense(NULL, 0, outbuf, r->buflen, 1989 (req->cmd.buf[1] & 1) == 0); 1990 if (buflen < 0) { 1991 goto illegal_request; 1992 } 1993 break; 1994 case MECHANISM_STATUS: 1995 buflen = scsi_emulate_mechanism_status(s, outbuf); 1996 if (buflen < 0) { 1997 goto illegal_request; 1998 } 1999 break; 2000 case GET_CONFIGURATION: 2001 buflen = scsi_get_configuration(s, outbuf); 2002 if (buflen < 0) { 2003 goto illegal_request; 2004 } 2005 break; 2006 case GET_EVENT_STATUS_NOTIFICATION: 2007 buflen = scsi_get_event_status_notification(s, r, outbuf); 2008 if (buflen < 0) { 2009 goto illegal_request; 2010 } 2011 break; 2012 case READ_DISC_INFORMATION: 2013 buflen = scsi_read_disc_information(s, r, outbuf); 2014 if (buflen < 0) { 2015 goto illegal_request; 2016 } 2017 break; 2018 case READ_DVD_STRUCTURE: 2019 buflen = scsi_read_dvd_structure(s, r, outbuf); 2020 if (buflen < 0) { 2021 goto illegal_request; 2022 } 2023 break; 2024 case SERVICE_ACTION_IN_16: 2025 /* Service Action In subcommands. */ 2026 if ((req->cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) { 2027 trace_scsi_disk_emulate_command_SAI_16(); 2028 memset(outbuf, 0, req->cmd.xfer); 2029 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 2030 if (!nb_sectors) { 2031 scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY)); 2032 return 0; 2033 } 2034 if ((req->cmd.buf[14] & 1) == 0 && req->cmd.lba) { 2035 goto illegal_request; 2036 } 2037 nb_sectors /= s->qdev.blocksize / 512; 2038 /* Returned value is the address of the last sector. */ 2039 nb_sectors--; 2040 /* Remember the new size for read/write sanity checking. */ 2041 s->qdev.max_lba = nb_sectors; 2042 outbuf[0] = (nb_sectors >> 56) & 0xff; 2043 outbuf[1] = (nb_sectors >> 48) & 0xff; 2044 outbuf[2] = (nb_sectors >> 40) & 0xff; 2045 outbuf[3] = (nb_sectors >> 32) & 0xff; 2046 outbuf[4] = (nb_sectors >> 24) & 0xff; 2047 outbuf[5] = (nb_sectors >> 16) & 0xff; 2048 outbuf[6] = (nb_sectors >> 8) & 0xff; 2049 outbuf[7] = nb_sectors & 0xff; 2050 outbuf[8] = 0; 2051 outbuf[9] = 0; 2052 outbuf[10] = s->qdev.blocksize >> 8; 2053 outbuf[11] = 0; 2054 outbuf[12] = 0; 2055 outbuf[13] = get_physical_block_exp(&s->qdev.conf); 2056 2057 /* set TPE bit if the format supports discard */ 2058 if (s->qdev.conf.discard_granularity) { 2059 outbuf[14] = 0x80; 2060 } 2061 2062 /* Protection, exponent and lowest lba field left blank. */ 2063 break; 2064 } 2065 trace_scsi_disk_emulate_command_SAI_unsupported(); 2066 goto illegal_request; 2067 case SYNCHRONIZE_CACHE: 2068 /* The request is used as the AIO opaque value, so add a ref. 
*/ 2069 scsi_req_ref(&r->req); 2070 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0, 2071 BLOCK_ACCT_FLUSH); 2072 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r); 2073 return 0; 2074 case SEEK_10: 2075 trace_scsi_disk_emulate_command_SEEK_10(r->req.cmd.lba); 2076 if (r->req.cmd.lba > s->qdev.max_lba) { 2077 goto illegal_lba; 2078 } 2079 break; 2080 case MODE_SELECT: 2081 trace_scsi_disk_emulate_command_MODE_SELECT(r->req.cmd.xfer); 2082 break; 2083 case MODE_SELECT_10: 2084 trace_scsi_disk_emulate_command_MODE_SELECT_10(r->req.cmd.xfer); 2085 break; 2086 case UNMAP: 2087 trace_scsi_disk_emulate_command_UNMAP(r->req.cmd.xfer); 2088 break; 2089 case VERIFY_10: 2090 case VERIFY_12: 2091 case VERIFY_16: 2092 trace_scsi_disk_emulate_command_VERIFY((req->cmd.buf[1] >> 1) & 3); 2093 if (req->cmd.buf[1] & 6) { 2094 goto illegal_request; 2095 } 2096 break; 2097 case WRITE_SAME_10: 2098 case WRITE_SAME_16: 2099 trace_scsi_disk_emulate_command_WRITE_SAME( 2100 req->cmd.buf[0] == WRITE_SAME_10 ? 10 : 16, r->req.cmd.xfer); 2101 break; 2102 default: 2103 trace_scsi_disk_emulate_command_UNKNOWN(buf[0], 2104 scsi_command_name(buf[0])); 2105 scsi_check_condition(r, SENSE_CODE(INVALID_OPCODE)); 2106 return 0; 2107 } 2108 assert(!r->req.aiocb); 2109 r->iov.iov_len = MIN(r->buflen, req->cmd.xfer); 2110 if (r->iov.iov_len == 0) { 2111 scsi_req_complete(&r->req, GOOD); 2112 } 2113 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { 2114 assert(r->iov.iov_len == req->cmd.xfer); 2115 return -r->iov.iov_len; 2116 } else { 2117 return r->iov.iov_len; 2118 } 2119 2120 illegal_request: 2121 if (r->req.status == -1) { 2122 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 2123 } 2124 return 0; 2125 2126 illegal_lba: 2127 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE)); 2128 return 0; 2129 } 2130 2131 /* Execute a scsi command. Returns the length of the data expected by the 2132 command. This will be positive for data transfers from the device 2133 (e.g. disk reads), negative for transfers to the device (e.g. disk writes), 2134 and zero if the command does not transfer any data. */ 2135 2136 static int32_t scsi_disk_dma_command(SCSIRequest *req, uint8_t *buf) 2137 { 2138 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 2139 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 2140 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s)); 2141 uint32_t len; 2142 uint8_t command; 2143 2144 command = buf[0]; 2145 2146 if (!blk_is_available(s->qdev.conf.blk)) { 2147 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM)); 2148 return 0; 2149 } 2150 2151 len = scsi_data_cdb_xfer(r->req.cmd.buf); 2152 switch (command) { 2153 case READ_6: 2154 case READ_10: 2155 case READ_12: 2156 case READ_16: 2157 trace_scsi_disk_dma_command_READ(r->req.cmd.lba, len); 2158 /* Protection information is not supported. For SCSI versions 2 and 2159 * older (as determined by snooping the guest's INQUIRY commands), 2160 * there is no RD/WR/VRPROTECT, so skip this check in these versions.
2161 */ 2162 if (s->qdev.scsi_version > 2 && (r->req.cmd.buf[1] & 0xe0)) { 2163 goto illegal_request; 2164 } 2165 if (!check_lba_range(s, r->req.cmd.lba, len)) { 2166 goto illegal_lba; 2167 } 2168 r->sector = r->req.cmd.lba * (s->qdev.blocksize / 512); 2169 r->sector_count = len * (s->qdev.blocksize / 512); 2170 break; 2171 case WRITE_6: 2172 case WRITE_10: 2173 case WRITE_12: 2174 case WRITE_16: 2175 case WRITE_VERIFY_10: 2176 case WRITE_VERIFY_12: 2177 case WRITE_VERIFY_16: 2178 if (blk_is_read_only(s->qdev.conf.blk)) { 2179 scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED)); 2180 return 0; 2181 } 2182 trace_scsi_disk_dma_command_WRITE( 2183 (command & 0xe) == 0xe ? "And Verify " : "", 2184 r->req.cmd.lba, len); 2185 /* fall through */ 2186 case VERIFY_10: 2187 case VERIFY_12: 2188 case VERIFY_16: 2189 /* We get here only for BYTCHK == 0x01 and only for scsi-block. 2190 * As far as DMA is concerned, we can treat it the same as a write; 2191 * scsi_block_do_sgio will send VERIFY commands. 2192 */ 2193 if (s->qdev.scsi_version > 2 && (r->req.cmd.buf[1] & 0xe0)) { 2194 goto illegal_request; 2195 } 2196 if (!check_lba_range(s, r->req.cmd.lba, len)) { 2197 goto illegal_lba; 2198 } 2199 r->sector = r->req.cmd.lba * (s->qdev.blocksize / 512); 2200 r->sector_count = len * (s->qdev.blocksize / 512); 2201 break; 2202 default: 2203 abort(); 2204 illegal_request: 2205 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 2206 return 0; 2207 illegal_lba: 2208 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE)); 2209 return 0; 2210 } 2211 r->need_fua_emulation = sdc->need_fua_emulation(&r->req.cmd); 2212 if (r->sector_count == 0) { 2213 scsi_req_complete(&r->req, GOOD); 2214 } 2215 assert(r->iov.iov_len == 0); 2216 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { 2217 return -r->sector_count * 512; 2218 } else { 2219 return r->sector_count * 512; 2220 } 2221 } 2222 2223 static void scsi_disk_reset(DeviceState *dev) 2224 { 2225 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev.qdev, dev); 2226 uint64_t nb_sectors; 2227 2228 scsi_device_purge_requests(&s->qdev, SENSE_CODE(RESET)); 2229 2230 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 2231 nb_sectors /= s->qdev.blocksize / 512; 2232 if (nb_sectors) { 2233 nb_sectors--; 2234 } 2235 s->qdev.max_lba = nb_sectors; 2236 /* reset tray statuses */ 2237 s->tray_locked = 0; 2238 s->tray_open = 0; 2239 2240 s->qdev.scsi_version = s->qdev.default_scsi_version; 2241 } 2242 2243 static void scsi_disk_resize_cb(void *opaque) 2244 { 2245 SCSIDiskState *s = opaque; 2246 2247 /* SPC lists this sense code as available only for 2248 * direct-access devices. 2249 */ 2250 if (s->qdev.type == TYPE_DISK) { 2251 scsi_device_report_change(&s->qdev, SENSE_CODE(CAPACITY_CHANGED)); 2252 } 2253 } 2254 2255 static void scsi_cd_change_media_cb(void *opaque, bool load, Error **errp) 2256 { 2257 SCSIDiskState *s = opaque; 2258 2259 /* 2260 * When a CD gets changed, we have to report an ejected state and 2261 * then a loaded state to guests so that they detect tray 2262 * open/close and media change events. Guests that do not use 2263 * GET_EVENT_STATUS_NOTIFICATION to detect such tray open/close 2264 * states rely on this behavior. 2265 * 2266 * media_changed governs the state machine used for unit attention 2267 * report. media_event is used by GET EVENT STATUS NOTIFICATION. 
2268 */ 2269 s->media_changed = load; 2270 s->tray_open = !load; 2271 scsi_device_set_ua(&s->qdev, SENSE_CODE(UNIT_ATTENTION_NO_MEDIUM)); 2272 s->media_event = true; 2273 s->eject_request = false; 2274 } 2275 2276 static void scsi_cd_eject_request_cb(void *opaque, bool force) 2277 { 2278 SCSIDiskState *s = opaque; 2279 2280 s->eject_request = true; 2281 if (force) { 2282 s->tray_locked = false; 2283 } 2284 } 2285 2286 static bool scsi_cd_is_tray_open(void *opaque) 2287 { 2288 return ((SCSIDiskState *)opaque)->tray_open; 2289 } 2290 2291 static bool scsi_cd_is_medium_locked(void *opaque) 2292 { 2293 return ((SCSIDiskState *)opaque)->tray_locked; 2294 } 2295 2296 static const BlockDevOps scsi_disk_removable_block_ops = { 2297 .change_media_cb = scsi_cd_change_media_cb, 2298 .eject_request_cb = scsi_cd_eject_request_cb, 2299 .is_tray_open = scsi_cd_is_tray_open, 2300 .is_medium_locked = scsi_cd_is_medium_locked, 2301 2302 .resize_cb = scsi_disk_resize_cb, 2303 }; 2304 2305 static const BlockDevOps scsi_disk_block_ops = { 2306 .resize_cb = scsi_disk_resize_cb, 2307 }; 2308 2309 static void scsi_disk_unit_attention_reported(SCSIDevice *dev) 2310 { 2311 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2312 if (s->media_changed) { 2313 s->media_changed = false; 2314 scsi_device_set_ua(&s->qdev, SENSE_CODE(MEDIUM_CHANGED)); 2315 } 2316 } 2317 2318 static void scsi_realize(SCSIDevice *dev, Error **errp) 2319 { 2320 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2321 bool read_only; 2322 2323 if (!s->qdev.conf.blk) { 2324 error_setg(errp, "drive property not set"); 2325 return; 2326 } 2327 2328 if (!(s->features & (1 << SCSI_DISK_F_REMOVABLE)) && 2329 !blk_is_inserted(s->qdev.conf.blk)) { 2330 error_setg(errp, "Device needs media, but drive is empty"); 2331 return; 2332 } 2333 2334 blkconf_blocksizes(&s->qdev.conf); 2335 2336 if (s->qdev.conf.logical_block_size > 2337 s->qdev.conf.physical_block_size) { 2338 error_setg(errp, 2339 "logical_block_size > physical_block_size not supported"); 2340 return; 2341 } 2342 2343 if (blk_get_aio_context(s->qdev.conf.blk) != qemu_get_aio_context() && 2344 !s->qdev.hba_supports_iothread) 2345 { 2346 error_setg(errp, "HBA does not support iothreads"); 2347 return; 2348 } 2349 2350 if (dev->type == TYPE_DISK) { 2351 if (!blkconf_geometry(&dev->conf, NULL, 65535, 255, 255, errp)) { 2352 return; 2353 } 2354 } 2355 2356 read_only = blk_is_read_only(s->qdev.conf.blk); 2357 if (dev->type == TYPE_ROM) { 2358 read_only = true; 2359 } 2360 2361 if (!blkconf_apply_backend_options(&dev->conf, read_only, 2362 dev->type == TYPE_DISK, errp)) { 2363 return; 2364 } 2365 2366 if (s->qdev.conf.discard_granularity == -1) { 2367 s->qdev.conf.discard_granularity = 2368 MAX(s->qdev.conf.logical_block_size, DEFAULT_DISCARD_GRANULARITY); 2369 } 2370 2371 if (!s->version) { 2372 s->version = g_strdup(qemu_hw_version()); 2373 } 2374 if (!s->vendor) { 2375 s->vendor = g_strdup("QEMU"); 2376 } 2377 if (!s->device_id) { 2378 if (s->serial) { 2379 s->device_id = g_strdup_printf("%.20s", s->serial); 2380 } else { 2381 const char *str = blk_name(s->qdev.conf.blk); 2382 if (str && *str) { 2383 s->device_id = g_strdup(str); 2384 } 2385 } 2386 } 2387 2388 if (blk_is_sg(s->qdev.conf.blk)) { 2389 error_setg(errp, "unwanted /dev/sg*"); 2390 return; 2391 } 2392 2393 if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) && 2394 !(s->features & (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS))) { 2395 blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_removable_block_ops, s); 2396 } else { 2397 
blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_block_ops, s); 2398 } 2399 blk_set_guest_block_size(s->qdev.conf.blk, s->qdev.blocksize); 2400 2401 blk_iostatus_enable(s->qdev.conf.blk); 2402 } 2403 2404 static void scsi_hd_realize(SCSIDevice *dev, Error **errp) 2405 { 2406 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2407 AioContext *ctx = NULL; 2408 /* can happen for devices without drive. The error message for missing 2409 * backend will be issued in scsi_realize 2410 */ 2411 if (s->qdev.conf.blk) { 2412 ctx = blk_get_aio_context(s->qdev.conf.blk); 2413 aio_context_acquire(ctx); 2414 blkconf_blocksizes(&s->qdev.conf); 2415 } 2416 s->qdev.blocksize = s->qdev.conf.logical_block_size; 2417 s->qdev.type = TYPE_DISK; 2418 if (!s->product) { 2419 s->product = g_strdup("QEMU HARDDISK"); 2420 } 2421 scsi_realize(&s->qdev, errp); 2422 if (ctx) { 2423 aio_context_release(ctx); 2424 } 2425 } 2426 2427 static void scsi_cd_realize(SCSIDevice *dev, Error **errp) 2428 { 2429 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2430 AioContext *ctx; 2431 int ret; 2432 2433 if (!dev->conf.blk) { 2434 /* Anonymous BlockBackend for an empty drive. As we put it into 2435 * dev->conf, qdev takes care of detaching on unplug. */ 2436 dev->conf.blk = blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL); 2437 ret = blk_attach_dev(dev->conf.blk, &dev->qdev); 2438 assert(ret == 0); 2439 } 2440 2441 ctx = blk_get_aio_context(dev->conf.blk); 2442 aio_context_acquire(ctx); 2443 s->qdev.blocksize = 2048; 2444 s->qdev.type = TYPE_ROM; 2445 s->features |= 1 << SCSI_DISK_F_REMOVABLE; 2446 if (!s->product) { 2447 s->product = g_strdup("QEMU CD-ROM"); 2448 } 2449 scsi_realize(&s->qdev, errp); 2450 aio_context_release(ctx); 2451 } 2452 2453 static void scsi_disk_realize(SCSIDevice *dev, Error **errp) 2454 { 2455 DriveInfo *dinfo; 2456 Error *local_err = NULL; 2457 2458 if (!dev->conf.blk) { 2459 scsi_realize(dev, &local_err); 2460 assert(local_err); 2461 error_propagate(errp, local_err); 2462 return; 2463 } 2464 2465 dinfo = blk_legacy_dinfo(dev->conf.blk); 2466 if (dinfo && dinfo->media_cd) { 2467 scsi_cd_realize(dev, errp); 2468 } else { 2469 scsi_hd_realize(dev, errp); 2470 } 2471 } 2472 2473 static const SCSIReqOps scsi_disk_emulate_reqops = { 2474 .size = sizeof(SCSIDiskReq), 2475 .free_req = scsi_free_request, 2476 .send_command = scsi_disk_emulate_command, 2477 .read_data = scsi_disk_emulate_read_data, 2478 .write_data = scsi_disk_emulate_write_data, 2479 .get_buf = scsi_get_buf, 2480 }; 2481 2482 static const SCSIReqOps scsi_disk_dma_reqops = { 2483 .size = sizeof(SCSIDiskReq), 2484 .free_req = scsi_free_request, 2485 .send_command = scsi_disk_dma_command, 2486 .read_data = scsi_read_data, 2487 .write_data = scsi_write_data, 2488 .get_buf = scsi_get_buf, 2489 .load_request = scsi_disk_load_request, 2490 .save_request = scsi_disk_save_request, 2491 }; 2492 2493 static const SCSIReqOps *const scsi_disk_reqops_dispatch[256] = { 2494 [TEST_UNIT_READY] = &scsi_disk_emulate_reqops, 2495 [INQUIRY] = &scsi_disk_emulate_reqops, 2496 [MODE_SENSE] = &scsi_disk_emulate_reqops, 2497 [MODE_SENSE_10] = &scsi_disk_emulate_reqops, 2498 [START_STOP] = &scsi_disk_emulate_reqops, 2499 [ALLOW_MEDIUM_REMOVAL] = &scsi_disk_emulate_reqops, 2500 [READ_CAPACITY_10] = &scsi_disk_emulate_reqops, 2501 [READ_TOC] = &scsi_disk_emulate_reqops, 2502 [READ_DVD_STRUCTURE] = &scsi_disk_emulate_reqops, 2503 [READ_DISC_INFORMATION] = &scsi_disk_emulate_reqops, 2504 [GET_CONFIGURATION] = &scsi_disk_emulate_reqops, 2505 
[GET_EVENT_STATUS_NOTIFICATION] = &scsi_disk_emulate_reqops, 2506 [MECHANISM_STATUS] = &scsi_disk_emulate_reqops, 2507 [SERVICE_ACTION_IN_16] = &scsi_disk_emulate_reqops, 2508 [REQUEST_SENSE] = &scsi_disk_emulate_reqops, 2509 [SYNCHRONIZE_CACHE] = &scsi_disk_emulate_reqops, 2510 [SEEK_10] = &scsi_disk_emulate_reqops, 2511 [MODE_SELECT] = &scsi_disk_emulate_reqops, 2512 [MODE_SELECT_10] = &scsi_disk_emulate_reqops, 2513 [UNMAP] = &scsi_disk_emulate_reqops, 2514 [WRITE_SAME_10] = &scsi_disk_emulate_reqops, 2515 [WRITE_SAME_16] = &scsi_disk_emulate_reqops, 2516 [VERIFY_10] = &scsi_disk_emulate_reqops, 2517 [VERIFY_12] = &scsi_disk_emulate_reqops, 2518 [VERIFY_16] = &scsi_disk_emulate_reqops, 2519 2520 [READ_6] = &scsi_disk_dma_reqops, 2521 [READ_10] = &scsi_disk_dma_reqops, 2522 [READ_12] = &scsi_disk_dma_reqops, 2523 [READ_16] = &scsi_disk_dma_reqops, 2524 [WRITE_6] = &scsi_disk_dma_reqops, 2525 [WRITE_10] = &scsi_disk_dma_reqops, 2526 [WRITE_12] = &scsi_disk_dma_reqops, 2527 [WRITE_16] = &scsi_disk_dma_reqops, 2528 [WRITE_VERIFY_10] = &scsi_disk_dma_reqops, 2529 [WRITE_VERIFY_12] = &scsi_disk_dma_reqops, 2530 [WRITE_VERIFY_16] = &scsi_disk_dma_reqops, 2531 }; 2532 2533 static void scsi_disk_new_request_dump(uint32_t lun, uint32_t tag, uint8_t *buf) 2534 { 2535 int i; 2536 int len = scsi_cdb_length(buf); 2537 char *line_buffer, *p; 2538 2539 line_buffer = g_malloc(len * 5 + 1); 2540 2541 for (i = 0, p = line_buffer; i < len; i++) { 2542 p += sprintf(p, " 0x%02x", buf[i]); 2543 } 2544 trace_scsi_disk_new_request(lun, tag, line_buffer); 2545 2546 g_free(line_buffer); 2547 } 2548 2549 static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun, 2550 uint8_t *buf, void *hba_private) 2551 { 2552 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d); 2553 SCSIRequest *req; 2554 const SCSIReqOps *ops; 2555 uint8_t command; 2556 2557 command = buf[0]; 2558 ops = scsi_disk_reqops_dispatch[command]; 2559 if (!ops) { 2560 ops = &scsi_disk_emulate_reqops; 2561 } 2562 req = scsi_req_alloc(ops, &s->qdev, tag, lun, hba_private); 2563 2564 if (trace_event_get_state_backends(TRACE_SCSI_DISK_NEW_REQUEST)) { 2565 scsi_disk_new_request_dump(lun, tag, buf); 2566 } 2567 2568 return req; 2569 } 2570 2571 #ifdef __linux__ 2572 static int get_device_type(SCSIDiskState *s) 2573 { 2574 uint8_t cmd[16]; 2575 uint8_t buf[36]; 2576 int ret; 2577 2578 memset(cmd, 0, sizeof(cmd)); 2579 memset(buf, 0, sizeof(buf)); 2580 cmd[0] = INQUIRY; 2581 cmd[4] = sizeof(buf); 2582 2583 ret = scsi_SG_IO_FROM_DEV(s->qdev.conf.blk, cmd, sizeof(cmd), 2584 buf, sizeof(buf)); 2585 if (ret < 0) { 2586 return -1; 2587 } 2588 s->qdev.type = buf[0]; 2589 if (buf[1] & 0x80) { 2590 s->features |= 1 << SCSI_DISK_F_REMOVABLE; 2591 } 2592 return 0; 2593 } 2594 2595 static void scsi_block_realize(SCSIDevice *dev, Error **errp) 2596 { 2597 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2598 AioContext *ctx; 2599 int sg_version; 2600 int rc; 2601 2602 if (!s->qdev.conf.blk) { 2603 error_setg(errp, "drive property not set"); 2604 return; 2605 } 2606 2607 if (s->rotation_rate) { 2608 error_report_once("rotation_rate is specified for scsi-block but is " 2609 "not implemented. 
This option is deprecated and will " 2610 "be removed in a future version"); 2611 } 2612 2613 ctx = blk_get_aio_context(s->qdev.conf.blk); 2614 aio_context_acquire(ctx); 2615 2616 /* check we are using a driver managing SG_IO (version 3 and after) */ 2617 rc = blk_ioctl(s->qdev.conf.blk, SG_GET_VERSION_NUM, &sg_version); 2618 if (rc < 0) { 2619 error_setg_errno(errp, -rc, "cannot get SG_IO version number"); 2620 if (rc != -EPERM) { 2621 error_append_hint(errp, "Is this a SCSI device?\n"); 2622 } 2623 goto out; 2624 } 2625 if (sg_version < 30000) { 2626 error_setg(errp, "scsi generic interface too old"); 2627 goto out; 2628 } 2629 2630 /* get device type from INQUIRY data */ 2631 rc = get_device_type(s); 2632 if (rc < 0) { 2633 error_setg(errp, "INQUIRY failed"); 2634 goto out; 2635 } 2636 2637 /* Make a guess for the block size; we'll fix it when the guest sends 2638 * READ CAPACITY. If they don't, they likely would assume these sizes 2639 * anyway. (TODO: check in /sys). 2640 */ 2641 if (s->qdev.type == TYPE_ROM || s->qdev.type == TYPE_WORM) { 2642 s->qdev.blocksize = 2048; 2643 } else { 2644 s->qdev.blocksize = 512; 2645 } 2646 2647 /* Prevent the scsi-block device from being removed via the HMP and QMP 2648 * eject commands. 2649 */ 2650 s->features |= (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS); 2651 2652 scsi_realize(&s->qdev, errp); 2653 scsi_generic_read_device_inquiry(&s->qdev); 2654 2655 out: 2656 aio_context_release(ctx); 2657 } 2658 2659 typedef struct SCSIBlockReq { 2660 SCSIDiskReq req; 2661 sg_io_hdr_t io_header; 2662 2663 /* Selected bytes of the original CDB, copied into our own CDB. */ 2664 uint8_t cmd, cdb1, group_number; 2665 2666 /* CDB passed to SG_IO. */ 2667 uint8_t cdb[16]; 2668 } SCSIBlockReq; 2669 2670 static BlockAIOCB *scsi_block_do_sgio(SCSIBlockReq *req, 2671 int64_t offset, QEMUIOVector *iov, 2672 int direction, 2673 BlockCompletionFunc *cb, void *opaque) 2674 { 2675 sg_io_hdr_t *io_header = &req->io_header; 2676 SCSIDiskReq *r = &req->req; 2677 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 2678 int nb_logical_blocks; 2679 uint64_t lba; 2680 BlockAIOCB *aiocb; 2681 2682 /* This is not supported yet. It can only happen if the guest does 2683 * reads and writes that are not aligned to the logical sector size 2684 * _and_ cover multiple MemoryRegions. 2685 */ 2686 assert(offset % s->qdev.blocksize == 0); 2687 assert(iov->size % s->qdev.blocksize == 0); 2688 2689 io_header->interface_id = 'S'; 2690 2691 /* The data transfer comes from the QEMUIOVector. */ 2692 io_header->dxfer_direction = direction; 2693 io_header->dxfer_len = iov->size; 2694 io_header->dxferp = (void *)iov->iov; 2695 io_header->iovec_count = iov->niov; 2696 assert(io_header->iovec_count == iov->niov); /* no overflow! */ 2697 2698 /* Build a new CDB with the LBA and length patched in, in case 2699 * DMA helpers split the transfer in multiple segments. Do not 2700 * build a CDB smaller than what the guest wanted, and only build 2701 * a larger one if strictly necessary.
2702 */ 2703 io_header->cmdp = req->cdb; 2704 lba = offset / s->qdev.blocksize; 2705 nb_logical_blocks = io_header->dxfer_len / s->qdev.blocksize; 2706 2707 if ((req->cmd >> 5) == 0 && lba <= 0x1ffff) { 2708 /* 6-byte CDB */ 2709 stl_be_p(&req->cdb[0], lba | (req->cmd << 24)); 2710 req->cdb[4] = nb_logical_blocks; 2711 req->cdb[5] = 0; 2712 io_header->cmd_len = 6; 2713 } else if ((req->cmd >> 5) <= 1 && lba <= 0xffffffffULL) { 2714 /* 10-byte CDB */ 2715 req->cdb[0] = (req->cmd & 0x1f) | 0x20; 2716 req->cdb[1] = req->cdb1; 2717 stl_be_p(&req->cdb[2], lba); 2718 req->cdb[6] = req->group_number; 2719 stw_be_p(&req->cdb[7], nb_logical_blocks); 2720 req->cdb[9] = 0; 2721 io_header->cmd_len = 10; 2722 } else if ((req->cmd >> 5) != 4 && lba <= 0xffffffffULL) { 2723 /* 12-byte CDB */ 2724 req->cdb[0] = (req->cmd & 0x1f) | 0xA0; 2725 req->cdb[1] = req->cdb1; 2726 stl_be_p(&req->cdb[2], lba); 2727 stl_be_p(&req->cdb[6], nb_logical_blocks); 2728 req->cdb[10] = req->group_number; 2729 req->cdb[11] = 0; 2730 io_header->cmd_len = 12; 2731 } else { 2732 /* 16-byte CDB */ 2733 req->cdb[0] = (req->cmd & 0x1f) | 0x80; 2734 req->cdb[1] = req->cdb1; 2735 stq_be_p(&req->cdb[2], lba); 2736 stl_be_p(&req->cdb[10], nb_logical_blocks); 2737 req->cdb[14] = req->group_number; 2738 req->cdb[15] = 0; 2739 io_header->cmd_len = 16; 2740 } 2741 2742 /* The rest is as in scsi-generic.c. */ 2743 io_header->mx_sb_len = sizeof(r->req.sense); 2744 io_header->sbp = r->req.sense; 2745 io_header->timeout = UINT_MAX; 2746 io_header->usr_ptr = r; 2747 io_header->flags |= SG_FLAG_DIRECT_IO; 2748 2749 aiocb = blk_aio_ioctl(s->qdev.conf.blk, SG_IO, io_header, cb, opaque); 2750 assert(aiocb != NULL); 2751 return aiocb; 2752 } 2753 2754 static bool scsi_block_no_fua(SCSICommand *cmd) 2755 { 2756 return false; 2757 } 2758 2759 static BlockAIOCB *scsi_block_dma_readv(int64_t offset, 2760 QEMUIOVector *iov, 2761 BlockCompletionFunc *cb, void *cb_opaque, 2762 void *opaque) 2763 { 2764 SCSIBlockReq *r = opaque; 2765 return scsi_block_do_sgio(r, offset, iov, 2766 SG_DXFER_FROM_DEV, cb, cb_opaque); 2767 } 2768 2769 static BlockAIOCB *scsi_block_dma_writev(int64_t offset, 2770 QEMUIOVector *iov, 2771 BlockCompletionFunc *cb, void *cb_opaque, 2772 void *opaque) 2773 { 2774 SCSIBlockReq *r = opaque; 2775 return scsi_block_do_sgio(r, offset, iov, 2776 SG_DXFER_TO_DEV, cb, cb_opaque); 2777 } 2778 2779 static bool scsi_block_is_passthrough(SCSIDiskState *s, uint8_t *buf) 2780 { 2781 switch (buf[0]) { 2782 case VERIFY_10: 2783 case VERIFY_12: 2784 case VERIFY_16: 2785 /* Check if BYTCHK == 0x01 (data-out buffer contains data 2786 * for the number of logical blocks specified in the length 2787 * field). For other modes, do not use scatter/gather operation. 2788 */ 2789 if ((buf[1] & 6) == 2) { 2790 return false; 2791 } 2792 break; 2793 2794 case READ_6: 2795 case READ_10: 2796 case READ_12: 2797 case READ_16: 2798 case WRITE_6: 2799 case WRITE_10: 2800 case WRITE_12: 2801 case WRITE_16: 2802 case WRITE_VERIFY_10: 2803 case WRITE_VERIFY_12: 2804 case WRITE_VERIFY_16: 2805 /* MMC writing cannot be done via DMA helpers, because it sometimes 2806 * involves writing beyond the maximum LBA or to negative LBA (lead-in). 2807 * We might use scsi_block_dma_reqops as long as no writing commands are 2808 * seen, but performance usually isn't paramount on optical media. So, 2809 * just make scsi-block operate the same as scsi-generic for them. 
2810 */ 2811 if (s->qdev.type != TYPE_ROM) { 2812 return false; 2813 } 2814 break; 2815 2816 default: 2817 break; 2818 } 2819 2820 return true; 2821 } 2822 2823 2824 static int32_t scsi_block_dma_command(SCSIRequest *req, uint8_t *buf) 2825 { 2826 SCSIBlockReq *r = (SCSIBlockReq *)req; 2827 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 2828 2829 r->cmd = req->cmd.buf[0]; 2830 switch (r->cmd >> 5) { 2831 case 0: 2832 /* 6-byte CDB. */ 2833 r->cdb1 = r->group_number = 0; 2834 break; 2835 case 1: 2836 /* 10-byte CDB. */ 2837 r->cdb1 = req->cmd.buf[1]; 2838 r->group_number = req->cmd.buf[6]; 2839 break; 2840 case 4: 2841 /* 12-byte CDB. */ 2842 r->cdb1 = req->cmd.buf[1]; 2843 r->group_number = req->cmd.buf[10]; 2844 break; 2845 case 5: 2846 /* 16-byte CDB. */ 2847 r->cdb1 = req->cmd.buf[1]; 2848 r->group_number = req->cmd.buf[14]; 2849 break; 2850 default: 2851 abort(); 2852 } 2853 2854 /* Protection information is not supported. For SCSI versions 2 and 2855 * older (as determined by snooping the guest's INQUIRY commands), 2856 * there is no RD/WR/VRPROTECT, so skip this check in these versions. 2857 */ 2858 if (s->qdev.scsi_version > 2 && (req->cmd.buf[1] & 0xe0)) { 2859 scsi_check_condition(&r->req, SENSE_CODE(INVALID_FIELD)); 2860 return 0; 2861 } 2862 2863 r->req.status = &r->io_header.status; 2864 return scsi_disk_dma_command(req, buf); 2865 } 2866 2867 static const SCSIReqOps scsi_block_dma_reqops = { 2868 .size = sizeof(SCSIBlockReq), 2869 .free_req = scsi_free_request, 2870 .send_command = scsi_block_dma_command, 2871 .read_data = scsi_read_data, 2872 .write_data = scsi_write_data, 2873 .get_buf = scsi_get_buf, 2874 .load_request = scsi_disk_load_request, 2875 .save_request = scsi_disk_save_request, 2876 }; 2877 2878 static SCSIRequest *scsi_block_new_request(SCSIDevice *d, uint32_t tag, 2879 uint32_t lun, uint8_t *buf, 2880 void *hba_private) 2881 { 2882 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d); 2883 2884 if (scsi_block_is_passthrough(s, buf)) { 2885 return scsi_req_alloc(&scsi_generic_req_ops, &s->qdev, tag, lun, 2886 hba_private); 2887 } else { 2888 return scsi_req_alloc(&scsi_block_dma_reqops, &s->qdev, tag, lun, 2889 hba_private); 2890 } 2891 } 2892 2893 static int scsi_block_parse_cdb(SCSIDevice *d, SCSICommand *cmd, 2894 uint8_t *buf, void *hba_private) 2895 { 2896 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d); 2897 2898 if (scsi_block_is_passthrough(s, buf)) { 2899 return scsi_bus_parse_cdb(&s->qdev, cmd, buf, hba_private); 2900 } else { 2901 return scsi_req_parse_cdb(&s->qdev, cmd, buf); 2902 } 2903 } 2904 2905 static void scsi_block_update_sense(SCSIRequest *req) 2906 { 2907 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 2908 SCSIBlockReq *br = DO_UPCAST(SCSIBlockReq, req, r); 2909 r->req.sense_len = MIN(br->io_header.sb_len_wr, sizeof(r->req.sense)); 2910 } 2911 #endif 2912 2913 static 2914 BlockAIOCB *scsi_dma_readv(int64_t offset, QEMUIOVector *iov, 2915 BlockCompletionFunc *cb, void *cb_opaque, 2916 void *opaque) 2917 { 2918 SCSIDiskReq *r = opaque; 2919 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 2920 return blk_aio_preadv(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque); 2921 } 2922 2923 static 2924 BlockAIOCB *scsi_dma_writev(int64_t offset, QEMUIOVector *iov, 2925 BlockCompletionFunc *cb, void *cb_opaque, 2926 void *opaque) 2927 { 2928 SCSIDiskReq *r = opaque; 2929 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 2930 return blk_aio_pwritev(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque); 2931 } 
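/*
 * Editorial note (illustrative sketch, not part of the original source):
 * scsi_dma_readv() and scsi_dma_writev() above are the default
 * implementations of the SCSIDiskClass dma_readv/dma_writev hooks installed
 * in scsi_disk_base_class_initfn() below; they simply forward to
 * blk_aio_preadv()/blk_aio_pwritev(). The read/write path in this file is
 * expected to dispatch through these class hooks rather than call the block
 * layer directly, roughly as follows ("cb" is a placeholder for whatever
 * completion callback that path actually uses):
 *
 *     SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
 *     r->req.aiocb = sdc->dma_readv(r->sector << BDRV_SECTOR_BITS,
 *                                   &r->qiov, cb, r, r);
 *
 * scsi-block overrides both hooks with scsi_block_dma_readv() and
 * scsi_block_dma_writev(), so the same path issues SG_IO requests instead of
 * plain block-layer I/O.
 */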
2932 2933 static void scsi_disk_base_class_initfn(ObjectClass *klass, void *data) 2934 { 2935 DeviceClass *dc = DEVICE_CLASS(klass); 2936 SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass); 2937 2938 dc->fw_name = "disk"; 2939 dc->reset = scsi_disk_reset; 2940 sdc->dma_readv = scsi_dma_readv; 2941 sdc->dma_writev = scsi_dma_writev; 2942 sdc->need_fua_emulation = scsi_is_cmd_fua; 2943 } 2944 2945 static const TypeInfo scsi_disk_base_info = { 2946 .name = TYPE_SCSI_DISK_BASE, 2947 .parent = TYPE_SCSI_DEVICE, 2948 .class_init = scsi_disk_base_class_initfn, 2949 .instance_size = sizeof(SCSIDiskState), 2950 .class_size = sizeof(SCSIDiskClass), 2951 .abstract = true, 2952 }; 2953 2954 #define DEFINE_SCSI_DISK_PROPERTIES() \ 2955 DEFINE_PROP_DRIVE_IOTHREAD("drive", SCSIDiskState, qdev.conf.blk), \ 2956 DEFINE_BLOCK_PROPERTIES_BASE(SCSIDiskState, qdev.conf), \ 2957 DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf), \ 2958 DEFINE_PROP_STRING("ver", SCSIDiskState, version), \ 2959 DEFINE_PROP_STRING("serial", SCSIDiskState, serial), \ 2960 DEFINE_PROP_STRING("vendor", SCSIDiskState, vendor), \ 2961 DEFINE_PROP_STRING("product", SCSIDiskState, product), \ 2962 DEFINE_PROP_STRING("device_id", SCSIDiskState, device_id) 2963 2964 2965 static Property scsi_hd_properties[] = { 2966 DEFINE_SCSI_DISK_PROPERTIES(), 2967 DEFINE_PROP_BIT("removable", SCSIDiskState, features, 2968 SCSI_DISK_F_REMOVABLE, false), 2969 DEFINE_PROP_BIT("dpofua", SCSIDiskState, features, 2970 SCSI_DISK_F_DPOFUA, false), 2971 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0), 2972 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0), 2973 DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0), 2974 DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size, 2975 DEFAULT_MAX_UNMAP_SIZE), 2976 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size, 2977 DEFAULT_MAX_IO_SIZE), 2978 DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0), 2979 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version, 2980 5), 2981 DEFINE_BLOCK_CHS_PROPERTIES(SCSIDiskState, qdev.conf), 2982 DEFINE_PROP_END_OF_LIST(), 2983 }; 2984 2985 static const VMStateDescription vmstate_scsi_disk_state = { 2986 .name = "scsi-disk", 2987 .version_id = 1, 2988 .minimum_version_id = 1, 2989 .fields = (VMStateField[]) { 2990 VMSTATE_SCSI_DEVICE(qdev, SCSIDiskState), 2991 VMSTATE_BOOL(media_changed, SCSIDiskState), 2992 VMSTATE_BOOL(media_event, SCSIDiskState), 2993 VMSTATE_BOOL(eject_request, SCSIDiskState), 2994 VMSTATE_BOOL(tray_open, SCSIDiskState), 2995 VMSTATE_BOOL(tray_locked, SCSIDiskState), 2996 VMSTATE_END_OF_LIST() 2997 } 2998 }; 2999 3000 static void scsi_hd_class_initfn(ObjectClass *klass, void *data) 3001 { 3002 DeviceClass *dc = DEVICE_CLASS(klass); 3003 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); 3004 3005 sc->realize = scsi_hd_realize; 3006 sc->alloc_req = scsi_new_request; 3007 sc->unit_attention_reported = scsi_disk_unit_attention_reported; 3008 dc->desc = "virtual SCSI disk"; 3009 dc->props = scsi_hd_properties; 3010 dc->vmsd = &vmstate_scsi_disk_state; 3011 } 3012 3013 static const TypeInfo scsi_hd_info = { 3014 .name = "scsi-hd", 3015 .parent = TYPE_SCSI_DISK_BASE, 3016 .class_init = scsi_hd_class_initfn, 3017 }; 3018 3019 static Property scsi_cd_properties[] = { 3020 DEFINE_SCSI_DISK_PROPERTIES(), 3021 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0), 3022 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0), 3023 DEFINE_PROP_UINT16("port_index", SCSIDiskState, 
port_index, 0), 3024 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size, 3025 DEFAULT_MAX_IO_SIZE), 3026 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version, 3027 5), 3028 DEFINE_PROP_END_OF_LIST(), 3029 }; 3030 3031 static void scsi_cd_class_initfn(ObjectClass *klass, void *data) 3032 { 3033 DeviceClass *dc = DEVICE_CLASS(klass); 3034 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); 3035 3036 sc->realize = scsi_cd_realize; 3037 sc->alloc_req = scsi_new_request; 3038 sc->unit_attention_reported = scsi_disk_unit_attention_reported; 3039 dc->desc = "virtual SCSI CD-ROM"; 3040 dc->props = scsi_cd_properties; 3041 dc->vmsd = &vmstate_scsi_disk_state; 3042 } 3043 3044 static const TypeInfo scsi_cd_info = { 3045 .name = "scsi-cd", 3046 .parent = TYPE_SCSI_DISK_BASE, 3047 .class_init = scsi_cd_class_initfn, 3048 }; 3049 3050 #ifdef __linux__ 3051 static Property scsi_block_properties[] = { 3052 DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf), \ 3053 DEFINE_PROP_DRIVE("drive", SCSIDiskState, qdev.conf.blk), 3054 DEFINE_PROP_BOOL("share-rw", SCSIDiskState, qdev.conf.share_rw, false), 3055 DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0), 3056 DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size, 3057 DEFAULT_MAX_UNMAP_SIZE), 3058 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size, 3059 DEFAULT_MAX_IO_SIZE), 3060 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version, 3061 -1), 3062 DEFINE_PROP_END_OF_LIST(), 3063 }; 3064 3065 static void scsi_block_class_initfn(ObjectClass *klass, void *data) 3066 { 3067 DeviceClass *dc = DEVICE_CLASS(klass); 3068 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); 3069 SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass); 3070 3071 sc->realize = scsi_block_realize; 3072 sc->alloc_req = scsi_block_new_request; 3073 sc->parse_cdb = scsi_block_parse_cdb; 3074 sdc->dma_readv = scsi_block_dma_readv; 3075 sdc->dma_writev = scsi_block_dma_writev; 3076 sdc->update_sense = scsi_block_update_sense; 3077 sdc->need_fua_emulation = scsi_block_no_fua; 3078 dc->desc = "SCSI block device passthrough"; 3079 dc->props = scsi_block_properties; 3080 dc->vmsd = &vmstate_scsi_disk_state; 3081 } 3082 3083 static const TypeInfo scsi_block_info = { 3084 .name = "scsi-block", 3085 .parent = TYPE_SCSI_DISK_BASE, 3086 .class_init = scsi_block_class_initfn, 3087 }; 3088 #endif 3089 3090 static Property scsi_disk_properties[] = { 3091 DEFINE_SCSI_DISK_PROPERTIES(), 3092 DEFINE_PROP_BIT("removable", SCSIDiskState, features, 3093 SCSI_DISK_F_REMOVABLE, false), 3094 DEFINE_PROP_BIT("dpofua", SCSIDiskState, features, 3095 SCSI_DISK_F_DPOFUA, false), 3096 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0), 3097 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0), 3098 DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0), 3099 DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size, 3100 DEFAULT_MAX_UNMAP_SIZE), 3101 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size, 3102 DEFAULT_MAX_IO_SIZE), 3103 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version, 3104 5), 3105 DEFINE_PROP_END_OF_LIST(), 3106 }; 3107 3108 static void scsi_disk_class_initfn(ObjectClass *klass, void *data) 3109 { 3110 DeviceClass *dc = DEVICE_CLASS(klass); 3111 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); 3112 3113 sc->realize = scsi_disk_realize; 3114 sc->alloc_req = scsi_new_request; 3115 sc->unit_attention_reported = scsi_disk_unit_attention_reported; 3116 
dc->fw_name = "disk"; 3117 dc->desc = "virtual SCSI disk or CD-ROM (legacy)"; 3118 dc->reset = scsi_disk_reset; 3119 dc->props = scsi_disk_properties; 3120 dc->vmsd = &vmstate_scsi_disk_state; 3121 } 3122 3123 static const TypeInfo scsi_disk_info = { 3124 .name = "scsi-disk", 3125 .parent = TYPE_SCSI_DISK_BASE, 3126 .class_init = scsi_disk_class_initfn, 3127 }; 3128 3129 static void scsi_disk_register_types(void) 3130 { 3131 type_register_static(&scsi_disk_base_info); 3132 type_register_static(&scsi_hd_info); 3133 type_register_static(&scsi_cd_info); 3134 #ifdef __linux__ 3135 type_register_static(&scsi_block_info); 3136 #endif 3137 type_register_static(&scsi_disk_info); 3138 } 3139 3140 type_init(scsi_disk_register_types) 3141
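/*
 * Usage sketch (editorial addition, not from the original source): the device
 * types registered above are instantiated from the QEMU command line. A
 * hypothetical invocation, assuming a virtio-scsi HBA and a qcow2 image named
 * disk.qcow2:
 *
 *     -device virtio-scsi-pci,id=scsi0
 *     -blockdev driver=qcow2,node-name=disk0,file.driver=file,file.filename=disk.qcow2
 *     -device scsi-hd,bus=scsi0.0,drive=disk0,serial=SN0001,rotation_rate=1
 *
 * "drive", "serial" and "rotation_rate" correspond to the properties declared
 * in scsi_hd_properties above; scsi-cd and (on Linux hosts) scsi-block are
 * attached the same way with their respective property lists.
 */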