/*
 * SCSI Device emulation
 *
 * Copyright (c) 2006 CodeSourcery.
 * Based on code by Fabrice Bellard
 *
 * Written by Paul Brook
 * Modifications:
 *  2009-Dec-12 Artyom Tarasenko : implemented standard inquiry for the case
 *                                 when the allocation length of the CDB is
 *                                 smaller than 36.
 *  2009-Oct-13 Artyom Tarasenko : implemented the block descriptor in the
 *                                 MODE SENSE response.
 *
 * This code is licensed under the LGPL.
 *
 * Note that this file only handles the SCSI architecture model and device
 * commands.  Emulation of interface/link layer protocols is handled by
 * the host adapter emulator.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "hw/scsi/scsi.h"
#include "migration/qemu-file-types.h"
#include "migration/vmstate.h"
#include "hw/scsi/emulation.h"
#include "scsi/constants.h"
#include "sysemu/sysemu.h"
#include "sysemu/block-backend.h"
#include "sysemu/blockdev.h"
#include "hw/block/block.h"
#include "sysemu/dma.h"
#include "qemu/cutils.h"
#include "trace.h"

#ifdef __linux__
#include <scsi/sg.h>
#endif

#define SCSI_WRITE_SAME_MAX         (512 * KiB)
#define SCSI_DMA_BUF_SIZE           (128 * KiB)
#define SCSI_MAX_INQUIRY_LEN        256
#define SCSI_MAX_MODE_LEN           256

#define DEFAULT_DISCARD_GRANULARITY (4 * KiB)
#define DEFAULT_MAX_UNMAP_SIZE      (1 * GiB)
#define DEFAULT_MAX_IO_SIZE         INT_MAX     /* 2 GB - 1 block */

#define TYPE_SCSI_DISK_BASE         "scsi-disk-base"

#define SCSI_DISK_BASE(obj) \
     OBJECT_CHECK(SCSIDiskState, (obj), TYPE_SCSI_DISK_BASE)
#define SCSI_DISK_BASE_CLASS(klass) \
     OBJECT_CLASS_CHECK(SCSIDiskClass, (klass), TYPE_SCSI_DISK_BASE)
#define SCSI_DISK_BASE_GET_CLASS(obj) \
     OBJECT_GET_CLASS(SCSIDiskClass, (obj), TYPE_SCSI_DISK_BASE)

typedef struct SCSIDiskClass {
    SCSIDeviceClass parent_class;
    DMAIOFunc       *dma_readv;
    DMAIOFunc       *dma_writev;
    bool            (*need_fua_emulation)(SCSICommand *cmd);
    void            (*update_sense)(SCSIRequest *r);
} SCSIDiskClass;

typedef struct SCSIDiskReq {
    SCSIRequest req;
    /* Both sector and sector_count are in terms of qemu 512 byte blocks. */
    uint64_t sector;
    uint32_t sector_count;
    uint32_t buflen;
    bool started;
    bool need_fua_emulation;
    struct iovec iov;
    QEMUIOVector qiov;
    BlockAcctCookie acct;
    unsigned char *status;
} SCSIDiskReq;

#define SCSI_DISK_F_REMOVABLE             0
#define SCSI_DISK_F_DPOFUA                1
#define SCSI_DISK_F_NO_REMOVABLE_DEVOPS   2

typedef struct SCSIDiskState
{
    SCSIDevice qdev;
    uint32_t features;
    bool media_changed;
    bool media_event;
    bool eject_request;
    uint16_t port_index;
    uint64_t max_unmap_size;
    uint64_t max_io_size;
    QEMUBH *bh;
    char *version;
    char *serial;
    char *vendor;
    char *product;
    char *device_id;
    bool tray_open;
    bool tray_locked;
    /*
     * 0x0000        - rotation rate not reported
     * 0x0001        - non-rotating medium (SSD)
     * 0x0002-0x0400 - reserved
     * 0x0401-0xfffe - rotations per minute
     * 0xffff        - reserved
     */
    uint16_t rotation_rate;
} SCSIDiskState;

static bool scsi_handle_rw_error(SCSIDiskReq *r, int error, bool acct_failed);

static void scsi_free_request(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    qemu_vfree(r->iov.iov_base);
}

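/*
 * Illustrative usage note (added for clarity, not part of the original
 * comments): most error paths in this file call
 *     scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
 * which builds the sense buffer and completes the request with a
 * CHECK CONDITION status, as implemented by the helper below.
 */
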
/* Helper function for command completion with sense.  */
static void scsi_check_condition(SCSIDiskReq *r, SCSISense sense)
{
    trace_scsi_disk_check_condition(r->req.tag, sense.key, sense.asc,
                                    sense.ascq);
    scsi_req_build_sense(&r->req, sense);
    scsi_req_complete(&r->req, CHECK_CONDITION);
}

static void scsi_init_iovec(SCSIDiskReq *r, size_t size)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    if (!r->iov.iov_base) {
        r->buflen = size;
        r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen);
    }
    r->iov.iov_len = MIN(r->sector_count * 512, r->buflen);
    qemu_iovec_init_external(&r->qiov, &r->iov, 1);
}

static void scsi_disk_save_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    qemu_put_be64s(f, &r->sector);
    qemu_put_be32s(f, &r->sector_count);
    qemu_put_be32s(f, &r->buflen);
    if (r->buflen) {
        if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
            qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len);
        } else if (!req->retry) {
            uint32_t len = r->iov.iov_len;
            qemu_put_be32s(f, &len);
            qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len);
        }
    }
}

static void scsi_disk_load_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    qemu_get_be64s(f, &r->sector);
    qemu_get_be32s(f, &r->sector_count);
    qemu_get_be32s(f, &r->buflen);
    if (r->buflen) {
        scsi_init_iovec(r, r->buflen);
        if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
            qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len);
        } else if (!r->req.retry) {
            uint32_t len;
            qemu_get_be32s(f, &len);
            r->iov.iov_len = len;
            assert(r->iov.iov_len <= r->buflen);
            qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len);
        }
    }

    qemu_iovec_init_external(&r->qiov, &r->iov, 1);
}

static bool scsi_disk_req_check_error(SCSIDiskReq *r, int ret, bool acct_failed)
{
    if (r->req.io_canceled) {
        scsi_req_cancel_complete(&r->req);
        return true;
    }

    if (ret < 0 || (r->status && *r->status)) {
        return scsi_handle_rw_error(r, -ret, acct_failed);
    }

    return false;
}

static void scsi_aio_complete(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;
    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (scsi_disk_req_check_error(r, ret, true)) {
        goto done;
    }

    block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    scsi_req_complete(&r->req, GOOD);

done:
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
    scsi_req_unref(&r->req);
}

static bool scsi_is_cmd_fua(SCSICommand *cmd)
{
    switch (cmd->buf[0]) {
    case READ_10:
    case READ_12:
    case READ_16:
    case WRITE_10:
    case WRITE_12:
    case WRITE_16:
        return (cmd->buf[1] & 8) != 0;

    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
    case WRITE_VERIFY_10:
    case WRITE_VERIFY_12:
    case WRITE_VERIFY_16:
        return true;

    case READ_6:
    case WRITE_6:
    default:
        return false;
    }
}

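/*
 * Illustrative example (hypothetical CDB, not taken from this file): a
 * WRITE(10) of 2a 08 00 00 08 00 00 00 10 00 has bit 3 of byte 1 set, so
 * scsi_is_cmd_fua() returns true and scsi_write_do_fua() below emulates the
 * FUA semantics with a flush when the backend cache requires it.
 */
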
static void scsi_write_do_fua(SCSIDiskReq *r)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb == NULL);
    assert(!r->req.io_canceled);

    if (r->need_fua_emulation) {
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
        return;
    }

    scsi_req_complete(&r->req, GOOD);
    scsi_req_unref(&r->req);
}

static void scsi_dma_complete_noio(SCSIDiskReq *r, int ret)
{
    assert(r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    r->sector += r->sector_count;
    r->sector_count = 0;
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        scsi_write_do_fua(r);
        return;
    } else {
        scsi_req_complete(&r->req, GOOD);
    }

done:
    scsi_req_unref(&r->req);
}

static void scsi_dma_complete(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    }
    scsi_dma_complete_noio(r, ret);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}

static void scsi_read_complete_noio(SCSIDiskReq *r, int ret)
{
    uint32_t n;

    assert(r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    n = r->qiov.size / 512;
    r->sector += n;
    r->sector_count -= n;
    scsi_req_data(&r->req, r->qiov.size);

done:
    scsi_req_unref(&r->req);
}

static void scsi_read_complete(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
        trace_scsi_disk_read_complete(r->req.tag, r->qiov.size);
    }
    scsi_read_complete_noio(r, ret);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}

/* Actually issue a read to the block device.  */
static void scsi_do_read(SCSIDiskReq *r, int ret)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));

    assert(r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    /* The request is used as the AIO opaque value, so add a ref.  */
    scsi_req_ref(&r->req);

    if (r->req.sg) {
        dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_READ);
        r->req.resid -= r->req.sg->size;
        r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
                                  r->req.sg, r->sector << BDRV_SECTOR_BITS,
                                  BDRV_SECTOR_SIZE,
                                  sdc->dma_readv, r, scsi_dma_complete, r,
                                  DMA_DIRECTION_FROM_DEVICE);
    } else {
        scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         r->qiov.size, BLOCK_ACCT_READ);
        r->req.aiocb = sdc->dma_readv(r->sector << BDRV_SECTOR_BITS, &r->qiov,
                                      scsi_read_complete, r, r);
    }

done:
    scsi_req_unref(&r->req);
}

static void scsi_do_read_cb(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    }
    scsi_do_read(opaque, ret);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}

/* Read more data from scsi device into buffer.  */
static void scsi_read_data(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    bool first;

    trace_scsi_disk_read_data_count(r->sector_count);
    if (r->sector_count == 0) {
        /* This also clears the sense buffer for REQUEST SENSE.  */
        scsi_req_complete(&r->req, GOOD);
        return;
    }

    /* No data transfer may already be in progress */
    assert(r->req.aiocb == NULL);

    /* The request is used as the AIO opaque value, so add a ref.  */
    scsi_req_ref(&r->req);
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        trace_scsi_disk_read_data_invalid();
        scsi_read_complete_noio(r, -EINVAL);
        return;
    }

    if (!blk_is_available(req->dev->conf.blk)) {
        scsi_read_complete_noio(r, -ENOMEDIUM);
        return;
    }

    first = !r->started;
    r->started = true;
    if (first && r->need_fua_emulation) {
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_do_read_cb, r);
    } else {
        scsi_do_read(r, 0);
    }
}

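/*
 * Descriptive note (added): the action chosen in scsi_handle_rw_error()
 * below comes from blk_get_error_action(), i.e. from the drive's
 * rerror=/werror= policy.  For example, werror=stop maps to
 * BLOCK_ERROR_ACTION_STOP, so the request is queued for retry and the VM is
 * paused instead of the guest seeing a CHECK CONDITION.
 */
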
/*
 * scsi_handle_rw_error has two return values.  False means that the error
 * must be ignored, true means that the error has been processed and the
 * caller should not do anything else for this request.  Note that
 * scsi_handle_rw_error always manages its reference counts, independent
 * of the return value.
 */
static bool scsi_handle_rw_error(SCSIDiskReq *r, int error, bool acct_failed)
{
    bool is_read = (r->req.cmd.mode == SCSI_XFER_FROM_DEV);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
    BlockErrorAction action = blk_get_error_action(s->qdev.conf.blk,
                                                   is_read, error);

    if (action == BLOCK_ERROR_ACTION_REPORT) {
        if (acct_failed) {
            block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
        }
        switch (error) {
        case 0:
            /* A passthrough command has run and has produced sense data; check
             * whether the error has to be handled by the guest or should rather
             * pause the host.
             */
            assert(r->status && *r->status);
            if (scsi_sense_buf_is_guest_recoverable(r->req.sense,
                                                    sizeof(r->req.sense))) {
                /* These errors are handled by guest. */
                sdc->update_sense(&r->req);
                scsi_req_complete(&r->req, *r->status);
                return true;
            }
            error = scsi_sense_buf_to_errno(r->req.sense, sizeof(r->req.sense));
            break;
        case ENOMEDIUM:
            scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
            break;
        case ENOMEM:
            scsi_check_condition(r, SENSE_CODE(TARGET_FAILURE));
            break;
        case EINVAL:
            scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
            break;
        case ENOSPC:
            scsi_check_condition(r, SENSE_CODE(SPACE_ALLOC_FAILED));
            break;
        default:
            scsi_check_condition(r, SENSE_CODE(IO_ERROR));
            break;
        }
    }

    blk_error_action(s->qdev.conf.blk, action, is_read, error);
    if (action == BLOCK_ERROR_ACTION_IGNORE) {
        scsi_req_complete(&r->req, 0);
        return true;
    }

    if (action == BLOCK_ERROR_ACTION_STOP) {
        scsi_req_retry(&r->req);
    }
    return true;
}

static void scsi_write_complete_noio(SCSIDiskReq *r, int ret)
{
    uint32_t n;

    assert(r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    n = r->qiov.size / 512;
    r->sector += n;
    r->sector_count -= n;
    if (r->sector_count == 0) {
        scsi_write_do_fua(r);
        return;
    } else {
        scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
        trace_scsi_disk_write_complete_noio(r->req.tag, r->qiov.size);
        scsi_req_data(&r->req, r->qiov.size);
    }

done:
    scsi_req_unref(&r->req);
}

static void scsi_write_complete(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    }
    scsi_write_complete_noio(r, ret);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}

static void scsi_write_data(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));

    /* No data transfer may already be in progress */
    assert(r->req.aiocb == NULL);

    /* The request is used as the AIO opaque value, so add a ref.  */
    scsi_req_ref(&r->req);
    if (r->req.cmd.mode != SCSI_XFER_TO_DEV) {
        trace_scsi_disk_write_data_invalid();
        scsi_write_complete_noio(r, -EINVAL);
        return;
    }

    if (!r->req.sg && !r->qiov.size) {
        /* Called for the first time.  Ask the driver to send us more data. */
        r->started = true;
        scsi_write_complete_noio(r, 0);
        return;
    }
    if (!blk_is_available(req->dev->conf.blk)) {
        scsi_write_complete_noio(r, -ENOMEDIUM);
        return;
    }

    if (r->req.cmd.buf[0] == VERIFY_10 || r->req.cmd.buf[0] == VERIFY_12 ||
        r->req.cmd.buf[0] == VERIFY_16) {
        if (r->req.sg) {
            scsi_dma_complete_noio(r, 0);
        } else {
            scsi_write_complete_noio(r, 0);
        }
        return;
    }

    if (r->req.sg) {
        dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_WRITE);
        r->req.resid -= r->req.sg->size;
        r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
                                  r->req.sg, r->sector << BDRV_SECTOR_BITS,
                                  BDRV_SECTOR_SIZE,
                                  sdc->dma_writev, r, scsi_dma_complete, r,
                                  DMA_DIRECTION_TO_DEVICE);
    } else {
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         r->qiov.size, BLOCK_ACCT_WRITE);
        r->req.aiocb = sdc->dma_writev(r->sector << BDRV_SECTOR_BITS, &r->qiov,
                                       scsi_write_complete, r, r);
    }
}

/* Return a pointer to the data buffer.  */
static uint8_t *scsi_get_buf(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    return (uint8_t *)r->iov.iov_base;
}

static int scsi_disk_emulate_vpd_page(SCSIRequest *req, uint8_t *outbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    uint8_t page_code = req->cmd.buf[2];
    int start, buflen = 0;

    outbuf[buflen++] = s->qdev.type & 0x1f;
    outbuf[buflen++] = page_code;
    outbuf[buflen++] = 0x00;
    outbuf[buflen++] = 0x00;
    start = buflen;

    switch (page_code) {
    case 0x00: /* Supported page codes, mandatory */
    {
        trace_scsi_disk_emulate_vpd_page_00(req->cmd.xfer);
        outbuf[buflen++] = 0x00; /* list of supported pages (this page) */
        if (s->serial) {
            outbuf[buflen++] = 0x80; /* unit serial number */
        }
        outbuf[buflen++] = 0x83; /* device identification */
        if (s->qdev.type == TYPE_DISK) {
            outbuf[buflen++] = 0xb0; /* block limits */
            outbuf[buflen++] = 0xb1; /* block device characteristics */
            outbuf[buflen++] = 0xb2; /* thin provisioning */
        }
        break;
    }
    case 0x80: /* Device serial number, optional */
    {
        int l;

        if (!s->serial) {
            trace_scsi_disk_emulate_vpd_page_80_not_supported();
            return -1;
        }

        l = strlen(s->serial);
        if (l > 36) {
            l = 36;
        }

        trace_scsi_disk_emulate_vpd_page_80(req->cmd.xfer);
        memcpy(outbuf + buflen, s->serial, l);
        buflen += l;
        break;
    }

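    /*
     * Hedged example of the page 0x83 payload built below, assuming a
     * device_id of "QEMU-1" (hypothetical value): the first descriptor is
     * 02 00 00 06 followed by the six ASCII bytes; if a wwn is configured,
     * an 8-byte NAA descriptor (01 03 00 08 <wwn>) is appended.
     */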
    case 0x83: /* Device identification page, mandatory */
    {
        int id_len = s->device_id ? MIN(strlen(s->device_id), 255 - 8) : 0;

        trace_scsi_disk_emulate_vpd_page_83(req->cmd.xfer);

        if (id_len) {
            outbuf[buflen++] = 0x2;    /* ASCII */
            outbuf[buflen++] = 0;      /* not officially assigned */
            outbuf[buflen++] = 0;      /* reserved */
            outbuf[buflen++] = id_len; /* length of data following */
            memcpy(outbuf + buflen, s->device_id, id_len);
            buflen += id_len;
        }

        if (s->qdev.wwn) {
            outbuf[buflen++] = 0x1; /* Binary */
            outbuf[buflen++] = 0x3; /* NAA */
            outbuf[buflen++] = 0;   /* reserved */
            outbuf[buflen++] = 8;
            stq_be_p(&outbuf[buflen], s->qdev.wwn);
            buflen += 8;
        }

        if (s->qdev.port_wwn) {
            outbuf[buflen++] = 0x61; /* SAS / Binary */
            outbuf[buflen++] = 0x93; /* PIV / Target port / NAA */
            outbuf[buflen++] = 0;    /* reserved */
            outbuf[buflen++] = 8;
            stq_be_p(&outbuf[buflen], s->qdev.port_wwn);
            buflen += 8;
        }

        if (s->port_index) {
            outbuf[buflen++] = 0x61; /* SAS / Binary */

            /* PIV/Target port/relative target port */
            outbuf[buflen++] = 0x94;

            outbuf[buflen++] = 0;    /* reserved */
            outbuf[buflen++] = 4;
            stw_be_p(&outbuf[buflen + 2], s->port_index);
            buflen += 4;
        }
        break;
    }
    case 0xb0: /* block limits */
    {
        SCSIBlockLimits bl = {};

        if (s->qdev.type == TYPE_ROM) {
            trace_scsi_disk_emulate_vpd_page_b0_not_supported();
            return -1;
        }
        bl.wsnz = 1;
        bl.unmap_sectors =
            s->qdev.conf.discard_granularity / s->qdev.blocksize;
        bl.min_io_size =
            s->qdev.conf.min_io_size / s->qdev.blocksize;
        bl.opt_io_size =
            s->qdev.conf.opt_io_size / s->qdev.blocksize;
        bl.max_unmap_sectors =
            s->max_unmap_size / s->qdev.blocksize;
        bl.max_io_sectors =
            s->max_io_size / s->qdev.blocksize;
        /* 255 descriptors fit in 4 KiB with an 8-byte header */
        bl.max_unmap_descr = 255;

        if (s->qdev.type == TYPE_DISK) {
            int max_transfer_blk = blk_get_max_transfer(s->qdev.conf.blk);
            int max_io_sectors_blk =
                max_transfer_blk / s->qdev.blocksize;

            bl.max_io_sectors =
                MIN_NON_ZERO(max_io_sectors_blk, bl.max_io_sectors);
        }
        buflen += scsi_emulate_block_limits(outbuf + buflen, &bl);
        break;
    }
    case 0xb1: /* block device characteristics */
    {
        buflen = 0x40;
        outbuf[4] = (s->rotation_rate >> 8) & 0xff;
        outbuf[5] = s->rotation_rate & 0xff;
        outbuf[6] = 0; /* PRODUCT TYPE */
        outbuf[7] = 0; /* WABEREQ | WACEREQ | NOMINAL FORM FACTOR */
        outbuf[8] = 0; /* VBULS */
        break;
    }
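    /*
     * Descriptive note (added): with the rotation_rate property set to 1,
     * the page above advertises a non-rotating medium, which Linux guests
     * typically expose as /sys/block/<dev>/queue/rotational == 0.
     */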
    case 0xb2: /* thin provisioning */
    {
        buflen = 8;
        outbuf[4] = 0;
        outbuf[5] = 0xe0; /* unmap & write_same 10/16 all supported */
        outbuf[6] = s->qdev.conf.discard_granularity ? 2 : 1;
        outbuf[7] = 0;
        break;
    }
    default:
        return -1;
    }
    /* done with EVPD */
    assert(buflen - start <= 255);
    outbuf[start - 1] = buflen - start;
    return buflen;
}

static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    int buflen = 0;

    if (req->cmd.buf[1] & 0x1) {
        /* Vital product data */
        return scsi_disk_emulate_vpd_page(req, outbuf);
    }

    /* Standard INQUIRY data */
    if (req->cmd.buf[2] != 0) {
        return -1;
    }

    /* PAGE CODE == 0 */
    buflen = req->cmd.xfer;
    if (buflen > SCSI_MAX_INQUIRY_LEN) {
        buflen = SCSI_MAX_INQUIRY_LEN;
    }

    outbuf[0] = s->qdev.type & 0x1f;
    outbuf[1] = (s->features & (1 << SCSI_DISK_F_REMOVABLE)) ? 0x80 : 0;

    strpadcpy((char *) &outbuf[16], 16, s->product, ' ');
    strpadcpy((char *) &outbuf[8], 8, s->vendor, ' ');

    memset(&outbuf[32], 0, 4);
    memcpy(&outbuf[32], s->version, MIN(4, strlen(s->version)));
    /*
     * We claim conformance to SPC-3, which is required for guests
     * to ask for modern features like READ CAPACITY(16) or the
     * block characteristics VPD page by default.  Not all of SPC-3
     * is actually implemented, but we're good enough.
     */
    outbuf[2] = s->qdev.default_scsi_version;
    outbuf[3] = 2 | 0x10; /* Format 2, HiSup */

    if (buflen > 36) {
        outbuf[4] = buflen - 5; /* Additional Length = (Len - 1) - 4 */
    } else {
        /* If the allocation length of the CDB is too small,
           the additional length is not adjusted */
        outbuf[4] = 36 - 5;
    }

    /* Sync data transfer and TCQ.  */
    outbuf[7] = 0x10 | (req->bus->info->tcq ? 0x02 : 0);
    return buflen;
}

static inline bool media_is_dvd(SCSIDiskState *s)
{
    uint64_t nb_sectors;
    if (s->qdev.type != TYPE_ROM) {
        return false;
    }
    if (!blk_is_available(s->qdev.conf.blk)) {
        return false;
    }
    blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
    return nb_sectors > CD_MAX_SECTORS;
}

static inline bool media_is_cd(SCSIDiskState *s)
{
    uint64_t nb_sectors;
    if (s->qdev.type != TYPE_ROM) {
        return false;
    }
    if (!blk_is_available(s->qdev.conf.blk)) {
        return false;
    }
    blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
    return nb_sectors <= CD_MAX_SECTORS;
}

static int scsi_read_disc_information(SCSIDiskState *s, SCSIDiskReq *r,
                                      uint8_t *outbuf)
{
    uint8_t type = r->req.cmd.buf[1] & 7;

    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }

    /* Types 1/2 are only defined for Blu-Ray.  */
    if (type != 0) {
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        return -1;
    }

    memset(outbuf, 0, 34);
    outbuf[1] = 32;
    outbuf[2] = 0xe; /* last session complete, disc finalized */
    outbuf[3] = 1;   /* first track on disc */
    outbuf[4] = 1;   /* # of sessions */
    outbuf[5] = 1;   /* first track of last session */
    outbuf[6] = 1;   /* last track of last session */
    outbuf[7] = 0x20; /* unrestricted use */
    outbuf[8] = 0x00; /* CD-ROM or DVD-ROM */
    /* 9-10-11: most significant byte corresponding bytes 4-5-6 */
    /* 12-23: not meaningful for CD-ROM or DVD-ROM */
    /* 24-31: disc bar code */
    /* 32: disc application code */
    /* 33: number of OPC tables */

    return 34;
}

static int scsi_read_dvd_structure(SCSIDiskState *s, SCSIDiskReq *r,
                                   uint8_t *outbuf)
{
    static const int rds_caps_size[5] = {
        [0] = 2048 + 4,
        [1] = 4 + 4,
        [3] = 188 + 4,
        [4] = 2048 + 4,
    };

    uint8_t media = r->req.cmd.buf[1];
    uint8_t layer = r->req.cmd.buf[6];
    uint8_t format = r->req.cmd.buf[7];
    int size = -1;

    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }
    if (media != 0) {
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        return -1;
    }

    if (format != 0xff) {
        if (!blk_is_available(s->qdev.conf.blk)) {
            scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
            return -1;
        }
        if (media_is_cd(s)) {
            scsi_check_condition(r, SENSE_CODE(INCOMPATIBLE_FORMAT));
            return -1;
        }
        if (format >= ARRAY_SIZE(rds_caps_size)) {
            return -1;
        }
        size = rds_caps_size[format];
        memset(outbuf, 0, size);
    }

    switch (format) {
    case 0x00: {
        /* Physical format information */
        uint64_t nb_sectors;
        if (layer != 0) {
            goto fail;
        }
        blk_get_geometry(s->qdev.conf.blk, &nb_sectors);

        outbuf[4] = 1;   /* DVD-ROM, part version 1 */
        outbuf[5] = 0xf; /* 120mm disc, minimum rate unspecified */
        outbuf[6] = 1;   /* one layer, read-only (per MMC-2 spec) */
        outbuf[7] = 0;   /* default densities */

        stl_be_p(&outbuf[12], (nb_sectors >> 2) - 1); /* end sector */
        stl_be_p(&outbuf[16], (nb_sectors >> 2) - 1); /* l0 end sector */
        break;
    }

    case 0x01: /* DVD copyright information, all zeros */
        break;

    case 0x03: /* BCA information - invalid field for no BCA info */
        return -1;

    case 0x04: /* DVD disc manufacturing information, all zeros */
        break;

    case 0xff: { /* List capabilities */
        int i;
        size = 4;
        for (i = 0; i < ARRAY_SIZE(rds_caps_size); i++) {
            if (!rds_caps_size[i]) {
                continue;
            }
            outbuf[size] = i;
            outbuf[size + 1] = 0x40; /* Not writable, readable */
            stw_be_p(&outbuf[size + 2], rds_caps_size[i]);
            size += 4;
        }
        break;
    }

    default:
        return -1;
    }

    /* Size of buffer, not including 2 byte size field */
    stw_be_p(outbuf, size - 2);
    return size;

fail:
    return -1;
}

static int scsi_event_status_media(SCSIDiskState *s, uint8_t *outbuf)
{
    uint8_t event_code, media_status;

    media_status = 0;
    if (s->tray_open) {
        media_status = MS_TRAY_OPEN;
    } else if (blk_is_inserted(s->qdev.conf.blk)) {
        media_status = MS_MEDIA_PRESENT;
    }

    /* Event notification descriptor */
    event_code = MEC_NO_CHANGE;
    if (media_status != MS_TRAY_OPEN) {
        if (s->media_event) {
            event_code = MEC_NEW_MEDIA;
            s->media_event = false;
        } else if (s->eject_request) {
            event_code = MEC_EJECT_REQUESTED;
            s->eject_request = false;
        }
    }

    outbuf[0] = event_code;
    outbuf[1] = media_status;

    /* These fields are reserved, just clear them. */
    outbuf[2] = 0;
    outbuf[3] = 0;
    return 4;
}

static int scsi_get_event_status_notification(SCSIDiskState *s, SCSIDiskReq *r,
                                              uint8_t *outbuf)
{
    int size;
    uint8_t *buf = r->req.cmd.buf;
    uint8_t notification_class_request = buf[4];
    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }
    if ((buf[1] & 1) == 0) {
        /* asynchronous */
        return -1;
    }

    size = 4;
    outbuf[0] = outbuf[1] = 0;
    outbuf[3] = 1 << GESN_MEDIA; /* supported events */
    if (notification_class_request & (1 << GESN_MEDIA)) {
        outbuf[2] = GESN_MEDIA;
        size += scsi_event_status_media(s, &outbuf[size]);
    } else {
        outbuf[2] = 0x80;
    }
    stw_be_p(outbuf, size - 4);
    return size;
}

static int scsi_get_configuration(SCSIDiskState *s, uint8_t *outbuf)
{
    int current;

    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }

    if (media_is_dvd(s)) {
        current = MMC_PROFILE_DVD_ROM;
    } else if (media_is_cd(s)) {
        current = MMC_PROFILE_CD_ROM;
    } else {
        current = MMC_PROFILE_NONE;
    }

    memset(outbuf, 0, 40);
    stl_be_p(&outbuf[0], 36); /* Bytes after the data length field */
    stw_be_p(&outbuf[6], current);
    /* outbuf[8] - outbuf[19]: Feature 0 - Profile list */
    outbuf[10] = 0x03; /* persistent, current */
    outbuf[11] = 8;    /* two profiles */
    stw_be_p(&outbuf[12], MMC_PROFILE_DVD_ROM);
    outbuf[14] = (current == MMC_PROFILE_DVD_ROM);
    stw_be_p(&outbuf[16], MMC_PROFILE_CD_ROM);
    outbuf[18] = (current == MMC_PROFILE_CD_ROM);
    /* outbuf[20] - outbuf[31]: Feature 1 - Core feature */
    stw_be_p(&outbuf[20], 1);
    outbuf[22] = 0x08 | 0x03; /* version 2, persistent, current */
    outbuf[23] = 8;
    stl_be_p(&outbuf[24], 1); /* SCSI */
    outbuf[28] = 1; /* DBE = 1, mandatory */
    /* outbuf[32] - outbuf[39]: Feature 3 - Removable media feature */
    stw_be_p(&outbuf[32], 3);
    outbuf[34] = 0x08 | 0x03; /* version 2, persistent, current */
    outbuf[35] = 4;
    outbuf[36] = 0x39; /* tray, load=1, eject=1, unlocked at powerup, lock=1 */
    /* TODO: Random readable, CD read, DVD read, drive serial number,
       power management */
    return 40;
}

static int scsi_emulate_mechanism_status(SCSIDiskState *s, uint8_t *outbuf)
{
    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }
    memset(outbuf, 0, 8);
    outbuf[5] = 1; /* CD-ROM */
    return 8;
}

static int mode_sense_page(SCSIDiskState *s, int page, uint8_t **p_outbuf,
                           int page_control)
{
    static const int mode_sense_valid[0x3f] = {
        [MODE_PAGE_HD_GEOMETRY]            = (1 << TYPE_DISK),
        [MODE_PAGE_FLEXIBLE_DISK_GEOMETRY] = (1 << TYPE_DISK),
        [MODE_PAGE_CACHING]                = (1 << TYPE_DISK) | (1 << TYPE_ROM),
        [MODE_PAGE_R_W_ERROR]              = (1 << TYPE_DISK) | (1 << TYPE_ROM),
        [MODE_PAGE_AUDIO_CTL]              = (1 << TYPE_ROM),
        [MODE_PAGE_CAPABILITIES]           = (1 << TYPE_ROM),
    };

    uint8_t *p = *p_outbuf + 2;
    int length;

    if ((mode_sense_valid[page] & (1 << s->qdev.type)) == 0) {
        return -1;
    }

    /*
     * If Changeable Values are requested, a mask denoting those mode parameters
     * that are changeable shall be returned. As we currently don't support
     * parameter changes via MODE_SELECT all bits are returned set to zero.
     * The buffer was already memset to zero by the caller of this function.
     *
     * The offsets here are off by two compared to the descriptions in the
     * SCSI specs, because those include a 2-byte header.  This is unfortunate,
     * but it is done so that offsets are consistent within our implementation
     * of MODE SENSE and MODE SELECT.  MODE SELECT has to deal with both
     * 2-byte and 4-byte headers.
     */
    switch (page) {
    case MODE_PAGE_HD_GEOMETRY:
        length = 0x16;
        if (page_control == 1) { /* Changeable Values */
            break;
        }
        /* if a geometry hint is available, use it */
        p[0] = (s->qdev.conf.cyls >> 16) & 0xff;
        p[1] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[2] = s->qdev.conf.cyls & 0xff;
        p[3] = s->qdev.conf.heads & 0xff;
        /* Write precomp start cylinder, disabled */
        p[4] = (s->qdev.conf.cyls >> 16) & 0xff;
        p[5] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[6] = s->qdev.conf.cyls & 0xff;
        /* Reduced current start cylinder, disabled */
        p[7] = (s->qdev.conf.cyls >> 16) & 0xff;
        p[8] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[9] = s->qdev.conf.cyls & 0xff;
        /* Device step rate [ns], 200ns */
        p[10] = 0;
        p[11] = 200;
        /* Landing zone cylinder */
        p[12] = 0xff;
        p[13] = 0xff;
        p[14] = 0xff;
        /* Medium rotation rate [rpm], 5400 rpm */
        p[18] = (5400 >> 8) & 0xff;
        p[19] = 5400 & 0xff;
        break;

    case MODE_PAGE_FLEXIBLE_DISK_GEOMETRY:
        length = 0x1e;
        if (page_control == 1) { /* Changeable Values */
            break;
        }
        /* Transfer rate [kbit/s], 5Mbit/s */
        p[0] = 5000 >> 8;
        p[1] = 5000 & 0xff;
        /* if a geometry hint is available, use it */
        p[2] = s->qdev.conf.heads & 0xff;
        p[3] = s->qdev.conf.secs & 0xff;
        p[4] = s->qdev.blocksize >> 8;
        p[6] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[7] = s->qdev.conf.cyls & 0xff;
        /* Write precomp start cylinder, disabled */
        p[8] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[9] = s->qdev.conf.cyls & 0xff;
        /* Reduced current start cylinder, disabled */
        p[10] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[11] = s->qdev.conf.cyls & 0xff;
        /* Device step rate [100us], 100us */
        p[12] = 0;
        p[13] = 1;
        /* Device step pulse width [us], 1us */
        p[14] = 1;
        /* Device head settle delay [100us], 100us */
        p[15] = 0;
        p[16] = 1;
        /* Motor on delay [0.1s], 0.1s */
        p[17] = 1;
        /* Motor off delay [0.1s], 0.1s */
        p[18] = 1;
        /* Medium rotation rate [rpm], 5400 rpm */
        p[26] = (5400 >> 8) & 0xff;
        p[27] = 5400 & 0xff;
        break;

    case MODE_PAGE_CACHING:
        length = 0x12;
        if (page_control == 1 || /* Changeable Values */
            blk_enable_write_cache(s->qdev.conf.blk)) {
            p[0] = 4; /* WCE */
        }
        break;

    case MODE_PAGE_R_W_ERROR:
        length = 10;
        if (page_control == 1) { /* Changeable Values */
            break;
        }
        p[0] = 0x80; /* Automatic Write Reallocation Enabled */
        if (s->qdev.type == TYPE_ROM) {
            p[1] = 0x20; /* Read Retry Count */
        }
        break;

    case MODE_PAGE_AUDIO_CTL:
        length = 14;
        break;

    case MODE_PAGE_CAPABILITIES:
        length = 0x14;
        if (page_control == 1) { /* Changeable Values */
            break;
        }

        p[0] = 0x3b; /* CD-R & CD-RW read */
        p[1] = 0;    /* Writing not supported */
        p[2] = 0x7f; /* Audio, composite, digital out,
                        mode 2 form 1&2, multi session */
        p[3] = 0xff; /* CD DA, DA accurate, RW supported,
                        RW corrected, C2 errors, ISRC,
                        UPC, Bar code */
        p[4] = 0x2d | (s->tray_locked ? 2 : 0);
        /* Locking supported, jumper present, eject, tray */
        p[5] = 0;    /* no volume & mute control, no
                        changer */
        p[6] = (50 * 176) >> 8; /* 50x read speed */
        p[7] = (50 * 176) & 0xff;
        p[8] = 2 >> 8;     /* Two volume levels */
        p[9] = 2 & 0xff;
        p[10] = 2048 >> 8; /* 2M buffer */
        p[11] = 2048 & 0xff;
        p[12] = (16 * 176) >> 8; /* 16x read speed current */
        p[13] = (16 * 176) & 0xff;
        p[16] = (16 * 176) >> 8; /* 16x write speed */
        p[17] = (16 * 176) & 0xff;
        p[18] = (16 * 176) >> 8; /* 16x write speed current */
        p[19] = (16 * 176) & 0xff;
        break;

    default:
        return -1;
    }

    assert(length < 256);
    (*p_outbuf)[0] = page;
    (*p_outbuf)[1] = length;
    *p_outbuf += length + 2;
    return length + 2;
}

static int scsi_disk_emulate_mode_sense(SCSIDiskReq *r, uint8_t *outbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    uint64_t nb_sectors;
    bool dbd;
    int page, buflen, ret, page_control;
    uint8_t *p;
    uint8_t dev_specific_param;

    dbd = (r->req.cmd.buf[1] & 0x8) != 0;
    page = r->req.cmd.buf[2] & 0x3f;
    page_control = (r->req.cmd.buf[2] & 0xc0) >> 6;

    trace_scsi_disk_emulate_mode_sense((r->req.cmd.buf[0] == MODE_SENSE) ? 6 :
                                       10, page, r->req.cmd.xfer, page_control);
    memset(outbuf, 0, r->req.cmd.xfer);
    p = outbuf;

    if (s->qdev.type == TYPE_DISK) {
        dev_specific_param = s->features & (1 << SCSI_DISK_F_DPOFUA) ? 0x10 : 0;
        if (blk_is_read_only(s->qdev.conf.blk)) {
            dev_specific_param |= 0x80; /* Readonly.  */
        }
    } else {
        /* MMC prescribes that CD/DVD drives have no block descriptors,
         * and defines no device-specific parameter.  */
        dev_specific_param = 0x00;
        dbd = true;
    }

    if (r->req.cmd.buf[0] == MODE_SENSE) {
        p[1] = 0; /* Default media type.  */
        p[2] = dev_specific_param;
        p[3] = 0; /* Block descriptor length.  */
        p += 4;
    } else { /* MODE_SENSE_10 */
        p[2] = 0; /* Default media type.  */
        p[3] = dev_specific_param;
        p[6] = p[7] = 0; /* Block descriptor length.  */
        p += 8;
    }

    blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
    if (!dbd && nb_sectors) {
        if (r->req.cmd.buf[0] == MODE_SENSE) {
            outbuf[3] = 8; /* Block descriptor length  */
        } else { /* MODE_SENSE_10 */
            outbuf[7] = 8; /* Block descriptor length  */
        }
        nb_sectors /= (s->qdev.blocksize / 512);
        if (nb_sectors > 0xffffff) {
            nb_sectors = 0;
        }
        p[0] = 0; /* media density code */
        p[1] = (nb_sectors >> 16) & 0xff;
        p[2] = (nb_sectors >> 8) & 0xff;
        p[3] = nb_sectors & 0xff;
        p[4] = 0; /* reserved */
        p[5] = 0; /* bytes 5-7 are the sector size in bytes */
        p[6] = s->qdev.blocksize >> 8;
        p[7] = 0;
        p += 8;
    }

    if (page_control == 3) {
        /* Saved Values */
        scsi_check_condition(r, SENSE_CODE(SAVING_PARAMS_NOT_SUPPORTED));
        return -1;
    }

    if (page == 0x3f) {
        for (page = 0; page <= 0x3e; page++) {
            mode_sense_page(s, page, &p, page_control);
        }
    } else {
        ret = mode_sense_page(s, page, &p, page_control);
        if (ret == -1) {
            return -1;
        }
    }

    buflen = p - outbuf;
    /*
     * The mode data length field specifies the length in bytes of the
     * following data that is available to be transferred. The mode data
     * length does not include itself.
     */
    if (r->req.cmd.buf[0] == MODE_SENSE) {
        outbuf[0] = buflen - 1;
    } else { /* MODE_SENSE_10 */
        outbuf[0] = ((buflen - 2) >> 8) & 0xff;
        outbuf[1] = (buflen - 2) & 0xff;
    }
    return buflen;
}

static int scsi_disk_emulate_read_toc(SCSIRequest *req, uint8_t *outbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    int start_track, format, msf, toclen;
    uint64_t nb_sectors;

    msf = req->cmd.buf[1] & 2;
    format = req->cmd.buf[2] & 0xf;
    start_track = req->cmd.buf[6];
    blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
    trace_scsi_disk_emulate_read_toc(start_track, format, msf >> 1);
    nb_sectors /= s->qdev.blocksize / 512;
    switch (format) {
    case 0:
        toclen = cdrom_read_toc(nb_sectors, outbuf, msf, start_track);
        break;
    case 1:
        /* multi session : only a single session defined */
        toclen = 12;
        memset(outbuf, 0, 12);
        outbuf[1] = 0x0a;
        outbuf[2] = 0x01;
        outbuf[3] = 0x01;
        break;
    case 2:
        toclen = cdrom_read_toc_raw(nb_sectors, outbuf, msf, start_track);
        break;
    default:
        return -1;
    }
    return toclen;
}

static int scsi_disk_emulate_start_stop(SCSIDiskReq *r)
{
    SCSIRequest *req = &r->req;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    bool start = req->cmd.buf[4] & 1;
    bool loej = req->cmd.buf[4] & 2; /* load on start, eject on !start */
    int pwrcnd = req->cmd.buf[4] & 0xf0;

    if (pwrcnd) {
        /* eject/load only happens for power condition == 0 */
        return 0;
    }

    if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) && loej) {
        if (!start && !s->tray_open && s->tray_locked) {
            scsi_check_condition(r,
                                 blk_is_inserted(s->qdev.conf.blk)
                                 ? SENSE_CODE(ILLEGAL_REQ_REMOVAL_PREVENTED)
                                 : SENSE_CODE(NOT_READY_REMOVAL_PREVENTED));
            return -1;
        }

        if (s->tray_open != !start) {
            blk_eject(s->qdev.conf.blk, !start);
            s->tray_open = !start;
        }
    }
    return 0;
}

static void scsi_disk_emulate_read_data(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    int buflen = r->iov.iov_len;

    if (buflen) {
        trace_scsi_disk_emulate_read_data(buflen);
        r->iov.iov_len = 0;
        r->started = true;
        scsi_req_data(&r->req, buflen);
        return;
    }

    /* This also clears the sense buffer for REQUEST SENSE.  */
    scsi_req_complete(&r->req, GOOD);
}

static int scsi_disk_check_mode_select(SCSIDiskState *s, int page,
                                       uint8_t *inbuf, int inlen)
{
    uint8_t mode_current[SCSI_MAX_MODE_LEN];
    uint8_t mode_changeable[SCSI_MAX_MODE_LEN];
    uint8_t *p;
    int len, expected_len, changeable_len, i;

    /* The input buffer does not include the page header, so it is
     * off by 2 bytes.
     */
    expected_len = inlen + 2;
    if (expected_len > SCSI_MAX_MODE_LEN) {
        return -1;
    }

    p = mode_current;
    memset(mode_current, 0, inlen + 2);
    len = mode_sense_page(s, page, &p, 0);
    if (len < 0 || len != expected_len) {
        return -1;
    }

    p = mode_changeable;
    memset(mode_changeable, 0, inlen + 2);
    changeable_len = mode_sense_page(s, page, &p, 1);
    assert(changeable_len == len);

    /* Check that unchangeable bits are the same as what MODE SENSE
     * would return.
     */
    for (i = 2; i < len; i++) {
        if (((mode_current[i] ^ inbuf[i - 2]) & ~mode_changeable[i]) != 0) {
            return -1;
        }
    }
    return 0;
}

static void scsi_disk_apply_mode_select(SCSIDiskState *s, int page, uint8_t *p)
{
    switch (page) {
    case MODE_PAGE_CACHING:
        blk_set_enable_write_cache(s->qdev.conf.blk, (p[0] & 4) != 0);
        break;

    default:
        break;
    }
}

static int mode_select_pages(SCSIDiskReq *r, uint8_t *p, int len, bool change)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    while (len > 0) {
        int page, subpage, page_len;

        /* Parse both possible formats for the mode page headers.  */
        page = p[0] & 0x3f;
        if (p[0] & 0x40) {
            if (len < 4) {
                goto invalid_param_len;
            }
            subpage = p[1];
            page_len = lduw_be_p(&p[2]);
            p += 4;
            len -= 4;
        } else {
            if (len < 2) {
                goto invalid_param_len;
            }
            subpage = 0;
            page_len = p[1];
            p += 2;
            len -= 2;
        }

        if (subpage) {
            goto invalid_param;
        }
        if (page_len > len) {
            goto invalid_param_len;
        }

        if (!change) {
            if (scsi_disk_check_mode_select(s, page, p, page_len) < 0) {
                goto invalid_param;
            }
        } else {
            scsi_disk_apply_mode_select(s, page, p);
        }

        p += page_len;
        len -= page_len;
    }
    return 0;

invalid_param:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM));
    return -1;

invalid_param_len:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
    return -1;
}

static void scsi_disk_emulate_mode_select(SCSIDiskReq *r, uint8_t *inbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    uint8_t *p = inbuf;
    int cmd = r->req.cmd.buf[0];
    int len = r->req.cmd.xfer;
    int hdr_len = (cmd == MODE_SELECT ? 4 : 8);
    int bd_len;
    int pass;

    /* We only support PF=1, SP=0.  */
    if ((r->req.cmd.buf[1] & 0x11) != 0x10) {
        goto invalid_field;
    }

    if (len < hdr_len) {
        goto invalid_param_len;
    }

    bd_len = (cmd == MODE_SELECT ? p[3] : lduw_be_p(&p[6]));
    len -= hdr_len;
    p += hdr_len;
    if (len < bd_len) {
        goto invalid_param_len;
    }
    if (bd_len != 0 && bd_len != 8) {
        goto invalid_param;
    }

    len -= bd_len;
    p += bd_len;

    /* Ensure no change is made if there is an error!  */
    for (pass = 0; pass < 2; pass++) {
        if (mode_select_pages(r, p, len, pass == 1) < 0) {
            assert(pass == 0);
            return;
        }
    }
    if (!blk_enable_write_cache(s->qdev.conf.blk)) {
        /* The request is used as the AIO opaque value, so add a ref.  */
        scsi_req_ref(&r->req);
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
        return;
    }

    scsi_req_complete(&r->req, GOOD);
    return;

invalid_param:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM));
    return;

invalid_param_len:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
    return;

invalid_field:
    scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
}

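/*
 * Worked example for check_lba_range() below (illustrative values): with
 * max_lba == 0x1000, a request for sector_num == 0x1001 and nb_sectors == 0
 * is accepted (a zero-length access at the first LBA past the end is valid),
 * sector_num == 0x1002 fails the second test, and sector_num == UINT64_MAX
 * with nb_sectors == 1 wraps around and fails the first test.
 */
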
static inline bool check_lba_range(SCSIDiskState *s,
                                   uint64_t sector_num, uint32_t nb_sectors)
{
    /*
     * The first line tests that no overflow happens when computing the last
     * sector.  The second line tests that the last accessed sector is in
     * range.
     *
     * Careful, the computations should not underflow for nb_sectors == 0,
     * and a 0-block read to the first LBA beyond the end of device is
     * valid.
     */
    return (sector_num <= sector_num + nb_sectors &&
            sector_num + nb_sectors <= s->qdev.max_lba + 1);
}

typedef struct UnmapCBData {
    SCSIDiskReq *r;
    uint8_t *inbuf;
    int count;
} UnmapCBData;

static void scsi_unmap_complete(void *opaque, int ret);

static void scsi_unmap_complete_noio(UnmapCBData *data, int ret)
{
    SCSIDiskReq *r = data->r;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    uint64_t sector_num;
    uint32_t nb_sectors;

    assert(r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    if (data->count > 0) {
        sector_num = ldq_be_p(&data->inbuf[0]);
        nb_sectors = ldl_be_p(&data->inbuf[8]) & 0xffffffffULL;
        if (!check_lba_range(s, sector_num, nb_sectors)) {
            scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
            goto done;
        }

        r->req.aiocb = blk_aio_pdiscard(s->qdev.conf.blk,
                                        sector_num * s->qdev.blocksize,
                                        nb_sectors * s->qdev.blocksize,
                                        scsi_unmap_complete, data);
        data->count--;
        data->inbuf += 16;
        return;
    }

    scsi_req_complete(&r->req, GOOD);

done:
    scsi_req_unref(&r->req);
    g_free(data);
}

static void scsi_unmap_complete(void *opaque, int ret)
{
    UnmapCBData *data = opaque;
    SCSIDiskReq *r = data->r;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    scsi_unmap_complete_noio(data, ret);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}

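/*
 * Descriptive note (added): the UNMAP parameter list parsed below starts
 * with an 8-byte header (big-endian data length at bytes 0-1, block
 * descriptor data length at bytes 2-3, bytes 4-7 reserved) followed by
 * 16-byte descriptors, each an 8-byte LBA, a 4-byte block count and 4
 * reserved bytes.  For a single descriptor the two lengths would be 0x0016
 * and 0x0010 respectively.
 */
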
static void scsi_disk_emulate_unmap(SCSIDiskReq *r, uint8_t *inbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    uint8_t *p = inbuf;
    int len = r->req.cmd.xfer;
    UnmapCBData *data;

    /* Reject ANCHOR=1.  */
    if (r->req.cmd.buf[1] & 0x1) {
        goto invalid_field;
    }

    if (len < 8) {
        goto invalid_param_len;
    }
    if (len < lduw_be_p(&p[0]) + 2) {
        goto invalid_param_len;
    }
    if (len < lduw_be_p(&p[2]) + 8) {
        goto invalid_param_len;
    }
    if (lduw_be_p(&p[2]) & 15) {
        goto invalid_param_len;
    }

    if (blk_is_read_only(s->qdev.conf.blk)) {
        scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
        return;
    }

    data = g_new0(UnmapCBData, 1);
    data->r = r;
    data->inbuf = &p[8];
    data->count = lduw_be_p(&p[2]) >> 4;

    /* The matching unref is in scsi_unmap_complete, before data is freed.  */
    scsi_req_ref(&r->req);
    scsi_unmap_complete_noio(data, 0);
    return;

invalid_param_len:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
    return;

invalid_field:
    scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
}

typedef struct WriteSameCBData {
    SCSIDiskReq *r;
    int64_t sector;
    int nb_sectors;
    QEMUIOVector qiov;
    struct iovec iov;
} WriteSameCBData;

static void scsi_write_same_complete(void *opaque, int ret)
{
    WriteSameCBData *data = opaque;
    SCSIDiskReq *r = data->r;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;
    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (scsi_disk_req_check_error(r, ret, true)) {
        goto done;
    }

    block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);

    data->nb_sectors -= data->iov.iov_len / 512;
    data->sector += data->iov.iov_len / 512;
    data->iov.iov_len = MIN(data->nb_sectors * 512, data->iov.iov_len);
    if (data->iov.iov_len) {
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         data->iov.iov_len, BLOCK_ACCT_WRITE);
        /* Reinitialize qiov, to handle unaligned WRITE SAME request
         * where final qiov may need smaller size */
        qemu_iovec_init_external(&data->qiov, &data->iov, 1);
        r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk,
                                       data->sector << BDRV_SECTOR_BITS,
                                       &data->qiov, 0,
                                       scsi_write_same_complete, data);
        aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
        return;
    }

    scsi_req_complete(&r->req, GOOD);

done:
    scsi_req_unref(&r->req);
    qemu_vfree(data->iov.iov_base);
    g_free(data);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}

static void scsi_disk_emulate_write_same(SCSIDiskReq *r, uint8_t *inbuf)
{
    SCSIRequest *req = &r->req;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    uint32_t nb_sectors = scsi_data_cdb_xfer(r->req.cmd.buf);
    WriteSameCBData *data;
    uint8_t *buf;
    int i;

    /* Fail if PBDATA=1 or LBDATA=1 or ANCHOR=1.  */
    if (nb_sectors == 0 || (req->cmd.buf[1] & 0x16)) {
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        return;
    }

    if (blk_is_read_only(s->qdev.conf.blk)) {
        scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
        return;
    }
    if (!check_lba_range(s, r->req.cmd.lba, nb_sectors)) {
        scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
        return;
    }

    if ((req->cmd.buf[1] & 0x1) || buffer_is_zero(inbuf, s->qdev.blocksize)) {
        int flags = (req->cmd.buf[1] & 0x8) ? BDRV_REQ_MAY_UNMAP : 0;

        /* The request is used as the AIO opaque value, so add a ref.  */
        scsi_req_ref(&r->req);
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         nb_sectors * s->qdev.blocksize,
                         BLOCK_ACCT_WRITE);
        r->req.aiocb = blk_aio_pwrite_zeroes(s->qdev.conf.blk,
                                             r->req.cmd.lba * s->qdev.blocksize,
                                             nb_sectors * s->qdev.blocksize,
                                             flags, scsi_aio_complete, r);
        return;
    }

    data = g_new0(WriteSameCBData, 1);
    data->r = r;
    data->sector = r->req.cmd.lba * (s->qdev.blocksize / 512);
    data->nb_sectors = nb_sectors * (s->qdev.blocksize / 512);
    data->iov.iov_len = MIN(data->nb_sectors * 512, SCSI_WRITE_SAME_MAX);
    data->iov.iov_base = buf = blk_blockalign(s->qdev.conf.blk,
                                              data->iov.iov_len);
    qemu_iovec_init_external(&data->qiov, &data->iov, 1);

    for (i = 0; i < data->iov.iov_len; i += s->qdev.blocksize) {
        memcpy(&buf[i], inbuf, s->qdev.blocksize);
    }

    scsi_req_ref(&r->req);
    block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                     data->iov.iov_len, BLOCK_ACCT_WRITE);
    r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk,
                                   data->sector << BDRV_SECTOR_BITS,
                                   &data->qiov, 0,
                                   scsi_write_same_complete, data);
}

static void scsi_disk_emulate_write_data(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    if (r->iov.iov_len) {
        int buflen = r->iov.iov_len;
        trace_scsi_disk_emulate_write_data(buflen);
        r->iov.iov_len = 0;
        scsi_req_data(&r->req, buflen);
        return;
    }

    switch (req->cmd.buf[0]) {
    case MODE_SELECT:
    case MODE_SELECT_10:
        /* This also clears the sense buffer for REQUEST SENSE.  */
        scsi_disk_emulate_mode_select(r, r->iov.iov_base);
        break;

    case UNMAP:
        scsi_disk_emulate_unmap(r, r->iov.iov_base);
        break;

    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
        if (r->req.status == -1) {
            scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        }
        break;

    case WRITE_SAME_10:
    case WRITE_SAME_16:
        scsi_disk_emulate_write_same(r, r->iov.iov_base);
        break;

    default:
        abort();
    }
}

static int32_t scsi_disk_emulate_command(SCSIRequest *req, uint8_t *buf)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    uint64_t nb_sectors;
    uint8_t *outbuf;
    int buflen;

    switch (req->cmd.buf[0]) {
    case INQUIRY:
    case MODE_SENSE:
    case MODE_SENSE_10:
    case RESERVE:
    case RESERVE_10:
    case RELEASE:
    case RELEASE_10:
    case START_STOP:
    case ALLOW_MEDIUM_REMOVAL:
    case GET_CONFIGURATION:
    case GET_EVENT_STATUS_NOTIFICATION:
    case MECHANISM_STATUS:
    case REQUEST_SENSE:
        break;

    default:
        if (!blk_is_available(s->qdev.conf.blk)) {
            scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
            return 0;
        }
        break;
    }

    /*
     * FIXME: we shouldn't return anything bigger than 4k, but the code
     * requires the buffer to be as big as req->cmd.xfer in several
     * places.  So, do not allow CDBs with a very large ALLOCATION
     * LENGTH.  The real fix would be to modify scsi_read_data and
     * dma_buf_read, so that they return data beyond the buflen
     * as all zeros.
     */
    if (req->cmd.xfer > 65536) {
        goto illegal_request;
    }
    r->buflen = MAX(4096, req->cmd.xfer);

    if (!r->iov.iov_base) {
        r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen);
    }

    buflen = req->cmd.xfer;
    outbuf = r->iov.iov_base;
    memset(outbuf, 0, r->buflen);
    switch (req->cmd.buf[0]) {
    case TEST_UNIT_READY:
        assert(blk_is_available(s->qdev.conf.blk));
        break;
    case INQUIRY:
        buflen = scsi_disk_emulate_inquiry(req, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case MODE_SENSE:
    case MODE_SENSE_10:
        buflen = scsi_disk_emulate_mode_sense(r, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case READ_TOC:
        buflen = scsi_disk_emulate_read_toc(req, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case RESERVE:
        if (req->cmd.buf[1] & 1) {
            goto illegal_request;
        }
        break;
    case RESERVE_10:
        if (req->cmd.buf[1] & 3) {
            goto illegal_request;
        }
        break;
    case RELEASE:
        if (req->cmd.buf[1] & 1) {
            goto illegal_request;
        }
        break;
    case RELEASE_10:
        if (req->cmd.buf[1] & 3) {
            goto illegal_request;
        }
        break;
    case START_STOP:
        if (scsi_disk_emulate_start_stop(r) < 0) {
            return 0;
        }
        break;
    case ALLOW_MEDIUM_REMOVAL:
        s->tray_locked = req->cmd.buf[4] & 1;
        blk_lock_medium(s->qdev.conf.blk, req->cmd.buf[4] & 1);
        break;
    case READ_CAPACITY_10:
        /* The normal LEN field for this command is zero.  */
        memset(outbuf, 0, 8);
        blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
        if (!nb_sectors) {
            scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY));
            return 0;
        }
        if ((req->cmd.buf[8] & 1) == 0 && req->cmd.lba) {
            goto illegal_request;
        }
        nb_sectors /= s->qdev.blocksize / 512;
        /* Returned value is the address of the last sector.  */
        nb_sectors--;
        /* Remember the new size for read/write sanity checking. */
        s->qdev.max_lba = nb_sectors;
        /* Clip to 2TB, instead of returning capacity modulo 2TB. */
        if (nb_sectors > UINT32_MAX) {
            nb_sectors = UINT32_MAX;
        }
        outbuf[0] = (nb_sectors >> 24) & 0xff;
        outbuf[1] = (nb_sectors >> 16) & 0xff;
        outbuf[2] = (nb_sectors >> 8) & 0xff;
        outbuf[3] = nb_sectors & 0xff;
        outbuf[4] = 0;
        outbuf[5] = 0;
        outbuf[6] = s->qdev.blocksize >> 8;
        outbuf[7] = 0;
        break;
    case REQUEST_SENSE:
        /* Just return "NO SENSE".  */
*/ 1991 buflen = scsi_convert_sense(NULL, 0, outbuf, r->buflen, 1992 (req->cmd.buf[1] & 1) == 0); 1993 if (buflen < 0) { 1994 goto illegal_request; 1995 } 1996 break; 1997 case MECHANISM_STATUS: 1998 buflen = scsi_emulate_mechanism_status(s, outbuf); 1999 if (buflen < 0) { 2000 goto illegal_request; 2001 } 2002 break; 2003 case GET_CONFIGURATION: 2004 buflen = scsi_get_configuration(s, outbuf); 2005 if (buflen < 0) { 2006 goto illegal_request; 2007 } 2008 break; 2009 case GET_EVENT_STATUS_NOTIFICATION: 2010 buflen = scsi_get_event_status_notification(s, r, outbuf); 2011 if (buflen < 0) { 2012 goto illegal_request; 2013 } 2014 break; 2015 case READ_DISC_INFORMATION: 2016 buflen = scsi_read_disc_information(s, r, outbuf); 2017 if (buflen < 0) { 2018 goto illegal_request; 2019 } 2020 break; 2021 case READ_DVD_STRUCTURE: 2022 buflen = scsi_read_dvd_structure(s, r, outbuf); 2023 if (buflen < 0) { 2024 goto illegal_request; 2025 } 2026 break; 2027 case SERVICE_ACTION_IN_16: 2028 /* Service Action In subcommands. */ 2029 if ((req->cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) { 2030 trace_scsi_disk_emulate_command_SAI_16(); 2031 memset(outbuf, 0, req->cmd.xfer); 2032 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 2033 if (!nb_sectors) { 2034 scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY)); 2035 return 0; 2036 } 2037 if ((req->cmd.buf[14] & 1) == 0 && req->cmd.lba) { 2038 goto illegal_request; 2039 } 2040 nb_sectors /= s->qdev.blocksize / 512; 2041 /* Returned value is the address of the last sector. */ 2042 nb_sectors--; 2043 /* Remember the new size for read/write sanity checking. */ 2044 s->qdev.max_lba = nb_sectors; 2045 outbuf[0] = (nb_sectors >> 56) & 0xff; 2046 outbuf[1] = (nb_sectors >> 48) & 0xff; 2047 outbuf[2] = (nb_sectors >> 40) & 0xff; 2048 outbuf[3] = (nb_sectors >> 32) & 0xff; 2049 outbuf[4] = (nb_sectors >> 24) & 0xff; 2050 outbuf[5] = (nb_sectors >> 16) & 0xff; 2051 outbuf[6] = (nb_sectors >> 8) & 0xff; 2052 outbuf[7] = nb_sectors & 0xff; 2053 outbuf[8] = 0; 2054 outbuf[9] = 0; 2055 outbuf[10] = s->qdev.blocksize >> 8; 2056 outbuf[11] = 0; 2057 outbuf[12] = 0; 2058 outbuf[13] = get_physical_block_exp(&s->qdev.conf); 2059 2060 /* set TPE bit if the format supports discard */ 2061 if (s->qdev.conf.discard_granularity) { 2062 outbuf[14] = 0x80; 2063 } 2064 2065 /* Protection, exponent and lowest lba field left blank. */ 2066 break; 2067 } 2068 trace_scsi_disk_emulate_command_SAI_unsupported(); 2069 goto illegal_request; 2070 case SYNCHRONIZE_CACHE: 2071 /* The request is used as the AIO opaque value, so add a ref. 
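 * blk_aio_flush() completes asynchronously: the command status is reported
 * from the AIO completion callback rather than from this switch (hence the
 * "return 0" with no data transfer), and the extra reference keeps the
 * request alive until that callback has run.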
*/ 2072 scsi_req_ref(&r->req); 2073 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0, 2074 BLOCK_ACCT_FLUSH); 2075 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r); 2076 return 0; 2077 case SEEK_10: 2078 trace_scsi_disk_emulate_command_SEEK_10(r->req.cmd.lba); 2079 if (r->req.cmd.lba > s->qdev.max_lba) { 2080 goto illegal_lba; 2081 } 2082 break; 2083 case MODE_SELECT: 2084 trace_scsi_disk_emulate_command_MODE_SELECT(r->req.cmd.xfer); 2085 break; 2086 case MODE_SELECT_10: 2087 trace_scsi_disk_emulate_command_MODE_SELECT_10(r->req.cmd.xfer); 2088 break; 2089 case UNMAP: 2090 trace_scsi_disk_emulate_command_UNMAP(r->req.cmd.xfer); 2091 break; 2092 case VERIFY_10: 2093 case VERIFY_12: 2094 case VERIFY_16: 2095 trace_scsi_disk_emulate_command_VERIFY((req->cmd.buf[1] >> 1) & 3); 2096 if (req->cmd.buf[1] & 6) { 2097 goto illegal_request; 2098 } 2099 break; 2100 case WRITE_SAME_10: 2101 case WRITE_SAME_16: 2102 trace_scsi_disk_emulate_command_WRITE_SAME( 2103 req->cmd.buf[0] == WRITE_SAME_10 ? 10 : 16, r->req.cmd.xfer); 2104 break; 2105 default: 2106 trace_scsi_disk_emulate_command_UNKNOWN(buf[0], 2107 scsi_command_name(buf[0])); 2108 scsi_check_condition(r, SENSE_CODE(INVALID_OPCODE)); 2109 return 0; 2110 } 2111 assert(!r->req.aiocb); 2112 r->iov.iov_len = MIN(r->buflen, req->cmd.xfer); 2113 if (r->iov.iov_len == 0) { 2114 scsi_req_complete(&r->req, GOOD); 2115 } 2116 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { 2117 assert(r->iov.iov_len == req->cmd.xfer); 2118 return -r->iov.iov_len; 2119 } else { 2120 return r->iov.iov_len; 2121 } 2122 2123 illegal_request: 2124 if (r->req.status == -1) { 2125 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 2126 } 2127 return 0; 2128 2129 illegal_lba: 2130 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE)); 2131 return 0; 2132 } 2133 2134 /* Execute a scsi command. Returns the length of the data expected by the 2135 command. This will be Positive for data transfers from the device 2136 (eg. disk reads), negative for transfers to the device (eg. disk writes), 2137 and zero if the command does not transfer any data. */ 2138 2139 static int32_t scsi_disk_dma_command(SCSIRequest *req, uint8_t *buf) 2140 { 2141 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 2142 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 2143 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s)); 2144 uint32_t len; 2145 uint8_t command; 2146 2147 command = buf[0]; 2148 2149 if (!blk_is_available(s->qdev.conf.blk)) { 2150 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM)); 2151 return 0; 2152 } 2153 2154 len = scsi_data_cdb_xfer(r->req.cmd.buf); 2155 switch (command) { 2156 case READ_6: 2157 case READ_10: 2158 case READ_12: 2159 case READ_16: 2160 trace_scsi_disk_dma_command_READ(r->req.cmd.lba, len); 2161 /* Protection information is not supported. For SCSI versions 2 and 2162 * older (as determined by snooping the guest's INQUIRY commands), 2163 * there is no RD/WR/VRPROTECT, so skip this check in these versions. 
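 * Concretely, buf[1] & 0xe0 picks out the top three bits of CDB byte 1,
 * which in READ(10)/(12)/(16) carry RDPROTECT (WRPROTECT/VRPROTECT for the
 * write and verify forms); e.g. a READ(10) with byte 1 = 0x20 is answered
 * with INVALID FIELD IN CDB.  Pre-SPC initiators used those bits for other
 * purposes (historically the LUN), hence the scsi_version check.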
2164 */ 2165 if (s->qdev.scsi_version > 2 && (r->req.cmd.buf[1] & 0xe0)) { 2166 goto illegal_request; 2167 } 2168 if (!check_lba_range(s, r->req.cmd.lba, len)) { 2169 goto illegal_lba; 2170 } 2171 r->sector = r->req.cmd.lba * (s->qdev.blocksize / 512); 2172 r->sector_count = len * (s->qdev.blocksize / 512); 2173 break; 2174 case WRITE_6: 2175 case WRITE_10: 2176 case WRITE_12: 2177 case WRITE_16: 2178 case WRITE_VERIFY_10: 2179 case WRITE_VERIFY_12: 2180 case WRITE_VERIFY_16: 2181 if (blk_is_read_only(s->qdev.conf.blk)) { 2182 scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED)); 2183 return 0; 2184 } 2185 trace_scsi_disk_dma_command_WRITE( 2186 (command & 0xe) == 0xe ? "And Verify " : "", 2187 r->req.cmd.lba, len); 2188 /* fall through */ 2189 case VERIFY_10: 2190 case VERIFY_12: 2191 case VERIFY_16: 2192 /* We get here only for BYTCHK == 0x01 and only for scsi-block. 2193 * As far as DMA is concerned, we can treat it the same as a write; 2194 * scsi_block_do_sgio will send VERIFY commands. 2195 */ 2196 if (s->qdev.scsi_version > 2 && (r->req.cmd.buf[1] & 0xe0)) { 2197 goto illegal_request; 2198 } 2199 if (!check_lba_range(s, r->req.cmd.lba, len)) { 2200 goto illegal_lba; 2201 } 2202 r->sector = r->req.cmd.lba * (s->qdev.blocksize / 512); 2203 r->sector_count = len * (s->qdev.blocksize / 512); 2204 break; 2205 default: 2206 abort(); 2207 illegal_request: 2208 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 2209 return 0; 2210 illegal_lba: 2211 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE)); 2212 return 0; 2213 } 2214 r->need_fua_emulation = sdc->need_fua_emulation(&r->req.cmd); 2215 if (r->sector_count == 0) { 2216 scsi_req_complete(&r->req, GOOD); 2217 } 2218 assert(r->iov.iov_len == 0); 2219 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { 2220 return -r->sector_count * 512; 2221 } else { 2222 return r->sector_count * 512; 2223 } 2224 } 2225 2226 static void scsi_disk_reset(DeviceState *dev) 2227 { 2228 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev.qdev, dev); 2229 uint64_t nb_sectors; 2230 2231 scsi_device_purge_requests(&s->qdev, SENSE_CODE(RESET)); 2232 2233 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 2234 nb_sectors /= s->qdev.blocksize / 512; 2235 if (nb_sectors) { 2236 nb_sectors--; 2237 } 2238 s->qdev.max_lba = nb_sectors; 2239 /* reset tray statuses */ 2240 s->tray_locked = 0; 2241 s->tray_open = 0; 2242 2243 s->qdev.scsi_version = s->qdev.default_scsi_version; 2244 } 2245 2246 static void scsi_disk_resize_cb(void *opaque) 2247 { 2248 SCSIDiskState *s = opaque; 2249 2250 /* SPC lists this sense code as available only for 2251 * direct-access devices. 2252 */ 2253 if (s->qdev.type == TYPE_DISK) { 2254 scsi_device_report_change(&s->qdev, SENSE_CODE(CAPACITY_CHANGED)); 2255 } 2256 } 2257 2258 static void scsi_cd_change_media_cb(void *opaque, bool load, Error **errp) 2259 { 2260 SCSIDiskState *s = opaque; 2261 2262 /* 2263 * When a CD gets changed, we have to report an ejected state and 2264 * then a loaded state to guests so that they detect tray 2265 * open/close and media change events. Guests that do not use 2266 * GET_EVENT_STATUS_NOTIFICATION to detect such tray open/close 2267 * states rely on this behavior. 2268 * 2269 * media_changed governs the state machine used for unit attention 2270 * report. media_event is used by GET EVENT STATUS NOTIFICATION. 
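 * For example, on an HMP/QMP medium change the block layer calls this once
 * with load = false (old medium gone, tray reported open) and then again
 * with load = true.  The NO MEDIUM unit attention set below makes the
 * guest re-probe; once it has been delivered,
 * scsi_disk_unit_attention_reported() follows up with MEDIUM MAY HAVE
 * CHANGED because media_changed is still set.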
2271 */ 2272 s->media_changed = load; 2273 s->tray_open = !load; 2274 scsi_device_set_ua(&s->qdev, SENSE_CODE(UNIT_ATTENTION_NO_MEDIUM)); 2275 s->media_event = true; 2276 s->eject_request = false; 2277 } 2278 2279 static void scsi_cd_eject_request_cb(void *opaque, bool force) 2280 { 2281 SCSIDiskState *s = opaque; 2282 2283 s->eject_request = true; 2284 if (force) { 2285 s->tray_locked = false; 2286 } 2287 } 2288 2289 static bool scsi_cd_is_tray_open(void *opaque) 2290 { 2291 return ((SCSIDiskState *)opaque)->tray_open; 2292 } 2293 2294 static bool scsi_cd_is_medium_locked(void *opaque) 2295 { 2296 return ((SCSIDiskState *)opaque)->tray_locked; 2297 } 2298 2299 static const BlockDevOps scsi_disk_removable_block_ops = { 2300 .change_media_cb = scsi_cd_change_media_cb, 2301 .eject_request_cb = scsi_cd_eject_request_cb, 2302 .is_tray_open = scsi_cd_is_tray_open, 2303 .is_medium_locked = scsi_cd_is_medium_locked, 2304 2305 .resize_cb = scsi_disk_resize_cb, 2306 }; 2307 2308 static const BlockDevOps scsi_disk_block_ops = { 2309 .resize_cb = scsi_disk_resize_cb, 2310 }; 2311 2312 static void scsi_disk_unit_attention_reported(SCSIDevice *dev) 2313 { 2314 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2315 if (s->media_changed) { 2316 s->media_changed = false; 2317 scsi_device_set_ua(&s->qdev, SENSE_CODE(MEDIUM_CHANGED)); 2318 } 2319 } 2320 2321 static void scsi_realize(SCSIDevice *dev, Error **errp) 2322 { 2323 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2324 bool read_only; 2325 2326 if (!s->qdev.conf.blk) { 2327 error_setg(errp, "drive property not set"); 2328 return; 2329 } 2330 2331 if (!(s->features & (1 << SCSI_DISK_F_REMOVABLE)) && 2332 !blk_is_inserted(s->qdev.conf.blk)) { 2333 error_setg(errp, "Device needs media, but drive is empty"); 2334 return; 2335 } 2336 2337 blkconf_blocksizes(&s->qdev.conf); 2338 2339 if (s->qdev.conf.logical_block_size > 2340 s->qdev.conf.physical_block_size) { 2341 error_setg(errp, 2342 "logical_block_size > physical_block_size not supported"); 2343 return; 2344 } 2345 2346 if (blk_get_aio_context(s->qdev.conf.blk) != qemu_get_aio_context() && 2347 !s->qdev.hba_supports_iothread) 2348 { 2349 error_setg(errp, "HBA does not support iothreads"); 2350 return; 2351 } 2352 2353 if (dev->type == TYPE_DISK) { 2354 if (!blkconf_geometry(&dev->conf, NULL, 65535, 255, 255, errp)) { 2355 return; 2356 } 2357 } 2358 2359 read_only = blk_is_read_only(s->qdev.conf.blk); 2360 if (dev->type == TYPE_ROM) { 2361 read_only = true; 2362 } 2363 2364 if (!blkconf_apply_backend_options(&dev->conf, read_only, 2365 dev->type == TYPE_DISK, errp)) { 2366 return; 2367 } 2368 2369 if (s->qdev.conf.discard_granularity == -1) { 2370 s->qdev.conf.discard_granularity = 2371 MAX(s->qdev.conf.logical_block_size, DEFAULT_DISCARD_GRANULARITY); 2372 } 2373 2374 if (!s->version) { 2375 s->version = g_strdup(qemu_hw_version()); 2376 } 2377 if (!s->vendor) { 2378 s->vendor = g_strdup("QEMU"); 2379 } 2380 if (!s->device_id) { 2381 if (s->serial) { 2382 s->device_id = g_strdup_printf("%.20s", s->serial); 2383 } else { 2384 const char *str = blk_name(s->qdev.conf.blk); 2385 if (str && *str) { 2386 s->device_id = g_strdup(str); 2387 } 2388 } 2389 } 2390 2391 if (blk_is_sg(s->qdev.conf.blk)) { 2392 error_setg(errp, "unwanted /dev/sg*"); 2393 return; 2394 } 2395 2396 if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) && 2397 !(s->features & (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS))) { 2398 blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_removable_block_ops, s); 2399 } else { 2400 
blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_block_ops, s); 2401 } 2402 blk_set_guest_block_size(s->qdev.conf.blk, s->qdev.blocksize); 2403 2404 blk_iostatus_enable(s->qdev.conf.blk); 2405 } 2406 2407 static void scsi_hd_realize(SCSIDevice *dev, Error **errp) 2408 { 2409 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2410 AioContext *ctx = NULL; 2411 /* can happen for devices without drive. The error message for missing 2412 * backend will be issued in scsi_realize 2413 */ 2414 if (s->qdev.conf.blk) { 2415 ctx = blk_get_aio_context(s->qdev.conf.blk); 2416 aio_context_acquire(ctx); 2417 blkconf_blocksizes(&s->qdev.conf); 2418 } 2419 s->qdev.blocksize = s->qdev.conf.logical_block_size; 2420 s->qdev.type = TYPE_DISK; 2421 if (!s->product) { 2422 s->product = g_strdup("QEMU HARDDISK"); 2423 } 2424 scsi_realize(&s->qdev, errp); 2425 if (ctx) { 2426 aio_context_release(ctx); 2427 } 2428 } 2429 2430 static void scsi_cd_realize(SCSIDevice *dev, Error **errp) 2431 { 2432 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2433 AioContext *ctx; 2434 int ret; 2435 2436 if (!dev->conf.blk) { 2437 /* Anonymous BlockBackend for an empty drive. As we put it into 2438 * dev->conf, qdev takes care of detaching on unplug. */ 2439 dev->conf.blk = blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL); 2440 ret = blk_attach_dev(dev->conf.blk, &dev->qdev); 2441 assert(ret == 0); 2442 } 2443 2444 ctx = blk_get_aio_context(dev->conf.blk); 2445 aio_context_acquire(ctx); 2446 s->qdev.blocksize = 2048; 2447 s->qdev.type = TYPE_ROM; 2448 s->features |= 1 << SCSI_DISK_F_REMOVABLE; 2449 if (!s->product) { 2450 s->product = g_strdup("QEMU CD-ROM"); 2451 } 2452 scsi_realize(&s->qdev, errp); 2453 aio_context_release(ctx); 2454 } 2455 2456 static void scsi_disk_realize(SCSIDevice *dev, Error **errp) 2457 { 2458 DriveInfo *dinfo; 2459 Error *local_err = NULL; 2460 2461 if (!dev->conf.blk) { 2462 scsi_realize(dev, &local_err); 2463 assert(local_err); 2464 error_propagate(errp, local_err); 2465 return; 2466 } 2467 2468 dinfo = blk_legacy_dinfo(dev->conf.blk); 2469 if (dinfo && dinfo->media_cd) { 2470 scsi_cd_realize(dev, errp); 2471 } else { 2472 scsi_hd_realize(dev, errp); 2473 } 2474 } 2475 2476 static const SCSIReqOps scsi_disk_emulate_reqops = { 2477 .size = sizeof(SCSIDiskReq), 2478 .free_req = scsi_free_request, 2479 .send_command = scsi_disk_emulate_command, 2480 .read_data = scsi_disk_emulate_read_data, 2481 .write_data = scsi_disk_emulate_write_data, 2482 .get_buf = scsi_get_buf, 2483 }; 2484 2485 static const SCSIReqOps scsi_disk_dma_reqops = { 2486 .size = sizeof(SCSIDiskReq), 2487 .free_req = scsi_free_request, 2488 .send_command = scsi_disk_dma_command, 2489 .read_data = scsi_read_data, 2490 .write_data = scsi_write_data, 2491 .get_buf = scsi_get_buf, 2492 .load_request = scsi_disk_load_request, 2493 .save_request = scsi_disk_save_request, 2494 }; 2495 2496 static const SCSIReqOps *const scsi_disk_reqops_dispatch[256] = { 2497 [TEST_UNIT_READY] = &scsi_disk_emulate_reqops, 2498 [INQUIRY] = &scsi_disk_emulate_reqops, 2499 [MODE_SENSE] = &scsi_disk_emulate_reqops, 2500 [MODE_SENSE_10] = &scsi_disk_emulate_reqops, 2501 [START_STOP] = &scsi_disk_emulate_reqops, 2502 [ALLOW_MEDIUM_REMOVAL] = &scsi_disk_emulate_reqops, 2503 [READ_CAPACITY_10] = &scsi_disk_emulate_reqops, 2504 [READ_TOC] = &scsi_disk_emulate_reqops, 2505 [READ_DVD_STRUCTURE] = &scsi_disk_emulate_reqops, 2506 [READ_DISC_INFORMATION] = &scsi_disk_emulate_reqops, 2507 [GET_CONFIGURATION] = &scsi_disk_emulate_reqops, 2508 
[GET_EVENT_STATUS_NOTIFICATION] = &scsi_disk_emulate_reqops, 2509 [MECHANISM_STATUS] = &scsi_disk_emulate_reqops, 2510 [SERVICE_ACTION_IN_16] = &scsi_disk_emulate_reqops, 2511 [REQUEST_SENSE] = &scsi_disk_emulate_reqops, 2512 [SYNCHRONIZE_CACHE] = &scsi_disk_emulate_reqops, 2513 [SEEK_10] = &scsi_disk_emulate_reqops, 2514 [MODE_SELECT] = &scsi_disk_emulate_reqops, 2515 [MODE_SELECT_10] = &scsi_disk_emulate_reqops, 2516 [UNMAP] = &scsi_disk_emulate_reqops, 2517 [WRITE_SAME_10] = &scsi_disk_emulate_reqops, 2518 [WRITE_SAME_16] = &scsi_disk_emulate_reqops, 2519 [VERIFY_10] = &scsi_disk_emulate_reqops, 2520 [VERIFY_12] = &scsi_disk_emulate_reqops, 2521 [VERIFY_16] = &scsi_disk_emulate_reqops, 2522 2523 [READ_6] = &scsi_disk_dma_reqops, 2524 [READ_10] = &scsi_disk_dma_reqops, 2525 [READ_12] = &scsi_disk_dma_reqops, 2526 [READ_16] = &scsi_disk_dma_reqops, 2527 [WRITE_6] = &scsi_disk_dma_reqops, 2528 [WRITE_10] = &scsi_disk_dma_reqops, 2529 [WRITE_12] = &scsi_disk_dma_reqops, 2530 [WRITE_16] = &scsi_disk_dma_reqops, 2531 [WRITE_VERIFY_10] = &scsi_disk_dma_reqops, 2532 [WRITE_VERIFY_12] = &scsi_disk_dma_reqops, 2533 [WRITE_VERIFY_16] = &scsi_disk_dma_reqops, 2534 }; 2535 2536 static void scsi_disk_new_request_dump(uint32_t lun, uint32_t tag, uint8_t *buf) 2537 { 2538 int i; 2539 int len = scsi_cdb_length(buf); 2540 char *line_buffer, *p; 2541 2542 line_buffer = g_malloc(len * 5 + 1); 2543 2544 for (i = 0, p = line_buffer; i < len; i++) { 2545 p += sprintf(p, " 0x%02x", buf[i]); 2546 } 2547 trace_scsi_disk_new_request(lun, tag, line_buffer); 2548 2549 g_free(line_buffer); 2550 } 2551 2552 static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun, 2553 uint8_t *buf, void *hba_private) 2554 { 2555 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d); 2556 SCSIRequest *req; 2557 const SCSIReqOps *ops; 2558 uint8_t command; 2559 2560 command = buf[0]; 2561 ops = scsi_disk_reqops_dispatch[command]; 2562 if (!ops) { 2563 ops = &scsi_disk_emulate_reqops; 2564 } 2565 req = scsi_req_alloc(ops, &s->qdev, tag, lun, hba_private); 2566 2567 if (trace_event_get_state_backends(TRACE_SCSI_DISK_NEW_REQUEST)) { 2568 scsi_disk_new_request_dump(lun, tag, buf); 2569 } 2570 2571 return req; 2572 } 2573 2574 #ifdef __linux__ 2575 static int get_device_type(SCSIDiskState *s) 2576 { 2577 uint8_t cmd[16]; 2578 uint8_t buf[36]; 2579 int ret; 2580 2581 memset(cmd, 0, sizeof(cmd)); 2582 memset(buf, 0, sizeof(buf)); 2583 cmd[0] = INQUIRY; 2584 cmd[4] = sizeof(buf); 2585 2586 ret = scsi_SG_IO_FROM_DEV(s->qdev.conf.blk, cmd, sizeof(cmd), 2587 buf, sizeof(buf)); 2588 if (ret < 0) { 2589 return -1; 2590 } 2591 s->qdev.type = buf[0]; 2592 if (buf[1] & 0x80) { 2593 s->features |= 1 << SCSI_DISK_F_REMOVABLE; 2594 } 2595 return 0; 2596 } 2597 2598 static void scsi_block_realize(SCSIDevice *dev, Error **errp) 2599 { 2600 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2601 AioContext *ctx; 2602 int sg_version; 2603 int rc; 2604 2605 if (!s->qdev.conf.blk) { 2606 error_setg(errp, "drive property not set"); 2607 return; 2608 } 2609 2610 if (s->rotation_rate) { 2611 error_report_once("rotation_rate is specified for scsi-block but is " 2612 "not implemented. 
This option is deprecated and will "
2613 "be removed in a future version");
2614 }
2615
2616 ctx = blk_get_aio_context(s->qdev.conf.blk);
2617 aio_context_acquire(ctx);
2618
2619 /* check we are using a driver managing SG_IO (version 3 and after) */
2620 rc = blk_ioctl(s->qdev.conf.blk, SG_GET_VERSION_NUM, &sg_version);
2621 if (rc < 0) {
2622 error_setg_errno(errp, -rc, "cannot get SG_IO version number");
2623 if (rc != -EPERM) {
2624 error_append_hint(errp, "Is this a SCSI device?\n");
2625 }
2626 goto out;
2627 }
2628 if (sg_version < 30000) {
2629 error_setg(errp, "scsi generic interface too old");
2630 goto out;
2631 }
2632
2633 /* get device type from INQUIRY data */
2634 rc = get_device_type(s);
2635 if (rc < 0) {
2636 error_setg(errp, "INQUIRY failed");
2637 goto out;
2638 }
2639
2640 /* Make a guess for the block size; we'll fix it when the guest sends
2641 * READ CAPACITY. If they don't, they likely would assume these sizes
2642 * anyway. (TODO: check in /sys).
2643 */
2644 if (s->qdev.type == TYPE_ROM || s->qdev.type == TYPE_WORM) {
2645 s->qdev.blocksize = 2048;
2646 } else {
2647 s->qdev.blocksize = 512;
2648 }
2649
2650 /* Make the scsi-block device not removable via the HMP and QMP eject
2651 * commands.
2652 */
2653 s->features |= (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS);
2654
2655 scsi_realize(&s->qdev, errp);
2656 scsi_generic_read_device_inquiry(&s->qdev);
2657
2658 out:
2659 aio_context_release(ctx);
2660 }
2661
2662 typedef struct SCSIBlockReq {
2663 SCSIDiskReq req;
2664 sg_io_hdr_t io_header;
2665
2666 /* Selected bytes of the original CDB, copied into our own CDB. */
2667 uint8_t cmd, cdb1, group_number;
2668
2669 /* CDB passed to SG_IO. */
2670 uint8_t cdb[16];
2671 } SCSIBlockReq;
2672
2673 static BlockAIOCB *scsi_block_do_sgio(SCSIBlockReq *req,
2674 int64_t offset, QEMUIOVector *iov,
2675 int direction,
2676 BlockCompletionFunc *cb, void *opaque)
2677 {
2678 sg_io_hdr_t *io_header = &req->io_header;
2679 SCSIDiskReq *r = &req->req;
2680 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
2681 int nb_logical_blocks;
2682 uint64_t lba;
2683 BlockAIOCB *aiocb;
2684
2685 /* This is not supported yet. It can only happen if the guest does
2686 * reads and writes that are not aligned to one logical sector
2687 * _and_ cover multiple MemoryRegions.
2688 */
2689 assert(offset % s->qdev.blocksize == 0);
2690 assert(iov->size % s->qdev.blocksize == 0);
2691
2692 io_header->interface_id = 'S';
2693
2694 /* The data transfer comes from the QEMUIOVector. */
2695 io_header->dxfer_direction = direction;
2696 io_header->dxfer_len = iov->size;
2697 io_header->dxferp = (void *)iov->iov;
2698 io_header->iovec_count = iov->niov;
2699 assert(io_header->iovec_count == iov->niov); /* no overflow! */
2700
2701 /* Build a new CDB with the LBA and length patched in, in case
2702 * DMA helpers split the transfer into multiple segments. Do not
2703 * build a CDB smaller than what the guest wanted, and only build
2704 * a larger one if strictly necessary.
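 * For instance, if the DMA helpers split a guest WRITE(10) and hand this
 * function a segment starting at LBA 0x1234 and spanning 8 logical blocks,
 * the rebuilt CDB (illustrative values) is
 *   2a 00 00 00 12 34 00 00 08 00
 * i.e. opcode (cmd & 0x1f) | 0x20, the guest's original byte 1 kept in
 * cdb1, the big-endian LBA in bytes 2-5, the group number in byte 6 and
 * the big-endian block count in bytes 7-8.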
2705 */ 2706 io_header->cmdp = req->cdb; 2707 lba = offset / s->qdev.blocksize; 2708 nb_logical_blocks = io_header->dxfer_len / s->qdev.blocksize; 2709 2710 if ((req->cmd >> 5) == 0 && lba <= 0x1ffff) { 2711 /* 6-byte CDB */ 2712 stl_be_p(&req->cdb[0], lba | (req->cmd << 24)); 2713 req->cdb[4] = nb_logical_blocks; 2714 req->cdb[5] = 0; 2715 io_header->cmd_len = 6; 2716 } else if ((req->cmd >> 5) <= 1 && lba <= 0xffffffffULL) { 2717 /* 10-byte CDB */ 2718 req->cdb[0] = (req->cmd & 0x1f) | 0x20; 2719 req->cdb[1] = req->cdb1; 2720 stl_be_p(&req->cdb[2], lba); 2721 req->cdb[6] = req->group_number; 2722 stw_be_p(&req->cdb[7], nb_logical_blocks); 2723 req->cdb[9] = 0; 2724 io_header->cmd_len = 10; 2725 } else if ((req->cmd >> 5) != 4 && lba <= 0xffffffffULL) { 2726 /* 12-byte CDB */ 2727 req->cdb[0] = (req->cmd & 0x1f) | 0xA0; 2728 req->cdb[1] = req->cdb1; 2729 stl_be_p(&req->cdb[2], lba); 2730 stl_be_p(&req->cdb[6], nb_logical_blocks); 2731 req->cdb[10] = req->group_number; 2732 req->cdb[11] = 0; 2733 io_header->cmd_len = 12; 2734 } else { 2735 /* 16-byte CDB */ 2736 req->cdb[0] = (req->cmd & 0x1f) | 0x80; 2737 req->cdb[1] = req->cdb1; 2738 stq_be_p(&req->cdb[2], lba); 2739 stl_be_p(&req->cdb[10], nb_logical_blocks); 2740 req->cdb[14] = req->group_number; 2741 req->cdb[15] = 0; 2742 io_header->cmd_len = 16; 2743 } 2744 2745 /* The rest is as in scsi-generic.c. */ 2746 io_header->mx_sb_len = sizeof(r->req.sense); 2747 io_header->sbp = r->req.sense; 2748 io_header->timeout = UINT_MAX; 2749 io_header->usr_ptr = r; 2750 io_header->flags |= SG_FLAG_DIRECT_IO; 2751 2752 aiocb = blk_aio_ioctl(s->qdev.conf.blk, SG_IO, io_header, cb, opaque); 2753 assert(aiocb != NULL); 2754 return aiocb; 2755 } 2756 2757 static bool scsi_block_no_fua(SCSICommand *cmd) 2758 { 2759 return false; 2760 } 2761 2762 static BlockAIOCB *scsi_block_dma_readv(int64_t offset, 2763 QEMUIOVector *iov, 2764 BlockCompletionFunc *cb, void *cb_opaque, 2765 void *opaque) 2766 { 2767 SCSIBlockReq *r = opaque; 2768 return scsi_block_do_sgio(r, offset, iov, 2769 SG_DXFER_FROM_DEV, cb, cb_opaque); 2770 } 2771 2772 static BlockAIOCB *scsi_block_dma_writev(int64_t offset, 2773 QEMUIOVector *iov, 2774 BlockCompletionFunc *cb, void *cb_opaque, 2775 void *opaque) 2776 { 2777 SCSIBlockReq *r = opaque; 2778 return scsi_block_do_sgio(r, offset, iov, 2779 SG_DXFER_TO_DEV, cb, cb_opaque); 2780 } 2781 2782 static bool scsi_block_is_passthrough(SCSIDiskState *s, uint8_t *buf) 2783 { 2784 switch (buf[0]) { 2785 case VERIFY_10: 2786 case VERIFY_12: 2787 case VERIFY_16: 2788 /* Check if BYTCHK == 0x01 (data-out buffer contains data 2789 * for the number of logical blocks specified in the length 2790 * field). For other modes, do not use scatter/gather operation. 2791 */ 2792 if ((buf[1] & 6) == 2) { 2793 return false; 2794 } 2795 break; 2796 2797 case READ_6: 2798 case READ_10: 2799 case READ_12: 2800 case READ_16: 2801 case WRITE_6: 2802 case WRITE_10: 2803 case WRITE_12: 2804 case WRITE_16: 2805 case WRITE_VERIFY_10: 2806 case WRITE_VERIFY_12: 2807 case WRITE_VERIFY_16: 2808 /* MMC writing cannot be done via DMA helpers, because it sometimes 2809 * involves writing beyond the maximum LBA or to negative LBA (lead-in). 2810 * We might use scsi_block_dma_reqops as long as no writing commands are 2811 * seen, but performance usually isn't paramount on optical media. So, 2812 * just make scsi-block operate the same as scsi-generic for them. 
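 * The practical effect: a WRITE(10) aimed at a scsi-block CD-ROM is handed
 * to scsi_generic_req_ops untouched, while the same CDB aimed at a disk
 * goes through scsi_block_dma_reqops and the DMA helpers (see
 * scsi_block_new_request() below).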
2813 */ 2814 if (s->qdev.type != TYPE_ROM) { 2815 return false; 2816 } 2817 break; 2818 2819 default: 2820 break; 2821 } 2822 2823 return true; 2824 } 2825 2826 2827 static int32_t scsi_block_dma_command(SCSIRequest *req, uint8_t *buf) 2828 { 2829 SCSIBlockReq *r = (SCSIBlockReq *)req; 2830 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 2831 2832 r->cmd = req->cmd.buf[0]; 2833 switch (r->cmd >> 5) { 2834 case 0: 2835 /* 6-byte CDB. */ 2836 r->cdb1 = r->group_number = 0; 2837 break; 2838 case 1: 2839 /* 10-byte CDB. */ 2840 r->cdb1 = req->cmd.buf[1]; 2841 r->group_number = req->cmd.buf[6]; 2842 break; 2843 case 4: 2844 /* 12-byte CDB. */ 2845 r->cdb1 = req->cmd.buf[1]; 2846 r->group_number = req->cmd.buf[10]; 2847 break; 2848 case 5: 2849 /* 16-byte CDB. */ 2850 r->cdb1 = req->cmd.buf[1]; 2851 r->group_number = req->cmd.buf[14]; 2852 break; 2853 default: 2854 abort(); 2855 } 2856 2857 /* Protection information is not supported. For SCSI versions 2 and 2858 * older (as determined by snooping the guest's INQUIRY commands), 2859 * there is no RD/WR/VRPROTECT, so skip this check in these versions. 2860 */ 2861 if (s->qdev.scsi_version > 2 && (req->cmd.buf[1] & 0xe0)) { 2862 scsi_check_condition(&r->req, SENSE_CODE(INVALID_FIELD)); 2863 return 0; 2864 } 2865 2866 r->req.status = &r->io_header.status; 2867 return scsi_disk_dma_command(req, buf); 2868 } 2869 2870 static const SCSIReqOps scsi_block_dma_reqops = { 2871 .size = sizeof(SCSIBlockReq), 2872 .free_req = scsi_free_request, 2873 .send_command = scsi_block_dma_command, 2874 .read_data = scsi_read_data, 2875 .write_data = scsi_write_data, 2876 .get_buf = scsi_get_buf, 2877 .load_request = scsi_disk_load_request, 2878 .save_request = scsi_disk_save_request, 2879 }; 2880 2881 static SCSIRequest *scsi_block_new_request(SCSIDevice *d, uint32_t tag, 2882 uint32_t lun, uint8_t *buf, 2883 void *hba_private) 2884 { 2885 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d); 2886 2887 if (scsi_block_is_passthrough(s, buf)) { 2888 return scsi_req_alloc(&scsi_generic_req_ops, &s->qdev, tag, lun, 2889 hba_private); 2890 } else { 2891 return scsi_req_alloc(&scsi_block_dma_reqops, &s->qdev, tag, lun, 2892 hba_private); 2893 } 2894 } 2895 2896 static int scsi_block_parse_cdb(SCSIDevice *d, SCSICommand *cmd, 2897 uint8_t *buf, void *hba_private) 2898 { 2899 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d); 2900 2901 if (scsi_block_is_passthrough(s, buf)) { 2902 return scsi_bus_parse_cdb(&s->qdev, cmd, buf, hba_private); 2903 } else { 2904 return scsi_req_parse_cdb(&s->qdev, cmd, buf); 2905 } 2906 } 2907 2908 static void scsi_block_update_sense(SCSIRequest *req) 2909 { 2910 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 2911 SCSIBlockReq *br = DO_UPCAST(SCSIBlockReq, req, r); 2912 r->req.sense_len = MIN(br->io_header.sb_len_wr, sizeof(r->req.sense)); 2913 } 2914 #endif 2915 2916 static 2917 BlockAIOCB *scsi_dma_readv(int64_t offset, QEMUIOVector *iov, 2918 BlockCompletionFunc *cb, void *cb_opaque, 2919 void *opaque) 2920 { 2921 SCSIDiskReq *r = opaque; 2922 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 2923 return blk_aio_preadv(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque); 2924 } 2925 2926 static 2927 BlockAIOCB *scsi_dma_writev(int64_t offset, QEMUIOVector *iov, 2928 BlockCompletionFunc *cb, void *cb_opaque, 2929 void *opaque) 2930 { 2931 SCSIDiskReq *r = opaque; 2932 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 2933 return blk_aio_pwritev(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque); 2934 } 
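/*
 * These two wrappers are the default dma_readv/dma_writev hooks installed
 * by scsi_disk_base_class_initfn() below: the emulated scsi-hd, scsi-cd and
 * legacy scsi-disk devices issue plain blk_aio_preadv()/blk_aio_pwritev()
 * requests, whereas scsi-block overrides the hooks with the SG_IO based
 * scsi_block_dma_readv()/scsi_block_dma_writev() defined above.
 */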
2935 2936 static void scsi_disk_base_class_initfn(ObjectClass *klass, void *data) 2937 { 2938 DeviceClass *dc = DEVICE_CLASS(klass); 2939 SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass); 2940 2941 dc->fw_name = "disk"; 2942 dc->reset = scsi_disk_reset; 2943 sdc->dma_readv = scsi_dma_readv; 2944 sdc->dma_writev = scsi_dma_writev; 2945 sdc->need_fua_emulation = scsi_is_cmd_fua; 2946 } 2947 2948 static const TypeInfo scsi_disk_base_info = { 2949 .name = TYPE_SCSI_DISK_BASE, 2950 .parent = TYPE_SCSI_DEVICE, 2951 .class_init = scsi_disk_base_class_initfn, 2952 .instance_size = sizeof(SCSIDiskState), 2953 .class_size = sizeof(SCSIDiskClass), 2954 .abstract = true, 2955 }; 2956 2957 #define DEFINE_SCSI_DISK_PROPERTIES() \ 2958 DEFINE_PROP_DRIVE_IOTHREAD("drive", SCSIDiskState, qdev.conf.blk), \ 2959 DEFINE_BLOCK_PROPERTIES_BASE(SCSIDiskState, qdev.conf), \ 2960 DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf), \ 2961 DEFINE_PROP_STRING("ver", SCSIDiskState, version), \ 2962 DEFINE_PROP_STRING("serial", SCSIDiskState, serial), \ 2963 DEFINE_PROP_STRING("vendor", SCSIDiskState, vendor), \ 2964 DEFINE_PROP_STRING("product", SCSIDiskState, product), \ 2965 DEFINE_PROP_STRING("device_id", SCSIDiskState, device_id) 2966 2967 2968 static Property scsi_hd_properties[] = { 2969 DEFINE_SCSI_DISK_PROPERTIES(), 2970 DEFINE_PROP_BIT("removable", SCSIDiskState, features, 2971 SCSI_DISK_F_REMOVABLE, false), 2972 DEFINE_PROP_BIT("dpofua", SCSIDiskState, features, 2973 SCSI_DISK_F_DPOFUA, false), 2974 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0), 2975 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0), 2976 DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0), 2977 DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size, 2978 DEFAULT_MAX_UNMAP_SIZE), 2979 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size, 2980 DEFAULT_MAX_IO_SIZE), 2981 DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0), 2982 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version, 2983 5), 2984 DEFINE_BLOCK_CHS_PROPERTIES(SCSIDiskState, qdev.conf), 2985 DEFINE_PROP_END_OF_LIST(), 2986 }; 2987 2988 static const VMStateDescription vmstate_scsi_disk_state = { 2989 .name = "scsi-disk", 2990 .version_id = 1, 2991 .minimum_version_id = 1, 2992 .fields = (VMStateField[]) { 2993 VMSTATE_SCSI_DEVICE(qdev, SCSIDiskState), 2994 VMSTATE_BOOL(media_changed, SCSIDiskState), 2995 VMSTATE_BOOL(media_event, SCSIDiskState), 2996 VMSTATE_BOOL(eject_request, SCSIDiskState), 2997 VMSTATE_BOOL(tray_open, SCSIDiskState), 2998 VMSTATE_BOOL(tray_locked, SCSIDiskState), 2999 VMSTATE_END_OF_LIST() 3000 } 3001 }; 3002 3003 static void scsi_hd_class_initfn(ObjectClass *klass, void *data) 3004 { 3005 DeviceClass *dc = DEVICE_CLASS(klass); 3006 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); 3007 3008 sc->realize = scsi_hd_realize; 3009 sc->alloc_req = scsi_new_request; 3010 sc->unit_attention_reported = scsi_disk_unit_attention_reported; 3011 dc->desc = "virtual SCSI disk"; 3012 dc->props = scsi_hd_properties; 3013 dc->vmsd = &vmstate_scsi_disk_state; 3014 } 3015 3016 static const TypeInfo scsi_hd_info = { 3017 .name = "scsi-hd", 3018 .parent = TYPE_SCSI_DISK_BASE, 3019 .class_init = scsi_hd_class_initfn, 3020 }; 3021 3022 static Property scsi_cd_properties[] = { 3023 DEFINE_SCSI_DISK_PROPERTIES(), 3024 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0), 3025 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0), 3026 DEFINE_PROP_UINT16("port_index", SCSIDiskState, 
port_index, 0), 3027 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size, 3028 DEFAULT_MAX_IO_SIZE), 3029 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version, 3030 5), 3031 DEFINE_PROP_END_OF_LIST(), 3032 }; 3033 3034 static void scsi_cd_class_initfn(ObjectClass *klass, void *data) 3035 { 3036 DeviceClass *dc = DEVICE_CLASS(klass); 3037 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); 3038 3039 sc->realize = scsi_cd_realize; 3040 sc->alloc_req = scsi_new_request; 3041 sc->unit_attention_reported = scsi_disk_unit_attention_reported; 3042 dc->desc = "virtual SCSI CD-ROM"; 3043 dc->props = scsi_cd_properties; 3044 dc->vmsd = &vmstate_scsi_disk_state; 3045 } 3046 3047 static const TypeInfo scsi_cd_info = { 3048 .name = "scsi-cd", 3049 .parent = TYPE_SCSI_DISK_BASE, 3050 .class_init = scsi_cd_class_initfn, 3051 }; 3052 3053 #ifdef __linux__ 3054 static Property scsi_block_properties[] = { 3055 DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf), \ 3056 DEFINE_PROP_DRIVE("drive", SCSIDiskState, qdev.conf.blk), 3057 DEFINE_PROP_BOOL("share-rw", SCSIDiskState, qdev.conf.share_rw, false), 3058 DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0), 3059 DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size, 3060 DEFAULT_MAX_UNMAP_SIZE), 3061 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size, 3062 DEFAULT_MAX_IO_SIZE), 3063 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version, 3064 -1), 3065 DEFINE_PROP_END_OF_LIST(), 3066 }; 3067 3068 static void scsi_block_class_initfn(ObjectClass *klass, void *data) 3069 { 3070 DeviceClass *dc = DEVICE_CLASS(klass); 3071 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); 3072 SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass); 3073 3074 sc->realize = scsi_block_realize; 3075 sc->alloc_req = scsi_block_new_request; 3076 sc->parse_cdb = scsi_block_parse_cdb; 3077 sdc->dma_readv = scsi_block_dma_readv; 3078 sdc->dma_writev = scsi_block_dma_writev; 3079 sdc->update_sense = scsi_block_update_sense; 3080 sdc->need_fua_emulation = scsi_block_no_fua; 3081 dc->desc = "SCSI block device passthrough"; 3082 dc->props = scsi_block_properties; 3083 dc->vmsd = &vmstate_scsi_disk_state; 3084 } 3085 3086 static const TypeInfo scsi_block_info = { 3087 .name = "scsi-block", 3088 .parent = TYPE_SCSI_DISK_BASE, 3089 .class_init = scsi_block_class_initfn, 3090 }; 3091 #endif 3092 3093 static Property scsi_disk_properties[] = { 3094 DEFINE_SCSI_DISK_PROPERTIES(), 3095 DEFINE_PROP_BIT("removable", SCSIDiskState, features, 3096 SCSI_DISK_F_REMOVABLE, false), 3097 DEFINE_PROP_BIT("dpofua", SCSIDiskState, features, 3098 SCSI_DISK_F_DPOFUA, false), 3099 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0), 3100 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0), 3101 DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0), 3102 DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size, 3103 DEFAULT_MAX_UNMAP_SIZE), 3104 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size, 3105 DEFAULT_MAX_IO_SIZE), 3106 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version, 3107 5), 3108 DEFINE_PROP_END_OF_LIST(), 3109 }; 3110 3111 static void scsi_disk_class_initfn(ObjectClass *klass, void *data) 3112 { 3113 DeviceClass *dc = DEVICE_CLASS(klass); 3114 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); 3115 3116 sc->realize = scsi_disk_realize; 3117 sc->alloc_req = scsi_new_request; 3118 sc->unit_attention_reported = scsi_disk_unit_attention_reported; 3119 
dc->fw_name = "disk"; 3120 dc->desc = "virtual SCSI disk or CD-ROM (legacy)"; 3121 dc->reset = scsi_disk_reset; 3122 dc->props = scsi_disk_properties; 3123 dc->vmsd = &vmstate_scsi_disk_state; 3124 } 3125 3126 static const TypeInfo scsi_disk_info = { 3127 .name = "scsi-disk", 3128 .parent = TYPE_SCSI_DISK_BASE, 3129 .class_init = scsi_disk_class_initfn, 3130 }; 3131 3132 static void scsi_disk_register_types(void) 3133 { 3134 type_register_static(&scsi_disk_base_info); 3135 type_register_static(&scsi_hd_info); 3136 type_register_static(&scsi_cd_info); 3137 #ifdef __linux__ 3138 type_register_static(&scsi_block_info); 3139 #endif 3140 type_register_static(&scsi_disk_info); 3141 } 3142 3143 type_init(scsi_disk_register_types) 3144
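/*
 * Typical usage, assuming a SCSI HBA such as virtio-scsi-pci is already on
 * the command line (illustrative examples only; IDs and file names are
 * placeholders):
 *
 *   -drive if=none,id=disk0,file=disk.qcow2
 *   -device scsi-hd,drive=disk0,serial=SN001,rotation_rate=1
 *
 *   -drive if=none,id=cd0,file=install.iso,media=cdrom
 *   -device scsi-cd,drive=cd0
 *
 *   -drive if=none,id=blk0,file=/dev/sdb,format=raw   (Linux hosts only)
 *   -device scsi-block,drive=blk0
 */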