/*
 * SCSI Device emulation
 *
 * Copyright (c) 2006 CodeSourcery.
 * Based on code by Fabrice Bellard
 *
 * Written by Paul Brook
 * Modifications:
 *  2009-Dec-12 Artyom Tarasenko : implemented standard inquiry for the case
 *                                 when the allocation length of CDB is smaller
 *                                 than 36.
 *  2009-Oct-13 Artyom Tarasenko : implemented the block descriptor in the
 *                                 MODE SENSE response.
 *
 * This code is licensed under the LGPL.
 *
 * Note that this file only handles the SCSI architecture model and device
 * commands.  Emulation of interface/link layer protocols is handled by
 * the host adapter emulator.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "hw/scsi/scsi.h"
#include "migration/qemu-file-types.h"
#include "migration/vmstate.h"
#include "hw/scsi/emulation.h"
#include "scsi/constants.h"
#include "sysemu/sysemu.h"
#include "sysemu/block-backend.h"
#include "sysemu/blockdev.h"
#include "hw/block/block.h"
#include "sysemu/dma.h"
#include "qemu/cutils.h"
#include "trace.h"

#ifdef __linux
#include <scsi/sg.h>
#endif

#define SCSI_WRITE_SAME_MAX         (512 * KiB)
#define SCSI_DMA_BUF_SIZE           (128 * KiB)
#define SCSI_MAX_INQUIRY_LEN        256
#define SCSI_MAX_MODE_LEN           256

#define DEFAULT_DISCARD_GRANULARITY (4 * KiB)
#define DEFAULT_MAX_UNMAP_SIZE      (1 * GiB)
#define DEFAULT_MAX_IO_SIZE         INT_MAX     /* 2 GB - 1 block */

#define TYPE_SCSI_DISK_BASE         "scsi-disk-base"

#define SCSI_DISK_BASE(obj) \
     OBJECT_CHECK(SCSIDiskState, (obj), TYPE_SCSI_DISK_BASE)
#define SCSI_DISK_BASE_CLASS(klass) \
     OBJECT_CLASS_CHECK(SCSIDiskClass, (klass), TYPE_SCSI_DISK_BASE)
#define SCSI_DISK_BASE_GET_CLASS(obj) \
     OBJECT_GET_CLASS(SCSIDiskClass, (obj), TYPE_SCSI_DISK_BASE)

typedef struct SCSIDiskClass {
    SCSIDeviceClass parent_class;
    DMAIOFunc       *dma_readv;
    DMAIOFunc       *dma_writev;
    bool            (*need_fua_emulation)(SCSICommand *cmd);
    void            (*update_sense)(SCSIRequest *r);
} SCSIDiskClass;

typedef struct SCSIDiskReq {
    SCSIRequest req;
    /* Both sector and sector_count are in terms of qemu 512 byte blocks. */
    uint64_t sector;
    uint32_t sector_count;
    uint32_t buflen;
    bool started;
    bool need_fua_emulation;
    struct iovec iov;
    QEMUIOVector qiov;
    BlockAcctCookie acct;
    unsigned char *status;
} SCSIDiskReq;

#define SCSI_DISK_F_REMOVABLE             0
#define SCSI_DISK_F_DPOFUA                1
#define SCSI_DISK_F_NO_REMOVABLE_DEVOPS   2

typedef struct SCSIDiskState
{
    SCSIDevice qdev;
    uint32_t features;
    bool media_changed;
    bool media_event;
    bool eject_request;
    uint16_t port_index;
    uint64_t max_unmap_size;
    uint64_t max_io_size;
    QEMUBH *bh;
    char *version;
    char *serial;
    char *vendor;
    char *product;
    char *device_id;
    bool tray_open;
    bool tray_locked;
    /*
     * 0x0000        - rotation rate not reported
     * 0x0001        - non-rotating medium (SSD)
     * 0x0002-0x0400 - reserved
     * 0x0401-0xfffe - rotations per minute
     * 0xffff        - reserved
     */
    uint16_t rotation_rate;
} SCSIDiskState;

static bool scsi_handle_rw_error(SCSIDiskReq *r, int error, bool acct_failed);

static void scsi_free_request(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    qemu_vfree(r->iov.iov_base);
}

/* Helper function for command completion with sense.  */
static void scsi_check_condition(SCSIDiskReq *r, SCSISense sense)
{
    trace_scsi_disk_check_condition(r->req.tag, sense.key, sense.asc,
                                    sense.ascq);
    scsi_req_build_sense(&r->req, sense);
    scsi_req_complete(&r->req, CHECK_CONDITION);
}

static void scsi_init_iovec(SCSIDiskReq *r, size_t size)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    if (!r->iov.iov_base) {
        r->buflen = size;
        r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen);
    }
    r->iov.iov_len = MIN(r->sector_count * 512, r->buflen);
    qemu_iovec_init_external(&r->qiov, &r->iov, 1);
}

static void scsi_disk_save_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    qemu_put_be64s(f, &r->sector);
    qemu_put_be32s(f, &r->sector_count);
    qemu_put_be32s(f, &r->buflen);
    if (r->buflen) {
        if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
            qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len);
        } else if (!req->retry) {
            uint32_t len = r->iov.iov_len;
            qemu_put_be32s(f, &len);
            qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len);
        }
    }
}

static void scsi_disk_load_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    qemu_get_be64s(f, &r->sector);
    qemu_get_be32s(f, &r->sector_count);
    qemu_get_be32s(f, &r->buflen);
    if (r->buflen) {
        scsi_init_iovec(r, r->buflen);
        if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
            qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len);
        } else if (!r->req.retry) {
            uint32_t len;
            qemu_get_be32s(f, &len);
            r->iov.iov_len = len;
            assert(r->iov.iov_len <= r->buflen);
            qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len);
        }
    }

    qemu_iovec_init_external(&r->qiov, &r->iov, 1);
}

static bool scsi_disk_req_check_error(SCSIDiskReq *r, int ret, bool acct_failed)
{
    if (r->req.io_canceled) {
        scsi_req_cancel_complete(&r->req);
        return true;
    }

    if (ret < 0 || (r->status && *r->status)) {
        return scsi_handle_rw_error(r, -ret, acct_failed);
    }

    return false;
}

static void scsi_aio_complete(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;
    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (scsi_disk_req_check_error(r, ret, true)) {
        goto done;
    }

    block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    scsi_req_complete(&r->req, GOOD);

done:
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
    scsi_req_unref(&r->req);
}

static bool scsi_is_cmd_fua(SCSICommand *cmd)
{
    switch (cmd->buf[0]) {
    case READ_10:
    case READ_12:
    case READ_16:
    case WRITE_10:
    case WRITE_12:
    case WRITE_16:
        return (cmd->buf[1] & 8) != 0;

    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
    case WRITE_VERIFY_10:
    case WRITE_VERIFY_12:
    case WRITE_VERIFY_16:
        return true;

    case READ_6:
    case WRITE_6:
    default:
        return false;
    }
}
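
/*
 * FUA (Force Unit Access) handling: scsi_is_cmd_fua above decodes the FUA
 * bit from the CDB.  When need_fua_emulation is set on a request, the FUA
 * semantics are emulated here rather than passed to the backend: a write is
 * followed by an explicit flush (scsi_write_do_fua below) and a FUA read is
 * preceded by one (see scsi_read_data), so the data is stable on the medium
 * before GOOD status is returned to the guest.
 */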
static void scsi_write_do_fua(SCSIDiskReq *r)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb == NULL);
    assert(!r->req.io_canceled);

    if (r->need_fua_emulation) {
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
        return;
    }

    scsi_req_complete(&r->req, GOOD);
    scsi_req_unref(&r->req);
}

static void scsi_dma_complete_noio(SCSIDiskReq *r, int ret)
{
    assert(r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    r->sector += r->sector_count;
    r->sector_count = 0;
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        scsi_write_do_fua(r);
        return;
    } else {
        scsi_req_complete(&r->req, GOOD);
    }

done:
    scsi_req_unref(&r->req);
}

static void scsi_dma_complete(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    }
    scsi_dma_complete_noio(r, ret);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}

static void scsi_read_complete_noio(SCSIDiskReq *r, int ret)
{
    uint32_t n;

    assert(r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    n = r->qiov.size / 512;
    r->sector += n;
    r->sector_count -= n;
    scsi_req_data(&r->req, r->qiov.size);

done:
    scsi_req_unref(&r->req);
}

static void scsi_read_complete(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
        trace_scsi_disk_read_complete(r->req.tag, r->qiov.size);
    }
    scsi_read_complete_noio(r, ret);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}

/* Actually issue a read to the block device.  */
static void scsi_do_read(SCSIDiskReq *r, int ret)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));

    assert (r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    /* The request is used as the AIO opaque value, so add a ref.  */
    scsi_req_ref(&r->req);

    if (r->req.sg) {
        dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_READ);
        r->req.resid -= r->req.sg->size;
        r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
                                  r->req.sg, r->sector << BDRV_SECTOR_BITS,
                                  BDRV_SECTOR_SIZE,
                                  sdc->dma_readv, r, scsi_dma_complete, r,
                                  DMA_DIRECTION_FROM_DEVICE);
    } else {
        scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         r->qiov.size, BLOCK_ACCT_READ);
        r->req.aiocb = sdc->dma_readv(r->sector << BDRV_SECTOR_BITS, &r->qiov,
                                      scsi_read_complete, r, r);
    }

done:
    scsi_req_unref(&r->req);
}

static void scsi_do_read_cb(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert (r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    }
    scsi_do_read(opaque, ret);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}

/* Read more data from scsi device into buffer.  */
static void scsi_read_data(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    bool first;

    trace_scsi_disk_read_data_count(r->sector_count);
    if (r->sector_count == 0) {
        /* This also clears the sense buffer for REQUEST SENSE.  */
        scsi_req_complete(&r->req, GOOD);
        return;
    }

    /* No data transfer may already be in progress */
    assert(r->req.aiocb == NULL);

    /* The request is used as the AIO opaque value, so add a ref.  */
    scsi_req_ref(&r->req);
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        trace_scsi_disk_read_data_invalid();
        scsi_read_complete_noio(r, -EINVAL);
        return;
    }

    if (!blk_is_available(req->dev->conf.blk)) {
        scsi_read_complete_noio(r, -ENOMEDIUM);
        return;
    }

    first = !r->started;
    r->started = true;
    if (first && r->need_fua_emulation) {
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_do_read_cb, r);
    } else {
        scsi_do_read(r, 0);
    }
}

/*
 * scsi_handle_rw_error has two return values.  False means that the error
 * must be ignored, true means that the error has been processed and the
 * caller should not do anything else for this request.  Note that
 * scsi_handle_rw_error always manages its reference counts, independent
 * of the return value.
 */
static bool scsi_handle_rw_error(SCSIDiskReq *r, int error, bool acct_failed)
{
    bool is_read = (r->req.cmd.mode == SCSI_XFER_FROM_DEV);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
    BlockErrorAction action = blk_get_error_action(s->qdev.conf.blk,
                                                   is_read, error);

    if (action == BLOCK_ERROR_ACTION_REPORT) {
        if (acct_failed) {
            block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
        }
        switch (error) {
        case 0:
            /* A passthrough command has run and has produced sense data; check
             * whether the error has to be handled by the guest or should rather
             * pause the host.
             */
            assert(r->status && *r->status);
            if (scsi_sense_buf_is_guest_recoverable(r->req.sense, sizeof(r->req.sense))) {
                /* These errors are handled by guest. */
                sdc->update_sense(&r->req);
                scsi_req_complete(&r->req, *r->status);
                return true;
            }
            error = scsi_sense_buf_to_errno(r->req.sense, sizeof(r->req.sense));
            break;
        case ENOMEDIUM:
            scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
            break;
        case ENOMEM:
            scsi_check_condition(r, SENSE_CODE(TARGET_FAILURE));
            break;
        case EINVAL:
            scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
            break;
        case ENOSPC:
            scsi_check_condition(r, SENSE_CODE(SPACE_ALLOC_FAILED));
            break;
        default:
            scsi_check_condition(r, SENSE_CODE(IO_ERROR));
            break;
        }
    }

    blk_error_action(s->qdev.conf.blk, action, is_read, error);
    if (action == BLOCK_ERROR_ACTION_IGNORE) {
        scsi_req_complete(&r->req, 0);
        return true;
    }

    if (action == BLOCK_ERROR_ACTION_STOP) {
        scsi_req_retry(&r->req);
    }
    return true;
}

static void scsi_write_complete_noio(SCSIDiskReq *r, int ret)
{
    uint32_t n;

    assert (r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    n = r->qiov.size / 512;
    r->sector += n;
    r->sector_count -= n;
    if (r->sector_count == 0) {
        scsi_write_do_fua(r);
        return;
    } else {
        scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
        trace_scsi_disk_write_complete_noio(r->req.tag, r->qiov.size);
        scsi_req_data(&r->req, r->qiov.size);
    }

done:
    scsi_req_unref(&r->req);
}

static void scsi_write_complete(void * opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert (r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    }
    scsi_write_complete_noio(r, ret);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}

static void scsi_write_data(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));

    /* No data transfer may already be in progress */
    assert(r->req.aiocb == NULL);

    /* The request is used as the AIO opaque value, so add a ref.  */
    scsi_req_ref(&r->req);
    if (r->req.cmd.mode != SCSI_XFER_TO_DEV) {
        trace_scsi_disk_write_data_invalid();
        scsi_write_complete_noio(r, -EINVAL);
        return;
    }

    if (!r->req.sg && !r->qiov.size) {
        /* Called for the first time.  Ask the driver to send us more data.  */
        r->started = true;
        scsi_write_complete_noio(r, 0);
        return;
    }
    if (!blk_is_available(req->dev->conf.blk)) {
        scsi_write_complete_noio(r, -ENOMEDIUM);
        return;
    }

    if (r->req.cmd.buf[0] == VERIFY_10 || r->req.cmd.buf[0] == VERIFY_12 ||
        r->req.cmd.buf[0] == VERIFY_16) {
        if (r->req.sg) {
            scsi_dma_complete_noio(r, 0);
        } else {
            scsi_write_complete_noio(r, 0);
        }
        return;
    }

    if (r->req.sg) {
        dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_WRITE);
        r->req.resid -= r->req.sg->size;
        r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
                                  r->req.sg, r->sector << BDRV_SECTOR_BITS,
                                  BDRV_SECTOR_SIZE,
                                  sdc->dma_writev, r, scsi_dma_complete, r,
                                  DMA_DIRECTION_TO_DEVICE);
    } else {
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         r->qiov.size, BLOCK_ACCT_WRITE);
        r->req.aiocb = sdc->dma_writev(r->sector << BDRV_SECTOR_BITS, &r->qiov,
                                       scsi_write_complete, r, r);
    }
}

/* Return a pointer to the data buffer.  */
static uint8_t *scsi_get_buf(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    return (uint8_t *)r->iov.iov_base;
}

static int scsi_disk_emulate_vpd_page(SCSIRequest *req, uint8_t *outbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    uint8_t page_code = req->cmd.buf[2];
    int start, buflen = 0;

    outbuf[buflen++] = s->qdev.type & 0x1f;
    outbuf[buflen++] = page_code;
    outbuf[buflen++] = 0x00;
    outbuf[buflen++] = 0x00;
    start = buflen;

    switch (page_code) {
    case 0x00: /* Supported page codes, mandatory */
    {
        trace_scsi_disk_emulate_vpd_page_00(req->cmd.xfer);
        outbuf[buflen++] = 0x00; /* list of supported pages (this page) */
        if (s->serial) {
            outbuf[buflen++] = 0x80; /* unit serial number */
        }
        outbuf[buflen++] = 0x83; /* device identification */
        if (s->qdev.type == TYPE_DISK) {
            outbuf[buflen++] = 0xb0; /* block limits */
            outbuf[buflen++] = 0xb1; /* block device characteristics */
            outbuf[buflen++] = 0xb2; /* thin provisioning */
        }
        break;
    }
    case 0x80: /* Device serial number, optional */
    {
        int l;

        if (!s->serial) {
            trace_scsi_disk_emulate_vpd_page_80_not_supported();
            return -1;
        }

        l = strlen(s->serial);
        if (l > 36) {
            l = 36;
        }

        trace_scsi_disk_emulate_vpd_page_80(req->cmd.xfer);
        memcpy(outbuf + buflen, s->serial, l);
        buflen += l;
        break;
    }

    case 0x83: /* Device identification page, mandatory */
    {
        int id_len = s->device_id ? MIN(strlen(s->device_id), 255 - 8) : 0;

        trace_scsi_disk_emulate_vpd_page_83(req->cmd.xfer);

        if (id_len) {
            outbuf[buflen++] = 0x2;    /* ASCII */
            outbuf[buflen++] = 0;      /* not officially assigned */
            outbuf[buflen++] = 0;      /* reserved */
            outbuf[buflen++] = id_len; /* length of data following */
            memcpy(outbuf + buflen, s->device_id, id_len);
            buflen += id_len;
        }

        if (s->qdev.wwn) {
            outbuf[buflen++] = 0x1; /* Binary */
            outbuf[buflen++] = 0x3; /* NAA */
            outbuf[buflen++] = 0;   /* reserved */
            outbuf[buflen++] = 8;
            stq_be_p(&outbuf[buflen], s->qdev.wwn);
            buflen += 8;
        }

        if (s->qdev.port_wwn) {
            outbuf[buflen++] = 0x61; /* SAS / Binary */
            outbuf[buflen++] = 0x93; /* PIV / Target port / NAA */
            outbuf[buflen++] = 0;    /* reserved */
            outbuf[buflen++] = 8;
            stq_be_p(&outbuf[buflen], s->qdev.port_wwn);
            buflen += 8;
        }

        if (s->port_index) {
            outbuf[buflen++] = 0x61; /* SAS / Binary */

            /* PIV/Target port/relative target port */
            outbuf[buflen++] = 0x94;

            outbuf[buflen++] = 0;    /* reserved */
            outbuf[buflen++] = 4;
            stw_be_p(&outbuf[buflen + 2], s->port_index);
            buflen += 4;
        }
        break;
    }
    case 0xb0: /* block limits */
    {
        SCSIBlockLimits bl = {};

        if (s->qdev.type == TYPE_ROM) {
            trace_scsi_disk_emulate_vpd_page_b0_not_supported();
            return -1;
        }
        bl.wsnz = 1;
        bl.unmap_sectors =
            s->qdev.conf.discard_granularity / s->qdev.blocksize;
        bl.min_io_size =
            s->qdev.conf.min_io_size / s->qdev.blocksize;
        bl.opt_io_size =
            s->qdev.conf.opt_io_size / s->qdev.blocksize;
        bl.max_unmap_sectors =
            s->max_unmap_size / s->qdev.blocksize;
        bl.max_io_sectors =
            s->max_io_size / s->qdev.blocksize;
        /* 255 descriptors fit in 4 KiB with an 8-byte header */
        bl.max_unmap_descr = 255;

        if (s->qdev.type == TYPE_DISK) {
            int max_transfer_blk = blk_get_max_transfer(s->qdev.conf.blk);
            int max_io_sectors_blk =
                max_transfer_blk / s->qdev.blocksize;

            bl.max_io_sectors =
                MIN_NON_ZERO(max_io_sectors_blk, bl.max_io_sectors);
        }
        buflen += scsi_emulate_block_limits(outbuf + buflen, &bl);
        break;
    }
    case 0xb1: /* block device characteristics */
    {
        buflen = 0x40;
        outbuf[4] = (s->rotation_rate >> 8) & 0xff;
        outbuf[5] = s->rotation_rate & 0xff;
        outbuf[6] = 0; /* PRODUCT TYPE */
        outbuf[7] = 0; /* WABEREQ | WACEREQ | NOMINAL FORM FACTOR */
        outbuf[8] = 0; /* VBULS */
        break;
    }
    case 0xb2: /* thin provisioning */
    {
        buflen = 8;
        outbuf[4] = 0;
        outbuf[5] = 0xe0; /* unmap & write_same 10/16 all supported */
        outbuf[6] = s->qdev.conf.discard_granularity ? 2 : 1;
        outbuf[7] = 0;
        break;
    }
    default:
        return -1;
    }
    /* done with EVPD */
    assert(buflen - start <= 255);
    outbuf[start - 1] = buflen - start;
    return buflen;
}

static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    int buflen = 0;

    if (req->cmd.buf[1] & 0x1) {
        /* Vital product data */
        return scsi_disk_emulate_vpd_page(req, outbuf);
    }

    /* Standard INQUIRY data */
    if (req->cmd.buf[2] != 0) {
        return -1;
    }

    /* PAGE CODE == 0 */
    buflen = req->cmd.xfer;
    if (buflen > SCSI_MAX_INQUIRY_LEN) {
        buflen = SCSI_MAX_INQUIRY_LEN;
    }

    outbuf[0] = s->qdev.type & 0x1f;
    outbuf[1] = (s->features & (1 << SCSI_DISK_F_REMOVABLE)) ? 0x80 : 0;

    strpadcpy((char *) &outbuf[16], 16, s->product, ' ');
    strpadcpy((char *) &outbuf[8], 8, s->vendor, ' ');

    memset(&outbuf[32], 0, 4);
    memcpy(&outbuf[32], s->version, MIN(4, strlen(s->version)));
    /*
     * We claim conformance to SPC-3, which is required for guests
     * to ask for modern features like READ CAPACITY(16) or the
     * block characteristics VPD page by default.  Not all of SPC-3
     * is actually implemented, but we're good enough.
     */
    outbuf[2] = s->qdev.default_scsi_version;
    outbuf[3] = 2 | 0x10; /* Format 2, HiSup */

    if (buflen > 36) {
        outbuf[4] = buflen - 5; /* Additional Length = (Len - 1) - 4 */
    } else {
        /* If the allocation length of CDB is too small,
           the additional length is not adjusted */
        outbuf[4] = 36 - 5;
    }

    /* Sync data transfer and TCQ.  */
    outbuf[7] = 0x10 | (req->bus->info->tcq ? 0x02 : 0);
    return buflen;
}

static inline bool media_is_dvd(SCSIDiskState *s)
{
    uint64_t nb_sectors;
    if (s->qdev.type != TYPE_ROM) {
        return false;
    }
    if (!blk_is_available(s->qdev.conf.blk)) {
        return false;
    }
    blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
    return nb_sectors > CD_MAX_SECTORS;
}

static inline bool media_is_cd(SCSIDiskState *s)
{
    uint64_t nb_sectors;
    if (s->qdev.type != TYPE_ROM) {
        return false;
    }
    if (!blk_is_available(s->qdev.conf.blk)) {
        return false;
    }
    blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
    return nb_sectors <= CD_MAX_SECTORS;
}

static int scsi_read_disc_information(SCSIDiskState *s, SCSIDiskReq *r,
                                      uint8_t *outbuf)
{
    uint8_t type = r->req.cmd.buf[1] & 7;

    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }

    /* Types 1/2 are only defined for Blu-Ray.  */
    if (type != 0) {
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        return -1;
    }

    memset(outbuf, 0, 34);
    outbuf[1] = 32;
    outbuf[2] = 0xe; /* last session complete, disc finalized */
    outbuf[3] = 1;   /* first track on disc */
    outbuf[4] = 1;   /* # of sessions */
    outbuf[5] = 1;   /* first track of last session */
    outbuf[6] = 1;   /* last track of last session */
    outbuf[7] = 0x20; /* unrestricted use */
    outbuf[8] = 0x00; /* CD-ROM or DVD-ROM */
    /* 9-10-11: most significant byte corresponding bytes 4-5-6 */
    /* 12-23: not meaningful for CD-ROM or DVD-ROM */
    /* 24-31: disc bar code */
    /* 32: disc application code */
    /* 33: number of OPC tables */

    return 34;
}

static int scsi_read_dvd_structure(SCSIDiskState *s, SCSIDiskReq *r,
                                   uint8_t *outbuf)
{
    static const int rds_caps_size[5] = {
        [0] = 2048 + 4,
        [1] = 4 + 4,
        [3] = 188 + 4,
        [4] = 2048 + 4,
    };

    uint8_t media = r->req.cmd.buf[1];
    uint8_t layer = r->req.cmd.buf[6];
    uint8_t format = r->req.cmd.buf[7];
    int size = -1;

    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }
    if (media != 0) {
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        return -1;
    }

    if (format != 0xff) {
        if (!blk_is_available(s->qdev.conf.blk)) {
            scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
            return -1;
        }
        if (media_is_cd(s)) {
            scsi_check_condition(r, SENSE_CODE(INCOMPATIBLE_FORMAT));
            return -1;
        }
        if (format >= ARRAY_SIZE(rds_caps_size)) {
            return -1;
        }
        size = rds_caps_size[format];
        memset(outbuf, 0, size);
    }

    switch (format) {
    case 0x00: {
        /* Physical format information */
        uint64_t nb_sectors;
        if (layer != 0) {
            goto fail;
        }
        blk_get_geometry(s->qdev.conf.blk, &nb_sectors);

        outbuf[4] = 1;   /* DVD-ROM, part version 1 */
        outbuf[5] = 0xf; /* 120mm disc, minimum rate unspecified */
        outbuf[6] = 1;   /* one layer, read-only (per MMC-2 spec) */
        outbuf[7] = 0;   /* default densities */

        stl_be_p(&outbuf[12], (nb_sectors >> 2) - 1); /* end sector */
        stl_be_p(&outbuf[16], (nb_sectors >> 2) - 1); /* l0 end sector */
        break;
    }

    case 0x01: /* DVD copyright information, all zeros */
        break;

    case 0x03: /* BCA information - invalid field for no BCA info */
        return -1;

    case 0x04: /* DVD disc manufacturing information, all zeros */
        break;

    case 0xff: { /* List capabilities */
        int i;
        size = 4;
        for (i = 0; i < ARRAY_SIZE(rds_caps_size); i++) {
            if (!rds_caps_size[i]) {
                continue;
            }
            outbuf[size] = i;
            outbuf[size + 1] = 0x40; /* Not writable, readable */
            stw_be_p(&outbuf[size + 2], rds_caps_size[i]);
            size += 4;
        }
        break;
    }

    default:
        return -1;
    }

    /* Size of buffer, not including 2 byte size field */
    stw_be_p(outbuf, size - 2);
    return size;

fail:
    return -1;
}
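
/*
 * Build the 4-byte media event descriptor used by GET EVENT STATUS
 * NOTIFICATION: an event code (new media, eject requested, or no change)
 * followed by the current media status (tray open / media present).  The
 * media_event and eject_request flags are one-shot and are cleared once
 * they have been reported to the guest.
 */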
static int scsi_event_status_media(SCSIDiskState *s, uint8_t *outbuf)
{
    uint8_t event_code, media_status;

    media_status = 0;
    if (s->tray_open) {
        media_status = MS_TRAY_OPEN;
    } else if (blk_is_inserted(s->qdev.conf.blk)) {
        media_status = MS_MEDIA_PRESENT;
    }

    /* Event notification descriptor */
    event_code = MEC_NO_CHANGE;
    if (media_status != MS_TRAY_OPEN) {
        if (s->media_event) {
            event_code = MEC_NEW_MEDIA;
            s->media_event = false;
        } else if (s->eject_request) {
            event_code = MEC_EJECT_REQUESTED;
            s->eject_request = false;
        }
    }

    outbuf[0] = event_code;
    outbuf[1] = media_status;

    /* These fields are reserved, just clear them. */
    outbuf[2] = 0;
    outbuf[3] = 0;
    return 4;
}

static int scsi_get_event_status_notification(SCSIDiskState *s, SCSIDiskReq *r,
                                              uint8_t *outbuf)
{
    int size;
    uint8_t *buf = r->req.cmd.buf;
    uint8_t notification_class_request = buf[4];
    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }
    if ((buf[1] & 1) == 0) {
        /* asynchronous */
        return -1;
    }

    size = 4;
    outbuf[0] = outbuf[1] = 0;
    outbuf[3] = 1 << GESN_MEDIA; /* supported events */
    if (notification_class_request & (1 << GESN_MEDIA)) {
        outbuf[2] = GESN_MEDIA;
        size += scsi_event_status_media(s, &outbuf[size]);
    } else {
        outbuf[2] = 0x80;
    }
    stw_be_p(outbuf, size - 4);
    return size;
}

static int scsi_get_configuration(SCSIDiskState *s, uint8_t *outbuf)
{
    int current;

    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }

    if (media_is_dvd(s)) {
        current = MMC_PROFILE_DVD_ROM;
    } else if (media_is_cd(s)) {
        current = MMC_PROFILE_CD_ROM;
    } else {
        current = MMC_PROFILE_NONE;
    }

    memset(outbuf, 0, 40);
    stl_be_p(&outbuf[0], 36); /* Bytes after the data length field */
    stw_be_p(&outbuf[6], current);
    /* outbuf[8] - outbuf[19]: Feature 0 - Profile list */
    outbuf[10] = 0x03; /* persistent, current */
    outbuf[11] = 8; /* two profiles */
    stw_be_p(&outbuf[12], MMC_PROFILE_DVD_ROM);
    outbuf[14] = (current == MMC_PROFILE_DVD_ROM);
    stw_be_p(&outbuf[16], MMC_PROFILE_CD_ROM);
    outbuf[18] = (current == MMC_PROFILE_CD_ROM);
    /* outbuf[20] - outbuf[31]: Feature 1 - Core feature */
    stw_be_p(&outbuf[20], 1);
    outbuf[22] = 0x08 | 0x03; /* version 2, persistent, current */
    outbuf[23] = 8;
    stl_be_p(&outbuf[24], 1); /* SCSI */
    outbuf[28] = 1; /* DBE = 1, mandatory */
    /* outbuf[32] - outbuf[39]: Feature 3 - Removable media feature */
    stw_be_p(&outbuf[32], 3);
    outbuf[34] = 0x08 | 0x03; /* version 2, persistent, current */
    outbuf[35] = 4;
    outbuf[36] = 0x39; /* tray, load=1, eject=1, unlocked at powerup, lock=1 */
    /* TODO: Random readable, CD read, DVD read, drive serial number,
       power management */
    return 40;
}

static int scsi_emulate_mechanism_status(SCSIDiskState *s, uint8_t *outbuf)
{
    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }
    memset(outbuf, 0, 8);
    outbuf[5] = 1; /* CD-ROM */
    return 8;
}
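
/*
 * Fill in a single mode page for MODE SENSE.  page_control is the PC field
 * from the CDB: 0 returns current values, 1 returns the changeable-bits
 * mask, 2 (default values) is treated like current values here, and 3
 * (saved values) is rejected by the MODE SENSE handler with SAVING
 * PARAMETERS NOT SUPPORTED.  *p_outbuf is advanced past the emitted page so
 * the caller can emit several pages back to back.
 */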
static int mode_sense_page(SCSIDiskState *s, int page, uint8_t **p_outbuf,
                           int page_control)
{
    static const int mode_sense_valid[0x3f] = {
        [MODE_PAGE_HD_GEOMETRY]            = (1 << TYPE_DISK),
        [MODE_PAGE_FLEXIBLE_DISK_GEOMETRY] = (1 << TYPE_DISK),
        [MODE_PAGE_CACHING]                = (1 << TYPE_DISK) | (1 << TYPE_ROM),
        [MODE_PAGE_R_W_ERROR]              = (1 << TYPE_DISK) | (1 << TYPE_ROM),
        [MODE_PAGE_AUDIO_CTL]              = (1 << TYPE_ROM),
        [MODE_PAGE_CAPABILITIES]           = (1 << TYPE_ROM),
    };

    uint8_t *p = *p_outbuf + 2;
    int length;

    if ((mode_sense_valid[page] & (1 << s->qdev.type)) == 0) {
        return -1;
    }

    /*
     * If Changeable Values are requested, a mask denoting those mode parameters
     * that are changeable shall be returned. As we currently don't support
     * parameter changes via MODE_SELECT all bits are returned set to zero.
     * The buffer was already memset to zero by the caller of this function.
     *
     * The offsets here are off by two compared to the descriptions in the
     * SCSI specs, because those include a 2-byte header.  This is unfortunate,
     * but it is done so that offsets are consistent within our implementation
     * of MODE SENSE and MODE SELECT.  MODE SELECT has to deal with both
     * 2-byte and 4-byte headers.
     */
    switch (page) {
    case MODE_PAGE_HD_GEOMETRY:
        length = 0x16;
        if (page_control == 1) { /* Changeable Values */
            break;
        }
        /* if a geometry hint is available, use it */
        p[0] = (s->qdev.conf.cyls >> 16) & 0xff;
        p[1] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[2] = s->qdev.conf.cyls & 0xff;
        p[3] = s->qdev.conf.heads & 0xff;
        /* Write precomp start cylinder, disabled */
        p[4] = (s->qdev.conf.cyls >> 16) & 0xff;
        p[5] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[6] = s->qdev.conf.cyls & 0xff;
        /* Reduced current start cylinder, disabled */
        p[7] = (s->qdev.conf.cyls >> 16) & 0xff;
        p[8] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[9] = s->qdev.conf.cyls & 0xff;
        /* Device step rate [ns], 200ns */
        p[10] = 0;
        p[11] = 200;
        /* Landing zone cylinder */
        p[12] = 0xff;
        p[13] = 0xff;
        p[14] = 0xff;
        /* Medium rotation rate [rpm], 5400 rpm */
        p[18] = (5400 >> 8) & 0xff;
        p[19] = 5400 & 0xff;
        break;

    case MODE_PAGE_FLEXIBLE_DISK_GEOMETRY:
        length = 0x1e;
        if (page_control == 1) { /* Changeable Values */
            break;
        }
        /* Transfer rate [kbit/s], 5Mbit/s */
        p[0] = 5000 >> 8;
        p[1] = 5000 & 0xff;
        /* if a geometry hint is available, use it */
        p[2] = s->qdev.conf.heads & 0xff;
        p[3] = s->qdev.conf.secs & 0xff;
        p[4] = s->qdev.blocksize >> 8;
        p[6] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[7] = s->qdev.conf.cyls & 0xff;
        /* Write precomp start cylinder, disabled */
        p[8] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[9] = s->qdev.conf.cyls & 0xff;
        /* Reduced current start cylinder, disabled */
        p[10] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[11] = s->qdev.conf.cyls & 0xff;
        /* Device step rate [100us], 100us */
        p[12] = 0;
        p[13] = 1;
        /* Device step pulse width [us], 1us */
        p[14] = 1;
        /* Device head settle delay [100us], 100us */
        p[15] = 0;
        p[16] = 1;
        /* Motor on delay [0.1s], 0.1s */
        p[17] = 1;
        /* Motor off delay [0.1s], 0.1s */
        p[18] = 1;
        /* Medium rotation rate [rpm], 5400 rpm */
        p[26] = (5400 >> 8) & 0xff;
        p[27] = 5400 & 0xff;
        break;

    case MODE_PAGE_CACHING:
        length = 0x12;
        if (page_control == 1 || /* Changeable Values */
            blk_enable_write_cache(s->qdev.conf.blk)) {
            p[0] = 4; /* WCE */
        }
        break;

    case MODE_PAGE_R_W_ERROR:
        length = 10;
        if (page_control == 1) { /* Changeable Values */
            break;
        }
        p[0] = 0x80; /* Automatic Write Reallocation Enabled */
        if (s->qdev.type == TYPE_ROM) {
            p[1] = 0x20; /* Read Retry Count */
        }
        break;

    case MODE_PAGE_AUDIO_CTL:
        length = 14;
        break;

    case MODE_PAGE_CAPABILITIES:
        length = 0x14;
        if (page_control == 1) { /* Changeable Values */
            break;
        }

        p[0] = 0x3b; /* CD-R & CD-RW read */
        p[1] = 0; /* Writing not supported */
        p[2] = 0x7f; /* Audio, composite, digital out,
                        mode 2 form 1&2, multi session */
        p[3] = 0xff; /* CD DA, DA accurate, RW supported,
                        RW corrected, C2 errors, ISRC,
                        UPC, Bar code */
        p[4] = 0x2d | (s->tray_locked ? 2 : 0);
        /* Locking supported, jumper present, eject, tray */
        p[5] = 0; /* no volume & mute control, no
                     changer */
        p[6] = (50 * 176) >> 8; /* 50x read speed */
        p[7] = (50 * 176) & 0xff;
        p[8] = 2 >> 8; /* Two volume levels */
        p[9] = 2 & 0xff;
        p[10] = 2048 >> 8; /* 2M buffer */
        p[11] = 2048 & 0xff;
        p[12] = (16 * 176) >> 8; /* 16x read speed current */
        p[13] = (16 * 176) & 0xff;
        p[16] = (16 * 176) >> 8; /* 16x write speed */
        p[17] = (16 * 176) & 0xff;
        p[18] = (16 * 176) >> 8; /* 16x write speed current */
        p[19] = (16 * 176) & 0xff;
        break;

    default:
        return -1;
    }

    assert(length < 256);
    (*p_outbuf)[0] = page;
    (*p_outbuf)[1] = length;
    *p_outbuf += length + 2;
    return length + 2;
}

static int scsi_disk_emulate_mode_sense(SCSIDiskReq *r, uint8_t *outbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    uint64_t nb_sectors;
    bool dbd;
    int page, buflen, ret, page_control;
    uint8_t *p;
    uint8_t dev_specific_param;

    dbd = (r->req.cmd.buf[1] & 0x8) != 0;
    page = r->req.cmd.buf[2] & 0x3f;
    page_control = (r->req.cmd.buf[2] & 0xc0) >> 6;

    trace_scsi_disk_emulate_mode_sense((r->req.cmd.buf[0] == MODE_SENSE) ? 6 :
                                       10, page, r->req.cmd.xfer, page_control);
    memset(outbuf, 0, r->req.cmd.xfer);
    p = outbuf;

    if (s->qdev.type == TYPE_DISK) {
        dev_specific_param = s->features & (1 << SCSI_DISK_F_DPOFUA) ? 0x10 : 0;
        if (blk_is_read_only(s->qdev.conf.blk)) {
            dev_specific_param |= 0x80; /* Readonly.  */
        }
    } else {
        /* MMC prescribes that CD/DVD drives have no block descriptors,
         * and defines no device-specific parameter.  */
        dev_specific_param = 0x00;
        dbd = true;
    }

    if (r->req.cmd.buf[0] == MODE_SENSE) {
        p[1] = 0; /* Default media type.  */
        p[2] = dev_specific_param;
        p[3] = 0; /* Block descriptor length.  */
        p += 4;
    } else { /* MODE_SENSE_10 */
        p[2] = 0; /* Default media type.  */
        p[3] = dev_specific_param;
        p[6] = p[7] = 0; /* Block descriptor length.  */
        p += 8;
    }

    blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
    if (!dbd && nb_sectors) {
        if (r->req.cmd.buf[0] == MODE_SENSE) {
            outbuf[3] = 8; /* Block descriptor length  */
        } else { /* MODE_SENSE_10 */
            outbuf[7] = 8; /* Block descriptor length  */
        }
        nb_sectors /= (s->qdev.blocksize / 512);
        if (nb_sectors > 0xffffff) {
            nb_sectors = 0;
        }
        p[0] = 0; /* media density code */
        p[1] = (nb_sectors >> 16) & 0xff;
        p[2] = (nb_sectors >> 8) & 0xff;
        p[3] = nb_sectors & 0xff;
        p[4] = 0; /* reserved */
        p[5] = 0; /* bytes 5-7 are the sector size in bytes */
        p[6] = s->qdev.blocksize >> 8;
        p[7] = 0;
        p += 8;
    }

    if (page_control == 3) {
        /* Saved Values */
        scsi_check_condition(r, SENSE_CODE(SAVING_PARAMS_NOT_SUPPORTED));
        return -1;
    }

    if (page == 0x3f) {
        for (page = 0; page <= 0x3e; page++) {
            mode_sense_page(s, page, &p, page_control);
        }
    } else {
        ret = mode_sense_page(s, page, &p, page_control);
        if (ret == -1) {
            return -1;
        }
    }

    buflen = p - outbuf;
    /*
     * The mode data length field specifies the length in bytes of the
     * following data that is available to be transferred. The mode data
     * length does not include itself.
     */
    if (r->req.cmd.buf[0] == MODE_SENSE) {
        outbuf[0] = buflen - 1;
    } else { /* MODE_SENSE_10 */
        outbuf[0] = ((buflen - 2) >> 8) & 0xff;
        outbuf[1] = (buflen - 2) & 0xff;
    }
    return buflen;
}

static int scsi_disk_emulate_read_toc(SCSIRequest *req, uint8_t *outbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    int start_track, format, msf, toclen;
    uint64_t nb_sectors;

    msf = req->cmd.buf[1] & 2;
    format = req->cmd.buf[2] & 0xf;
    start_track = req->cmd.buf[6];
    blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
    trace_scsi_disk_emulate_read_toc(start_track, format, msf >> 1);
    nb_sectors /= s->qdev.blocksize / 512;
    switch (format) {
    case 0:
        toclen = cdrom_read_toc(nb_sectors, outbuf, msf, start_track);
        break;
    case 1:
        /* multi session : only a single session defined */
        toclen = 12;
        memset(outbuf, 0, 12);
        outbuf[1] = 0x0a;
        outbuf[2] = 0x01;
        outbuf[3] = 0x01;
        break;
    case 2:
        toclen = cdrom_read_toc_raw(nb_sectors, outbuf, msf, start_track);
        break;
    default:
        return -1;
    }
    return toclen;
}
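
/*
 * START STOP UNIT: only the load/eject side is emulated.  LOEJ=1 with
 * START=0 ejects the medium and LOEJ=1 with START=1 loads it, but only when
 * the POWER CONDITION field is zero; a non-zero power condition is accepted
 * and otherwise ignored.  An eject attempt while the tray is locked fails
 * with the appropriate "medium removal prevented" sense code.
 */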
static int scsi_disk_emulate_start_stop(SCSIDiskReq *r)
{
    SCSIRequest *req = &r->req;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    bool start = req->cmd.buf[4] & 1;
    bool loej = req->cmd.buf[4] & 2; /* load on start, eject on !start */
    int pwrcnd = req->cmd.buf[4] & 0xf0;

    if (pwrcnd) {
        /* eject/load only happens for power condition == 0 */
        return 0;
    }

    if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) && loej) {
        if (!start && !s->tray_open && s->tray_locked) {
            scsi_check_condition(r,
                                 blk_is_inserted(s->qdev.conf.blk)
                                 ? SENSE_CODE(ILLEGAL_REQ_REMOVAL_PREVENTED)
                                 : SENSE_CODE(NOT_READY_REMOVAL_PREVENTED));
            return -1;
        }

        if (s->tray_open != !start) {
            blk_eject(s->qdev.conf.blk, !start);
            s->tray_open = !start;
        }
    }
    return 0;
}

static void scsi_disk_emulate_read_data(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    int buflen = r->iov.iov_len;

    if (buflen) {
        trace_scsi_disk_emulate_read_data(buflen);
        r->iov.iov_len = 0;
        r->started = true;
        scsi_req_data(&r->req, buflen);
        return;
    }

    /* This also clears the sense buffer for REQUEST SENSE.  */
    scsi_req_complete(&r->req, GOOD);
}

static int scsi_disk_check_mode_select(SCSIDiskState *s, int page,
                                       uint8_t *inbuf, int inlen)
{
    uint8_t mode_current[SCSI_MAX_MODE_LEN];
    uint8_t mode_changeable[SCSI_MAX_MODE_LEN];
    uint8_t *p;
    int len, expected_len, changeable_len, i;

    /* The input buffer does not include the page header, so it is
     * off by 2 bytes.
     */
    expected_len = inlen + 2;
    if (expected_len > SCSI_MAX_MODE_LEN) {
        return -1;
    }

    p = mode_current;
    memset(mode_current, 0, inlen + 2);
    len = mode_sense_page(s, page, &p, 0);
    if (len < 0 || len != expected_len) {
        return -1;
    }

    p = mode_changeable;
    memset(mode_changeable, 0, inlen + 2);
    changeable_len = mode_sense_page(s, page, &p, 1);
    assert(changeable_len == len);

    /* Check that unchangeable bits are the same as what MODE SENSE
     * would return.
     */
    for (i = 2; i < len; i++) {
        if (((mode_current[i] ^ inbuf[i - 2]) & ~mode_changeable[i]) != 0) {
            return -1;
        }
    }
    return 0;
}

static void scsi_disk_apply_mode_select(SCSIDiskState *s, int page, uint8_t *p)
{
    switch (page) {
    case MODE_PAGE_CACHING:
        blk_set_enable_write_cache(s->qdev.conf.blk, (p[0] & 4) != 0);
        break;

    default:
        break;
    }
}

static int mode_select_pages(SCSIDiskReq *r, uint8_t *p, int len, bool change)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    while (len > 0) {
        int page, subpage, page_len;

        /* Parse both possible formats for the mode page headers.  */
        page = p[0] & 0x3f;
        if (p[0] & 0x40) {
            if (len < 4) {
                goto invalid_param_len;
            }
            subpage = p[1];
            page_len = lduw_be_p(&p[2]);
            p += 4;
            len -= 4;
        } else {
            if (len < 2) {
                goto invalid_param_len;
            }
            subpage = 0;
            page_len = p[1];
            p += 2;
            len -= 2;
        }

        if (subpage) {
            goto invalid_param;
        }
        if (page_len > len) {
            goto invalid_param_len;
        }

        if (!change) {
            if (scsi_disk_check_mode_select(s, page, p, page_len) < 0) {
                goto invalid_param;
            }
        } else {
            scsi_disk_apply_mode_select(s, page, p);
        }

        p += page_len;
        len -= page_len;
    }
    return 0;

invalid_param:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM));
    return -1;

invalid_param_len:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
    return -1;
}

static void scsi_disk_emulate_mode_select(SCSIDiskReq *r, uint8_t *inbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    uint8_t *p = inbuf;
    int cmd = r->req.cmd.buf[0];
    int len = r->req.cmd.xfer;
    int hdr_len = (cmd == MODE_SELECT ? 4 : 8);
    int bd_len;
    int pass;

    /* We only support PF=1, SP=0.  */
    if ((r->req.cmd.buf[1] & 0x11) != 0x10) {
        goto invalid_field;
    }

    if (len < hdr_len) {
        goto invalid_param_len;
    }

    bd_len = (cmd == MODE_SELECT ? p[3] : lduw_be_p(&p[6]));
    len -= hdr_len;
    p += hdr_len;
    if (len < bd_len) {
        goto invalid_param_len;
    }
    if (bd_len != 0 && bd_len != 8) {
        goto invalid_param;
    }

    len -= bd_len;
    p += bd_len;

    /* Ensure no change is made if there is an error!  */
    for (pass = 0; pass < 2; pass++) {
        if (mode_select_pages(r, p, len, pass == 1) < 0) {
            assert(pass == 0);
            return;
        }
    }
    if (!blk_enable_write_cache(s->qdev.conf.blk)) {
        /* The request is used as the AIO opaque value, so add a ref.  */
        scsi_req_ref(&r->req);
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
        return;
    }

    scsi_req_complete(&r->req, GOOD);
    return;

invalid_param:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM));
    return;

invalid_param_len:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
    return;

invalid_field:
    scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
}

static inline bool check_lba_range(SCSIDiskState *s,
                                   uint64_t sector_num, uint32_t nb_sectors)
{
    /*
     * The first line tests that no overflow happens when computing the last
     * sector.  The second line tests that the last accessed sector is in
     * range.
     *
     * Careful, the computations should not underflow for nb_sectors == 0,
     * and a 0-block read to the first LBA beyond the end of device is
     * valid.
     */
    return (sector_num <= sector_num + nb_sectors &&
            sector_num + nb_sectors <= s->qdev.max_lba + 1);
}

typedef struct UnmapCBData {
    SCSIDiskReq *r;
    uint8_t *inbuf;
    int count;
} UnmapCBData;

static void scsi_unmap_complete(void *opaque, int ret);

static void scsi_unmap_complete_noio(UnmapCBData *data, int ret)
{
    SCSIDiskReq *r = data->r;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    uint64_t sector_num;
    uint32_t nb_sectors;

    assert(r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    if (data->count > 0) {
        sector_num = ldq_be_p(&data->inbuf[0]);
        nb_sectors = ldl_be_p(&data->inbuf[8]) & 0xffffffffULL;
        if (!check_lba_range(s, sector_num, nb_sectors)) {
            scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
            goto done;
        }

        r->req.aiocb = blk_aio_pdiscard(s->qdev.conf.blk,
                                        sector_num * s->qdev.blocksize,
                                        nb_sectors * s->qdev.blocksize,
                                        scsi_unmap_complete, data);
        data->count--;
        data->inbuf += 16;
        return;
    }

    scsi_req_complete(&r->req, GOOD);

done:
    scsi_req_unref(&r->req);
    g_free(data);
}

static void scsi_unmap_complete(void *opaque, int ret)
{
    UnmapCBData *data = opaque;
    SCSIDiskReq *r = data->r;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    scsi_unmap_complete_noio(data, ret);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}

static void scsi_disk_emulate_unmap(SCSIDiskReq *r, uint8_t *inbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    uint8_t *p = inbuf;
    int len = r->req.cmd.xfer;
    UnmapCBData *data;

    /* Reject ANCHOR=1.  */
    if (r->req.cmd.buf[1] & 0x1) {
        goto invalid_field;
    }

    if (len < 8) {
        goto invalid_param_len;
    }
    if (len < lduw_be_p(&p[0]) + 2) {
        goto invalid_param_len;
    }
    if (len < lduw_be_p(&p[2]) + 8) {
        goto invalid_param_len;
    }
    if (lduw_be_p(&p[2]) & 15) {
        goto invalid_param_len;
    }

    if (blk_is_read_only(s->qdev.conf.blk)) {
        scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
        return;
    }

    data = g_new0(UnmapCBData, 1);
    data->r = r;
    data->inbuf = &p[8];
    data->count = lduw_be_p(&p[2]) >> 4;

    /* The matching unref is in scsi_unmap_complete, before data is freed.  */
    scsi_req_ref(&r->req);
    scsi_unmap_complete_noio(data, 0);
    return;

invalid_param_len:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
    return;

invalid_field:
    scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
}
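
/*
 * WRITE SAME is emulated in two ways: if the UNMAP bit is set or the single
 * block of payload data is all zeroes, the whole range is handled with one
 * blk_aio_pwrite_zeroes call; otherwise the pattern is replicated into a
 * bounce buffer of at most SCSI_WRITE_SAME_MAX bytes and written out chunk
 * by chunk from scsi_write_same_complete.
 */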
typedef struct WriteSameCBData {
    SCSIDiskReq *r;
    int64_t sector;
    int nb_sectors;
    QEMUIOVector qiov;
    struct iovec iov;
} WriteSameCBData;

static void scsi_write_same_complete(void *opaque, int ret)
{
    WriteSameCBData *data = opaque;
    SCSIDiskReq *r = data->r;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;
    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (scsi_disk_req_check_error(r, ret, true)) {
        goto done;
    }

    block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);

    data->nb_sectors -= data->iov.iov_len / 512;
    data->sector += data->iov.iov_len / 512;
    data->iov.iov_len = MIN(data->nb_sectors * 512, data->iov.iov_len);
    if (data->iov.iov_len) {
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         data->iov.iov_len, BLOCK_ACCT_WRITE);
        /* Reinitialize qiov, to handle unaligned WRITE SAME request
         * where final qiov may need smaller size */
        qemu_iovec_init_external(&data->qiov, &data->iov, 1);
        r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk,
                                       data->sector << BDRV_SECTOR_BITS,
                                       &data->qiov, 0,
                                       scsi_write_same_complete, data);
        aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
        return;
    }

    scsi_req_complete(&r->req, GOOD);

done:
    scsi_req_unref(&r->req);
    qemu_vfree(data->iov.iov_base);
    g_free(data);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}

static void scsi_disk_emulate_write_same(SCSIDiskReq *r, uint8_t *inbuf)
{
    SCSIRequest *req = &r->req;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    uint32_t nb_sectors = scsi_data_cdb_xfer(r->req.cmd.buf);
    WriteSameCBData *data;
    uint8_t *buf;
    int i;

    /* Fail if PBDATA=1 or LBDATA=1 or ANCHOR=1.  */
    if (nb_sectors == 0 || (req->cmd.buf[1] & 0x16)) {
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        return;
    }

    if (blk_is_read_only(s->qdev.conf.blk)) {
        scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
        return;
    }
    if (!check_lba_range(s, r->req.cmd.lba, nb_sectors)) {
        scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
        return;
    }

    if ((req->cmd.buf[1] & 0x1) || buffer_is_zero(inbuf, s->qdev.blocksize)) {
        int flags = (req->cmd.buf[1] & 0x8) ? BDRV_REQ_MAY_UNMAP : 0;

        /* The request is used as the AIO opaque value, so add a ref.  */
        scsi_req_ref(&r->req);
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         nb_sectors * s->qdev.blocksize,
                         BLOCK_ACCT_WRITE);
        r->req.aiocb = blk_aio_pwrite_zeroes(s->qdev.conf.blk,
                                             r->req.cmd.lba * s->qdev.blocksize,
                                             nb_sectors * s->qdev.blocksize,
                                             flags, scsi_aio_complete, r);
        return;
    }

    data = g_new0(WriteSameCBData, 1);
    data->r = r;
    data->sector = r->req.cmd.lba * (s->qdev.blocksize / 512);
    data->nb_sectors = nb_sectors * (s->qdev.blocksize / 512);
    data->iov.iov_len = MIN(data->nb_sectors * 512, SCSI_WRITE_SAME_MAX);
    data->iov.iov_base = buf = blk_blockalign(s->qdev.conf.blk,
                                              data->iov.iov_len);
    qemu_iovec_init_external(&data->qiov, &data->iov, 1);

    for (i = 0; i < data->iov.iov_len; i += s->qdev.blocksize) {
        memcpy(&buf[i], inbuf, s->qdev.blocksize);
    }

    scsi_req_ref(&r->req);
    block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                     data->iov.iov_len, BLOCK_ACCT_WRITE);
    r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk,
                                   data->sector << BDRV_SECTOR_BITS,
                                   &data->qiov, 0,
                                   scsi_write_same_complete, data);
}

static void scsi_disk_emulate_write_data(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    if (r->iov.iov_len) {
        int buflen = r->iov.iov_len;
        trace_scsi_disk_emulate_write_data(buflen);
        r->iov.iov_len = 0;
        scsi_req_data(&r->req, buflen);
        return;
    }

    switch (req->cmd.buf[0]) {
    case MODE_SELECT:
    case MODE_SELECT_10:
        /* This also clears the sense buffer for REQUEST SENSE.  */
        scsi_disk_emulate_mode_select(r, r->iov.iov_base);
        break;

    case UNMAP:
        scsi_disk_emulate_unmap(r, r->iov.iov_base);
        break;

    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
        if (r->req.status == -1) {
            scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        }
        break;

    case WRITE_SAME_10:
    case WRITE_SAME_16:
        scsi_disk_emulate_write_same(r, r->iov.iov_base);
        break;

    default:
        abort();
    }
}

static int32_t scsi_disk_emulate_command(SCSIRequest *req, uint8_t *buf)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    uint64_t nb_sectors;
    uint8_t *outbuf;
    int buflen;

    switch (req->cmd.buf[0]) {
    case INQUIRY:
    case MODE_SENSE:
    case MODE_SENSE_10:
    case RESERVE:
    case RESERVE_10:
    case RELEASE:
    case RELEASE_10:
    case START_STOP:
    case ALLOW_MEDIUM_REMOVAL:
    case GET_CONFIGURATION:
    case GET_EVENT_STATUS_NOTIFICATION:
    case MECHANISM_STATUS:
    case REQUEST_SENSE:
        break;

    default:
        if (!blk_is_available(s->qdev.conf.blk)) {
            scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
            return 0;
        }
        break;
    }

    /*
     * FIXME: we shouldn't return anything bigger than 4k, but the code
     * requires the buffer to be as big as req->cmd.xfer in several
     * places.  So, do not allow CDBs with a very large ALLOCATION
     * LENGTH.  The real fix would be to modify scsi_read_data and
     * dma_buf_read, so that they return data beyond the buflen
     * as all zeros.
     */
    if (req->cmd.xfer > 65536) {
        goto illegal_request;
    }
    r->buflen = MAX(4096, req->cmd.xfer);

    if (!r->iov.iov_base) {
        r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen);
    }

    buflen = req->cmd.xfer;
    outbuf = r->iov.iov_base;
    memset(outbuf, 0, r->buflen);
    switch (req->cmd.buf[0]) {
    case TEST_UNIT_READY:
        assert(blk_is_available(s->qdev.conf.blk));
        break;
    case INQUIRY:
        buflen = scsi_disk_emulate_inquiry(req, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case MODE_SENSE:
    case MODE_SENSE_10:
        buflen = scsi_disk_emulate_mode_sense(r, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case READ_TOC:
        buflen = scsi_disk_emulate_read_toc(req, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case RESERVE:
        if (req->cmd.buf[1] & 1) {
            goto illegal_request;
        }
        break;
    case RESERVE_10:
        if (req->cmd.buf[1] & 3) {
            goto illegal_request;
        }
        break;
    case RELEASE:
        if (req->cmd.buf[1] & 1) {
            goto illegal_request;
        }
        break;
    case RELEASE_10:
        if (req->cmd.buf[1] & 3) {
            goto illegal_request;
        }
        break;
    case START_STOP:
        if (scsi_disk_emulate_start_stop(r) < 0) {
            return 0;
        }
        break;
    case ALLOW_MEDIUM_REMOVAL:
        s->tray_locked = req->cmd.buf[4] & 1;
        blk_lock_medium(s->qdev.conf.blk, req->cmd.buf[4] & 1);
        break;
    case READ_CAPACITY_10:
        /* The normal LEN field for this command is zero.  */
        memset(outbuf, 0, 8);
        blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
        if (!nb_sectors) {
            scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY));
            return 0;
        }
        if ((req->cmd.buf[8] & 1) == 0 && req->cmd.lba) {
            goto illegal_request;
        }
        nb_sectors /= s->qdev.blocksize / 512;
        /* Returned value is the address of the last sector.  */
        nb_sectors--;
        /* Remember the new size for read/write sanity checking. */
        s->qdev.max_lba = nb_sectors;
        /* Clip to 2TB, instead of returning capacity modulo 2TB. */
        if (nb_sectors > UINT32_MAX) {
            nb_sectors = UINT32_MAX;
        }
        outbuf[0] = (nb_sectors >> 24) & 0xff;
        outbuf[1] = (nb_sectors >> 16) & 0xff;
        outbuf[2] = (nb_sectors >> 8) & 0xff;
        outbuf[3] = nb_sectors & 0xff;
        outbuf[4] = 0;
        outbuf[5] = 0;
        outbuf[6] = s->qdev.blocksize >> 8;
        outbuf[7] = 0;
        break;
    case REQUEST_SENSE:
        /* Just return "NO SENSE".  */
*/ 1990 buflen = scsi_convert_sense(NULL, 0, outbuf, r->buflen, 1991 (req->cmd.buf[1] & 1) == 0); 1992 if (buflen < 0) { 1993 goto illegal_request; 1994 } 1995 break; 1996 case MECHANISM_STATUS: 1997 buflen = scsi_emulate_mechanism_status(s, outbuf); 1998 if (buflen < 0) { 1999 goto illegal_request; 2000 } 2001 break; 2002 case GET_CONFIGURATION: 2003 buflen = scsi_get_configuration(s, outbuf); 2004 if (buflen < 0) { 2005 goto illegal_request; 2006 } 2007 break; 2008 case GET_EVENT_STATUS_NOTIFICATION: 2009 buflen = scsi_get_event_status_notification(s, r, outbuf); 2010 if (buflen < 0) { 2011 goto illegal_request; 2012 } 2013 break; 2014 case READ_DISC_INFORMATION: 2015 buflen = scsi_read_disc_information(s, r, outbuf); 2016 if (buflen < 0) { 2017 goto illegal_request; 2018 } 2019 break; 2020 case READ_DVD_STRUCTURE: 2021 buflen = scsi_read_dvd_structure(s, r, outbuf); 2022 if (buflen < 0) { 2023 goto illegal_request; 2024 } 2025 break; 2026 case SERVICE_ACTION_IN_16: 2027 /* Service Action In subcommands. */ 2028 if ((req->cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) { 2029 trace_scsi_disk_emulate_command_SAI_16(); 2030 memset(outbuf, 0, req->cmd.xfer); 2031 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 2032 if (!nb_sectors) { 2033 scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY)); 2034 return 0; 2035 } 2036 if ((req->cmd.buf[14] & 1) == 0 && req->cmd.lba) { 2037 goto illegal_request; 2038 } 2039 nb_sectors /= s->qdev.blocksize / 512; 2040 /* Returned value is the address of the last sector. */ 2041 nb_sectors--; 2042 /* Remember the new size for read/write sanity checking. */ 2043 s->qdev.max_lba = nb_sectors; 2044 outbuf[0] = (nb_sectors >> 56) & 0xff; 2045 outbuf[1] = (nb_sectors >> 48) & 0xff; 2046 outbuf[2] = (nb_sectors >> 40) & 0xff; 2047 outbuf[3] = (nb_sectors >> 32) & 0xff; 2048 outbuf[4] = (nb_sectors >> 24) & 0xff; 2049 outbuf[5] = (nb_sectors >> 16) & 0xff; 2050 outbuf[6] = (nb_sectors >> 8) & 0xff; 2051 outbuf[7] = nb_sectors & 0xff; 2052 outbuf[8] = 0; 2053 outbuf[9] = 0; 2054 outbuf[10] = s->qdev.blocksize >> 8; 2055 outbuf[11] = 0; 2056 outbuf[12] = 0; 2057 outbuf[13] = get_physical_block_exp(&s->qdev.conf); 2058 2059 /* set TPE bit if the format supports discard */ 2060 if (s->qdev.conf.discard_granularity) { 2061 outbuf[14] = 0x80; 2062 } 2063 2064 /* Protection, exponent and lowest lba field left blank. */ 2065 break; 2066 } 2067 trace_scsi_disk_emulate_command_SAI_unsupported(); 2068 goto illegal_request; 2069 case SYNCHRONIZE_CACHE: 2070 /* The request is used as the AIO opaque value, so add a ref. 
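 *
 * (The reference taken here pairs with the scsi_req_unref() performed by
 * the scsi_aio_complete() callback once blk_aio_flush() finishes, so the
 * request stays alive for the whole asynchronous flush.)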
*/ 2071 scsi_req_ref(&r->req); 2072 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0, 2073 BLOCK_ACCT_FLUSH); 2074 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r); 2075 return 0; 2076 case SEEK_10: 2077 trace_scsi_disk_emulate_command_SEEK_10(r->req.cmd.lba); 2078 if (r->req.cmd.lba > s->qdev.max_lba) { 2079 goto illegal_lba; 2080 } 2081 break; 2082 case MODE_SELECT: 2083 trace_scsi_disk_emulate_command_MODE_SELECT(r->req.cmd.xfer); 2084 break; 2085 case MODE_SELECT_10: 2086 trace_scsi_disk_emulate_command_MODE_SELECT_10(r->req.cmd.xfer); 2087 break; 2088 case UNMAP: 2089 trace_scsi_disk_emulate_command_UNMAP(r->req.cmd.xfer); 2090 break; 2091 case VERIFY_10: 2092 case VERIFY_12: 2093 case VERIFY_16: 2094 trace_scsi_disk_emulate_command_VERIFY((req->cmd.buf[1] >> 1) & 3); 2095 if (req->cmd.buf[1] & 6) { 2096 goto illegal_request; 2097 } 2098 break; 2099 case WRITE_SAME_10: 2100 case WRITE_SAME_16: 2101 trace_scsi_disk_emulate_command_WRITE_SAME( 2102 req->cmd.buf[0] == WRITE_SAME_10 ? 10 : 16, r->req.cmd.xfer); 2103 break; 2104 default: 2105 trace_scsi_disk_emulate_command_UNKNOWN(buf[0], 2106 scsi_command_name(buf[0])); 2107 scsi_check_condition(r, SENSE_CODE(INVALID_OPCODE)); 2108 return 0; 2109 } 2110 assert(!r->req.aiocb); 2111 r->iov.iov_len = MIN(r->buflen, req->cmd.xfer); 2112 if (r->iov.iov_len == 0) { 2113 scsi_req_complete(&r->req, GOOD); 2114 } 2115 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { 2116 assert(r->iov.iov_len == req->cmd.xfer); 2117 return -r->iov.iov_len; 2118 } else { 2119 return r->iov.iov_len; 2120 } 2121 2122 illegal_request: 2123 if (r->req.status == -1) { 2124 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 2125 } 2126 return 0; 2127 2128 illegal_lba: 2129 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE)); 2130 return 0; 2131 } 2132 2133 /* Execute a scsi command. Returns the length of the data expected by the 2134 command. This will be Positive for data transfers from the device 2135 (eg. disk reads), negative for transfers to the device (eg. disk writes), 2136 and zero if the command does not transfer any data. */ 2137 2138 static int32_t scsi_disk_dma_command(SCSIRequest *req, uint8_t *buf) 2139 { 2140 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 2141 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 2142 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s)); 2143 uint32_t len; 2144 uint8_t command; 2145 2146 command = buf[0]; 2147 2148 if (!blk_is_available(s->qdev.conf.blk)) { 2149 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM)); 2150 return 0; 2151 } 2152 2153 len = scsi_data_cdb_xfer(r->req.cmd.buf); 2154 switch (command) { 2155 case READ_6: 2156 case READ_10: 2157 case READ_12: 2158 case READ_16: 2159 trace_scsi_disk_dma_command_READ(r->req.cmd.lba, len); 2160 /* Protection information is not supported. For SCSI versions 2 and 2161 * older (as determined by snooping the guest's INQUIRY commands), 2162 * there is no RD/WR/VRPROTECT, so skip this check in these versions. 
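 *
 * For example, with scsi_version > 2 a READ(10) whose CDB byte 1 is 0x20
 * (RDPROTECT = 001, i.e. one of the three protection bits covered by the
 * 0xe0 mask) fails the check below and is answered with ILLEGAL REQUEST /
 * INVALID FIELD IN CDB, while the same CDB with byte 1 == 0x00 is executed
 * normally.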
2163 */ 2164 if (s->qdev.scsi_version > 2 && (r->req.cmd.buf[1] & 0xe0)) { 2165 goto illegal_request; 2166 } 2167 if (!check_lba_range(s, r->req.cmd.lba, len)) { 2168 goto illegal_lba; 2169 } 2170 r->sector = r->req.cmd.lba * (s->qdev.blocksize / 512); 2171 r->sector_count = len * (s->qdev.blocksize / 512); 2172 break; 2173 case WRITE_6: 2174 case WRITE_10: 2175 case WRITE_12: 2176 case WRITE_16: 2177 case WRITE_VERIFY_10: 2178 case WRITE_VERIFY_12: 2179 case WRITE_VERIFY_16: 2180 if (blk_is_read_only(s->qdev.conf.blk)) { 2181 scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED)); 2182 return 0; 2183 } 2184 trace_scsi_disk_dma_command_WRITE( 2185 (command & 0xe) == 0xe ? "And Verify " : "", 2186 r->req.cmd.lba, len); 2187 /* fall through */ 2188 case VERIFY_10: 2189 case VERIFY_12: 2190 case VERIFY_16: 2191 /* We get here only for BYTCHK == 0x01 and only for scsi-block. 2192 * As far as DMA is concerned, we can treat it the same as a write; 2193 * scsi_block_do_sgio will send VERIFY commands. 2194 */ 2195 if (s->qdev.scsi_version > 2 && (r->req.cmd.buf[1] & 0xe0)) { 2196 goto illegal_request; 2197 } 2198 if (!check_lba_range(s, r->req.cmd.lba, len)) { 2199 goto illegal_lba; 2200 } 2201 r->sector = r->req.cmd.lba * (s->qdev.blocksize / 512); 2202 r->sector_count = len * (s->qdev.blocksize / 512); 2203 break; 2204 default: 2205 abort(); 2206 illegal_request: 2207 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 2208 return 0; 2209 illegal_lba: 2210 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE)); 2211 return 0; 2212 } 2213 r->need_fua_emulation = sdc->need_fua_emulation(&r->req.cmd); 2214 if (r->sector_count == 0) { 2215 scsi_req_complete(&r->req, GOOD); 2216 } 2217 assert(r->iov.iov_len == 0); 2218 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { 2219 return -r->sector_count * 512; 2220 } else { 2221 return r->sector_count * 512; 2222 } 2223 } 2224 2225 static void scsi_disk_reset(DeviceState *dev) 2226 { 2227 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev.qdev, dev); 2228 uint64_t nb_sectors; 2229 2230 scsi_device_purge_requests(&s->qdev, SENSE_CODE(RESET)); 2231 2232 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 2233 nb_sectors /= s->qdev.blocksize / 512; 2234 if (nb_sectors) { 2235 nb_sectors--; 2236 } 2237 s->qdev.max_lba = nb_sectors; 2238 /* reset tray statuses */ 2239 s->tray_locked = 0; 2240 s->tray_open = 0; 2241 2242 s->qdev.scsi_version = s->qdev.default_scsi_version; 2243 } 2244 2245 static void scsi_disk_resize_cb(void *opaque) 2246 { 2247 SCSIDiskState *s = opaque; 2248 2249 /* SPC lists this sense code as available only for 2250 * direct-access devices. 2251 */ 2252 if (s->qdev.type == TYPE_DISK) { 2253 scsi_device_report_change(&s->qdev, SENSE_CODE(CAPACITY_CHANGED)); 2254 } 2255 } 2256 2257 static void scsi_cd_change_media_cb(void *opaque, bool load, Error **errp) 2258 { 2259 SCSIDiskState *s = opaque; 2260 2261 /* 2262 * When a CD gets changed, we have to report an ejected state and 2263 * then a loaded state to guests so that they detect tray 2264 * open/close and media change events. Guests that do not use 2265 * GET_EVENT_STATUS_NOTIFICATION to detect such tray open/close 2266 * states rely on this behavior. 2267 * 2268 * media_changed governs the state machine used for unit attention 2269 * report. media_event is used by GET EVENT STATUS NOTIFICATION. 
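 *
 * (Sketch of the resulting flow: on insertion this callback runs with
 * load == true, so tray_open is cleared, a unit attention is queued and
 * media_changed is latched; once that unit attention has been reported,
 * scsi_disk_unit_attention_reported() below queues the follow-up
 * MEDIUM CHANGED unit attention, giving guests the two-step eject/load
 * sequence described above.)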
2270 */ 2271 s->media_changed = load; 2272 s->tray_open = !load; 2273 scsi_device_set_ua(&s->qdev, SENSE_CODE(UNIT_ATTENTION_NO_MEDIUM)); 2274 s->media_event = true; 2275 s->eject_request = false; 2276 } 2277 2278 static void scsi_cd_eject_request_cb(void *opaque, bool force) 2279 { 2280 SCSIDiskState *s = opaque; 2281 2282 s->eject_request = true; 2283 if (force) { 2284 s->tray_locked = false; 2285 } 2286 } 2287 2288 static bool scsi_cd_is_tray_open(void *opaque) 2289 { 2290 return ((SCSIDiskState *)opaque)->tray_open; 2291 } 2292 2293 static bool scsi_cd_is_medium_locked(void *opaque) 2294 { 2295 return ((SCSIDiskState *)opaque)->tray_locked; 2296 } 2297 2298 static const BlockDevOps scsi_disk_removable_block_ops = { 2299 .change_media_cb = scsi_cd_change_media_cb, 2300 .eject_request_cb = scsi_cd_eject_request_cb, 2301 .is_tray_open = scsi_cd_is_tray_open, 2302 .is_medium_locked = scsi_cd_is_medium_locked, 2303 2304 .resize_cb = scsi_disk_resize_cb, 2305 }; 2306 2307 static const BlockDevOps scsi_disk_block_ops = { 2308 .resize_cb = scsi_disk_resize_cb, 2309 }; 2310 2311 static void scsi_disk_unit_attention_reported(SCSIDevice *dev) 2312 { 2313 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2314 if (s->media_changed) { 2315 s->media_changed = false; 2316 scsi_device_set_ua(&s->qdev, SENSE_CODE(MEDIUM_CHANGED)); 2317 } 2318 } 2319 2320 static void scsi_realize(SCSIDevice *dev, Error **errp) 2321 { 2322 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2323 bool read_only; 2324 2325 if (!s->qdev.conf.blk) { 2326 error_setg(errp, "drive property not set"); 2327 return; 2328 } 2329 2330 if (!(s->features & (1 << SCSI_DISK_F_REMOVABLE)) && 2331 !blk_is_inserted(s->qdev.conf.blk)) { 2332 error_setg(errp, "Device needs media, but drive is empty"); 2333 return; 2334 } 2335 2336 blkconf_blocksizes(&s->qdev.conf); 2337 2338 if (s->qdev.conf.logical_block_size > 2339 s->qdev.conf.physical_block_size) { 2340 error_setg(errp, 2341 "logical_block_size > physical_block_size not supported"); 2342 return; 2343 } 2344 2345 if (blk_get_aio_context(s->qdev.conf.blk) != qemu_get_aio_context() && 2346 !s->qdev.hba_supports_iothread) 2347 { 2348 error_setg(errp, "HBA does not support iothreads"); 2349 return; 2350 } 2351 2352 if (dev->type == TYPE_DISK) { 2353 if (!blkconf_geometry(&dev->conf, NULL, 65535, 255, 255, errp)) { 2354 return; 2355 } 2356 } 2357 2358 read_only = blk_is_read_only(s->qdev.conf.blk); 2359 if (dev->type == TYPE_ROM) { 2360 read_only = true; 2361 } 2362 2363 if (!blkconf_apply_backend_options(&dev->conf, read_only, 2364 dev->type == TYPE_DISK, errp)) { 2365 return; 2366 } 2367 2368 if (s->qdev.conf.discard_granularity == -1) { 2369 s->qdev.conf.discard_granularity = 2370 MAX(s->qdev.conf.logical_block_size, DEFAULT_DISCARD_GRANULARITY); 2371 } 2372 2373 if (!s->version) { 2374 s->version = g_strdup(qemu_hw_version()); 2375 } 2376 if (!s->vendor) { 2377 s->vendor = g_strdup("QEMU"); 2378 } 2379 if (!s->device_id) { 2380 if (s->serial) { 2381 s->device_id = g_strdup_printf("%.20s", s->serial); 2382 } else { 2383 const char *str = blk_name(s->qdev.conf.blk); 2384 if (str && *str) { 2385 s->device_id = g_strdup(str); 2386 } 2387 } 2388 } 2389 2390 if (blk_is_sg(s->qdev.conf.blk)) { 2391 error_setg(errp, "unwanted /dev/sg*"); 2392 return; 2393 } 2394 2395 if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) && 2396 !(s->features & (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS))) { 2397 blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_removable_block_ops, s); 2398 } else { 2399 
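        /*
         * Non-removable devices (and scsi-block, which sets
         * SCSI_DISK_F_NO_REMOVABLE_DEVOPS) only need the resize callback,
         * so they get the reduced scsi_disk_block_ops instead of the
         * tray/eject-aware scsi_disk_removable_block_ops used above.
         */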
blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_block_ops, s); 2400 } 2401 blk_set_guest_block_size(s->qdev.conf.blk, s->qdev.blocksize); 2402 2403 blk_iostatus_enable(s->qdev.conf.blk); 2404 } 2405 2406 static void scsi_hd_realize(SCSIDevice *dev, Error **errp) 2407 { 2408 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2409 AioContext *ctx = NULL; 2410 /* can happen for devices without drive. The error message for missing 2411 * backend will be issued in scsi_realize 2412 */ 2413 if (s->qdev.conf.blk) { 2414 ctx = blk_get_aio_context(s->qdev.conf.blk); 2415 aio_context_acquire(ctx); 2416 blkconf_blocksizes(&s->qdev.conf); 2417 } 2418 s->qdev.blocksize = s->qdev.conf.logical_block_size; 2419 s->qdev.type = TYPE_DISK; 2420 if (!s->product) { 2421 s->product = g_strdup("QEMU HARDDISK"); 2422 } 2423 scsi_realize(&s->qdev, errp); 2424 if (ctx) { 2425 aio_context_release(ctx); 2426 } 2427 } 2428 2429 static void scsi_cd_realize(SCSIDevice *dev, Error **errp) 2430 { 2431 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2432 AioContext *ctx; 2433 int ret; 2434 2435 if (!dev->conf.blk) { 2436 /* Anonymous BlockBackend for an empty drive. As we put it into 2437 * dev->conf, qdev takes care of detaching on unplug. */ 2438 dev->conf.blk = blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL); 2439 ret = blk_attach_dev(dev->conf.blk, &dev->qdev); 2440 assert(ret == 0); 2441 } 2442 2443 ctx = blk_get_aio_context(dev->conf.blk); 2444 aio_context_acquire(ctx); 2445 s->qdev.blocksize = 2048; 2446 s->qdev.type = TYPE_ROM; 2447 s->features |= 1 << SCSI_DISK_F_REMOVABLE; 2448 if (!s->product) { 2449 s->product = g_strdup("QEMU CD-ROM"); 2450 } 2451 scsi_realize(&s->qdev, errp); 2452 aio_context_release(ctx); 2453 } 2454 2455 static void scsi_disk_realize(SCSIDevice *dev, Error **errp) 2456 { 2457 DriveInfo *dinfo; 2458 Error *local_err = NULL; 2459 2460 if (!dev->conf.blk) { 2461 scsi_realize(dev, &local_err); 2462 assert(local_err); 2463 error_propagate(errp, local_err); 2464 return; 2465 } 2466 2467 dinfo = blk_legacy_dinfo(dev->conf.blk); 2468 if (dinfo && dinfo->media_cd) { 2469 scsi_cd_realize(dev, errp); 2470 } else { 2471 scsi_hd_realize(dev, errp); 2472 } 2473 } 2474 2475 static const SCSIReqOps scsi_disk_emulate_reqops = { 2476 .size = sizeof(SCSIDiskReq), 2477 .free_req = scsi_free_request, 2478 .send_command = scsi_disk_emulate_command, 2479 .read_data = scsi_disk_emulate_read_data, 2480 .write_data = scsi_disk_emulate_write_data, 2481 .get_buf = scsi_get_buf, 2482 }; 2483 2484 static const SCSIReqOps scsi_disk_dma_reqops = { 2485 .size = sizeof(SCSIDiskReq), 2486 .free_req = scsi_free_request, 2487 .send_command = scsi_disk_dma_command, 2488 .read_data = scsi_read_data, 2489 .write_data = scsi_write_data, 2490 .get_buf = scsi_get_buf, 2491 .load_request = scsi_disk_load_request, 2492 .save_request = scsi_disk_save_request, 2493 }; 2494 2495 static const SCSIReqOps *const scsi_disk_reqops_dispatch[256] = { 2496 [TEST_UNIT_READY] = &scsi_disk_emulate_reqops, 2497 [INQUIRY] = &scsi_disk_emulate_reqops, 2498 [MODE_SENSE] = &scsi_disk_emulate_reqops, 2499 [MODE_SENSE_10] = &scsi_disk_emulate_reqops, 2500 [START_STOP] = &scsi_disk_emulate_reqops, 2501 [ALLOW_MEDIUM_REMOVAL] = &scsi_disk_emulate_reqops, 2502 [READ_CAPACITY_10] = &scsi_disk_emulate_reqops, 2503 [READ_TOC] = &scsi_disk_emulate_reqops, 2504 [READ_DVD_STRUCTURE] = &scsi_disk_emulate_reqops, 2505 [READ_DISC_INFORMATION] = &scsi_disk_emulate_reqops, 2506 [GET_CONFIGURATION] = &scsi_disk_emulate_reqops, 2507 
[GET_EVENT_STATUS_NOTIFICATION] = &scsi_disk_emulate_reqops, 2508 [MECHANISM_STATUS] = &scsi_disk_emulate_reqops, 2509 [SERVICE_ACTION_IN_16] = &scsi_disk_emulate_reqops, 2510 [REQUEST_SENSE] = &scsi_disk_emulate_reqops, 2511 [SYNCHRONIZE_CACHE] = &scsi_disk_emulate_reqops, 2512 [SEEK_10] = &scsi_disk_emulate_reqops, 2513 [MODE_SELECT] = &scsi_disk_emulate_reqops, 2514 [MODE_SELECT_10] = &scsi_disk_emulate_reqops, 2515 [UNMAP] = &scsi_disk_emulate_reqops, 2516 [WRITE_SAME_10] = &scsi_disk_emulate_reqops, 2517 [WRITE_SAME_16] = &scsi_disk_emulate_reqops, 2518 [VERIFY_10] = &scsi_disk_emulate_reqops, 2519 [VERIFY_12] = &scsi_disk_emulate_reqops, 2520 [VERIFY_16] = &scsi_disk_emulate_reqops, 2521 2522 [READ_6] = &scsi_disk_dma_reqops, 2523 [READ_10] = &scsi_disk_dma_reqops, 2524 [READ_12] = &scsi_disk_dma_reqops, 2525 [READ_16] = &scsi_disk_dma_reqops, 2526 [WRITE_6] = &scsi_disk_dma_reqops, 2527 [WRITE_10] = &scsi_disk_dma_reqops, 2528 [WRITE_12] = &scsi_disk_dma_reqops, 2529 [WRITE_16] = &scsi_disk_dma_reqops, 2530 [WRITE_VERIFY_10] = &scsi_disk_dma_reqops, 2531 [WRITE_VERIFY_12] = &scsi_disk_dma_reqops, 2532 [WRITE_VERIFY_16] = &scsi_disk_dma_reqops, 2533 }; 2534 2535 static void scsi_disk_new_request_dump(uint32_t lun, uint32_t tag, uint8_t *buf) 2536 { 2537 int i; 2538 int len = scsi_cdb_length(buf); 2539 char *line_buffer, *p; 2540 2541 line_buffer = g_malloc(len * 5 + 1); 2542 2543 for (i = 0, p = line_buffer; i < len; i++) { 2544 p += sprintf(p, " 0x%02x", buf[i]); 2545 } 2546 trace_scsi_disk_new_request(lun, tag, line_buffer); 2547 2548 g_free(line_buffer); 2549 } 2550 2551 static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun, 2552 uint8_t *buf, void *hba_private) 2553 { 2554 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d); 2555 SCSIRequest *req; 2556 const SCSIReqOps *ops; 2557 uint8_t command; 2558 2559 command = buf[0]; 2560 ops = scsi_disk_reqops_dispatch[command]; 2561 if (!ops) { 2562 ops = &scsi_disk_emulate_reqops; 2563 } 2564 req = scsi_req_alloc(ops, &s->qdev, tag, lun, hba_private); 2565 2566 if (trace_event_get_state_backends(TRACE_SCSI_DISK_NEW_REQUEST)) { 2567 scsi_disk_new_request_dump(lun, tag, buf); 2568 } 2569 2570 return req; 2571 } 2572 2573 #ifdef __linux__ 2574 static int get_device_type(SCSIDiskState *s) 2575 { 2576 uint8_t cmd[16]; 2577 uint8_t buf[36]; 2578 int ret; 2579 2580 memset(cmd, 0, sizeof(cmd)); 2581 memset(buf, 0, sizeof(buf)); 2582 cmd[0] = INQUIRY; 2583 cmd[4] = sizeof(buf); 2584 2585 ret = scsi_SG_IO_FROM_DEV(s->qdev.conf.blk, cmd, sizeof(cmd), 2586 buf, sizeof(buf)); 2587 if (ret < 0) { 2588 return -1; 2589 } 2590 s->qdev.type = buf[0]; 2591 if (buf[1] & 0x80) { 2592 s->features |= 1 << SCSI_DISK_F_REMOVABLE; 2593 } 2594 return 0; 2595 } 2596 2597 static void scsi_block_realize(SCSIDevice *dev, Error **errp) 2598 { 2599 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2600 AioContext *ctx; 2601 int sg_version; 2602 int rc; 2603 2604 if (!s->qdev.conf.blk) { 2605 error_setg(errp, "drive property not set"); 2606 return; 2607 } 2608 2609 if (s->rotation_rate) { 2610 error_report_once("rotation_rate is specified for scsi-block but is " 2611 "not implemented. 
This option is deprecated and will " 2612 "be removed in a future version"); 2613 } 2614 2615 ctx = blk_get_aio_context(s->qdev.conf.blk); 2616 aio_context_acquire(ctx); 2617 2618 /* check we are using a driver managing SG_IO (version 3 and after) */ 2619 rc = blk_ioctl(s->qdev.conf.blk, SG_GET_VERSION_NUM, &sg_version); 2620 if (rc < 0) { 2621 error_setg_errno(errp, -rc, "cannot get SG_IO version number"); 2622 if (rc != -EPERM) { 2623 error_append_hint(errp, "Is this a SCSI device?\n"); 2624 } 2625 goto out; 2626 } 2627 if (sg_version < 30000) { 2628 error_setg(errp, "scsi generic interface too old"); 2629 goto out; 2630 } 2631 2632 /* get device type from INQUIRY data */ 2633 rc = get_device_type(s); 2634 if (rc < 0) { 2635 error_setg(errp, "INQUIRY failed"); 2636 goto out; 2637 } 2638 2639 /* Make a guess for the block size; we'll fix it when the guest sends 2640 * READ CAPACITY. If they don't, they likely would assume these sizes 2641 * anyway. (TODO: check in /sys). 2642 */ 2643 if (s->qdev.type == TYPE_ROM || s->qdev.type == TYPE_WORM) { 2644 s->qdev.blocksize = 2048; 2645 } else { 2646 s->qdev.blocksize = 512; 2647 } 2648 2649 /* Make the scsi-block device non-removable as far as the HMP and QMP 2650 * eject commands are concerned. 2651 */ 2652 s->features |= (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS); 2653 2654 scsi_realize(&s->qdev, errp); 2655 scsi_generic_read_device_inquiry(&s->qdev); 2656 2657 out: 2658 aio_context_release(ctx); 2659 } 2660 2661 typedef struct SCSIBlockReq { 2662 SCSIDiskReq req; 2663 sg_io_hdr_t io_header; 2664 2665 /* Selected bytes of the original CDB, copied into our own CDB. */ 2666 uint8_t cmd, cdb1, group_number; 2667 2668 /* CDB passed to SG_IO. */ 2669 uint8_t cdb[16]; 2670 } SCSIBlockReq; 2671 2672 static BlockAIOCB *scsi_block_do_sgio(SCSIBlockReq *req, 2673 int64_t offset, QEMUIOVector *iov, 2674 int direction, 2675 BlockCompletionFunc *cb, void *opaque) 2676 { 2677 sg_io_hdr_t *io_header = &req->io_header; 2678 SCSIDiskReq *r = &req->req; 2679 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 2680 int nb_logical_blocks; 2681 uint64_t lba; 2682 BlockAIOCB *aiocb; 2683 2684 /* This is not supported yet. It can only happen if the guest does 2685 * reads and writes that are not aligned to the logical sector size 2686 * _and_ cover multiple MemoryRegions. 2687 */ 2688 assert(offset % s->qdev.blocksize == 0); 2689 assert(iov->size % s->qdev.blocksize == 0); 2690 2691 io_header->interface_id = 'S'; 2692 2693 /* The data transfer comes from the QEMUIOVector. */ 2694 io_header->dxfer_direction = direction; 2695 io_header->dxfer_len = iov->size; 2696 io_header->dxferp = (void *)iov->iov; 2697 io_header->iovec_count = iov->niov; 2698 assert(io_header->iovec_count == iov->niov); /* no overflow! */ 2699 2700 /* Build a new CDB with the LBA and length patched in, in case 2701 * DMA helpers split the transfer in multiple segments. Do not 2702 * build a CDB smaller than what the guest wanted, and only build 2703 * a larger one if strictly necessary.
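 *
 * For illustration, assuming the guest issued READ(10) (req->cmd == 0x28,
 * so req->cmd >> 5 == 1) and the DMA helpers hand us a fragment at LBA
 * 0x12345 spanning 8 logical blocks: the rebuilt CDB below stays a
 * 10-byte READ(10), keeping opcode 0x28 in byte 0, storing the LBA
 * big-endian as 0x00 0x01 0x23 0x45 in bytes 2..5 and the length as
 * 0x00 0x08 in bytes 7..8.  The same transfer at an LBA above 0xffffffff
 * would instead be widened to a 16-byte READ(16).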
2704 */ 2705 io_header->cmdp = req->cdb; 2706 lba = offset / s->qdev.blocksize; 2707 nb_logical_blocks = io_header->dxfer_len / s->qdev.blocksize; 2708 2709 if ((req->cmd >> 5) == 0 && lba <= 0x1ffff) { 2710 /* 6-byte CDB */ 2711 stl_be_p(&req->cdb[0], lba | (req->cmd << 24)); 2712 req->cdb[4] = nb_logical_blocks; 2713 req->cdb[5] = 0; 2714 io_header->cmd_len = 6; 2715 } else if ((req->cmd >> 5) <= 1 && lba <= 0xffffffffULL) { 2716 /* 10-byte CDB */ 2717 req->cdb[0] = (req->cmd & 0x1f) | 0x20; 2718 req->cdb[1] = req->cdb1; 2719 stl_be_p(&req->cdb[2], lba); 2720 req->cdb[6] = req->group_number; 2721 stw_be_p(&req->cdb[7], nb_logical_blocks); 2722 req->cdb[9] = 0; 2723 io_header->cmd_len = 10; 2724 } else if ((req->cmd >> 5) != 4 && lba <= 0xffffffffULL) { 2725 /* 12-byte CDB */ 2726 req->cdb[0] = (req->cmd & 0x1f) | 0xA0; 2727 req->cdb[1] = req->cdb1; 2728 stl_be_p(&req->cdb[2], lba); 2729 stl_be_p(&req->cdb[6], nb_logical_blocks); 2730 req->cdb[10] = req->group_number; 2731 req->cdb[11] = 0; 2732 io_header->cmd_len = 12; 2733 } else { 2734 /* 16-byte CDB */ 2735 req->cdb[0] = (req->cmd & 0x1f) | 0x80; 2736 req->cdb[1] = req->cdb1; 2737 stq_be_p(&req->cdb[2], lba); 2738 stl_be_p(&req->cdb[10], nb_logical_blocks); 2739 req->cdb[14] = req->group_number; 2740 req->cdb[15] = 0; 2741 io_header->cmd_len = 16; 2742 } 2743 2744 /* The rest is as in scsi-generic.c. */ 2745 io_header->mx_sb_len = sizeof(r->req.sense); 2746 io_header->sbp = r->req.sense; 2747 io_header->timeout = UINT_MAX; 2748 io_header->usr_ptr = r; 2749 io_header->flags |= SG_FLAG_DIRECT_IO; 2750 2751 aiocb = blk_aio_ioctl(s->qdev.conf.blk, SG_IO, io_header, cb, opaque); 2752 assert(aiocb != NULL); 2753 return aiocb; 2754 } 2755 2756 static bool scsi_block_no_fua(SCSICommand *cmd) 2757 { 2758 return false; 2759 } 2760 2761 static BlockAIOCB *scsi_block_dma_readv(int64_t offset, 2762 QEMUIOVector *iov, 2763 BlockCompletionFunc *cb, void *cb_opaque, 2764 void *opaque) 2765 { 2766 SCSIBlockReq *r = opaque; 2767 return scsi_block_do_sgio(r, offset, iov, 2768 SG_DXFER_FROM_DEV, cb, cb_opaque); 2769 } 2770 2771 static BlockAIOCB *scsi_block_dma_writev(int64_t offset, 2772 QEMUIOVector *iov, 2773 BlockCompletionFunc *cb, void *cb_opaque, 2774 void *opaque) 2775 { 2776 SCSIBlockReq *r = opaque; 2777 return scsi_block_do_sgio(r, offset, iov, 2778 SG_DXFER_TO_DEV, cb, cb_opaque); 2779 } 2780 2781 static bool scsi_block_is_passthrough(SCSIDiskState *s, uint8_t *buf) 2782 { 2783 switch (buf[0]) { 2784 case VERIFY_10: 2785 case VERIFY_12: 2786 case VERIFY_16: 2787 /* Check if BYTCHK == 0x01 (data-out buffer contains data 2788 * for the number of logical blocks specified in the length 2789 * field). For other modes, do not use scatter/gather operation. 2790 */ 2791 if ((buf[1] & 6) == 2) { 2792 return false; 2793 } 2794 break; 2795 2796 case READ_6: 2797 case READ_10: 2798 case READ_12: 2799 case READ_16: 2800 case WRITE_6: 2801 case WRITE_10: 2802 case WRITE_12: 2803 case WRITE_16: 2804 case WRITE_VERIFY_10: 2805 case WRITE_VERIFY_12: 2806 case WRITE_VERIFY_16: 2807 /* MMC writing cannot be done via DMA helpers, because it sometimes 2808 * involves writing beyond the maximum LBA or to negative LBA (lead-in). 2809 * We might use scsi_block_dma_reqops as long as no writing commands are 2810 * seen, but performance usually isn't paramount on optical media. So, 2811 * just make scsi-block operate the same as scsi-generic for them. 
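 *
 * (The visible effect, via scsi_block_new_request() below, is that a
 * scsi-block device backed by an optical drive sends every READ and WRITE
 * through scsi_generic_req_ops as plain passthrough, while disks keep
 * using scsi_block_dma_reqops and the DMA helpers.)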
2812 */ 2813 if (s->qdev.type != TYPE_ROM) { 2814 return false; 2815 } 2816 break; 2817 2818 default: 2819 break; 2820 } 2821 2822 return true; 2823 } 2824 2825 2826 static int32_t scsi_block_dma_command(SCSIRequest *req, uint8_t *buf) 2827 { 2828 SCSIBlockReq *r = (SCSIBlockReq *)req; 2829 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 2830 2831 r->cmd = req->cmd.buf[0]; 2832 switch (r->cmd >> 5) { 2833 case 0: 2834 /* 6-byte CDB. */ 2835 r->cdb1 = r->group_number = 0; 2836 break; 2837 case 1: 2838 /* 10-byte CDB. */ 2839 r->cdb1 = req->cmd.buf[1]; 2840 r->group_number = req->cmd.buf[6]; 2841 break; 2842 case 4: 2843 /* 12-byte CDB. */ 2844 r->cdb1 = req->cmd.buf[1]; 2845 r->group_number = req->cmd.buf[10]; 2846 break; 2847 case 5: 2848 /* 16-byte CDB. */ 2849 r->cdb1 = req->cmd.buf[1]; 2850 r->group_number = req->cmd.buf[14]; 2851 break; 2852 default: 2853 abort(); 2854 } 2855 2856 /* Protection information is not supported. For SCSI versions 2 and 2857 * older (as determined by snooping the guest's INQUIRY commands), 2858 * there is no RD/WR/VRPROTECT, so skip this check in these versions. 2859 */ 2860 if (s->qdev.scsi_version > 2 && (req->cmd.buf[1] & 0xe0)) { 2861 scsi_check_condition(&r->req, SENSE_CODE(INVALID_FIELD)); 2862 return 0; 2863 } 2864 2865 r->req.status = &r->io_header.status; 2866 return scsi_disk_dma_command(req, buf); 2867 } 2868 2869 static const SCSIReqOps scsi_block_dma_reqops = { 2870 .size = sizeof(SCSIBlockReq), 2871 .free_req = scsi_free_request, 2872 .send_command = scsi_block_dma_command, 2873 .read_data = scsi_read_data, 2874 .write_data = scsi_write_data, 2875 .get_buf = scsi_get_buf, 2876 .load_request = scsi_disk_load_request, 2877 .save_request = scsi_disk_save_request, 2878 }; 2879 2880 static SCSIRequest *scsi_block_new_request(SCSIDevice *d, uint32_t tag, 2881 uint32_t lun, uint8_t *buf, 2882 void *hba_private) 2883 { 2884 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d); 2885 2886 if (scsi_block_is_passthrough(s, buf)) { 2887 return scsi_req_alloc(&scsi_generic_req_ops, &s->qdev, tag, lun, 2888 hba_private); 2889 } else { 2890 return scsi_req_alloc(&scsi_block_dma_reqops, &s->qdev, tag, lun, 2891 hba_private); 2892 } 2893 } 2894 2895 static int scsi_block_parse_cdb(SCSIDevice *d, SCSICommand *cmd, 2896 uint8_t *buf, void *hba_private) 2897 { 2898 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d); 2899 2900 if (scsi_block_is_passthrough(s, buf)) { 2901 return scsi_bus_parse_cdb(&s->qdev, cmd, buf, hba_private); 2902 } else { 2903 return scsi_req_parse_cdb(&s->qdev, cmd, buf); 2904 } 2905 } 2906 2907 static void scsi_block_update_sense(SCSIRequest *req) 2908 { 2909 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 2910 SCSIBlockReq *br = DO_UPCAST(SCSIBlockReq, req, r); 2911 r->req.sense_len = MIN(br->io_header.sb_len_wr, sizeof(r->req.sense)); 2912 } 2913 #endif 2914 2915 static 2916 BlockAIOCB *scsi_dma_readv(int64_t offset, QEMUIOVector *iov, 2917 BlockCompletionFunc *cb, void *cb_opaque, 2918 void *opaque) 2919 { 2920 SCSIDiskReq *r = opaque; 2921 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 2922 return blk_aio_preadv(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque); 2923 } 2924 2925 static 2926 BlockAIOCB *scsi_dma_writev(int64_t offset, QEMUIOVector *iov, 2927 BlockCompletionFunc *cb, void *cb_opaque, 2928 void *opaque) 2929 { 2930 SCSIDiskReq *r = opaque; 2931 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 2932 return blk_aio_pwritev(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque); 2933 } 
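/*
 * The two helpers above are the default DMAIOFunc hooks that
 * scsi_disk_base_class_initfn() installs below.  As a minimal sketch
 * (hypothetical names, not part of this file), a subclass that needs a
 * different data path only has to override the hooks in its own
 * class_init, the same way scsi_block_class_initfn() does further down:
 *
 *     static void my_scsi_disk_class_initfn(ObjectClass *klass, void *data)
 *     {
 *         SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass);
 *
 *         sdc->dma_readv  = my_dma_readv;   // same DMAIOFunc signature
 *         sdc->dma_writev = my_dma_writev;  // as scsi_dma_readv/writev
 *     }
 */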
2934 2935 static void scsi_disk_base_class_initfn(ObjectClass *klass, void *data) 2936 { 2937 DeviceClass *dc = DEVICE_CLASS(klass); 2938 SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass); 2939 2940 dc->fw_name = "disk"; 2941 dc->reset = scsi_disk_reset; 2942 sdc->dma_readv = scsi_dma_readv; 2943 sdc->dma_writev = scsi_dma_writev; 2944 sdc->need_fua_emulation = scsi_is_cmd_fua; 2945 } 2946 2947 static const TypeInfo scsi_disk_base_info = { 2948 .name = TYPE_SCSI_DISK_BASE, 2949 .parent = TYPE_SCSI_DEVICE, 2950 .class_init = scsi_disk_base_class_initfn, 2951 .instance_size = sizeof(SCSIDiskState), 2952 .class_size = sizeof(SCSIDiskClass), 2953 .abstract = true, 2954 }; 2955 2956 #define DEFINE_SCSI_DISK_PROPERTIES() \ 2957 DEFINE_PROP_DRIVE_IOTHREAD("drive", SCSIDiskState, qdev.conf.blk), \ 2958 DEFINE_BLOCK_PROPERTIES_BASE(SCSIDiskState, qdev.conf), \ 2959 DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf), \ 2960 DEFINE_PROP_STRING("ver", SCSIDiskState, version), \ 2961 DEFINE_PROP_STRING("serial", SCSIDiskState, serial), \ 2962 DEFINE_PROP_STRING("vendor", SCSIDiskState, vendor), \ 2963 DEFINE_PROP_STRING("product", SCSIDiskState, product), \ 2964 DEFINE_PROP_STRING("device_id", SCSIDiskState, device_id) 2965 2966 2967 static Property scsi_hd_properties[] = { 2968 DEFINE_SCSI_DISK_PROPERTIES(), 2969 DEFINE_PROP_BIT("removable", SCSIDiskState, features, 2970 SCSI_DISK_F_REMOVABLE, false), 2971 DEFINE_PROP_BIT("dpofua", SCSIDiskState, features, 2972 SCSI_DISK_F_DPOFUA, false), 2973 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0), 2974 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0), 2975 DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0), 2976 DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size, 2977 DEFAULT_MAX_UNMAP_SIZE), 2978 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size, 2979 DEFAULT_MAX_IO_SIZE), 2980 DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0), 2981 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version, 2982 5), 2983 DEFINE_BLOCK_CHS_PROPERTIES(SCSIDiskState, qdev.conf), 2984 DEFINE_PROP_END_OF_LIST(), 2985 }; 2986 2987 static const VMStateDescription vmstate_scsi_disk_state = { 2988 .name = "scsi-disk", 2989 .version_id = 1, 2990 .minimum_version_id = 1, 2991 .fields = (VMStateField[]) { 2992 VMSTATE_SCSI_DEVICE(qdev, SCSIDiskState), 2993 VMSTATE_BOOL(media_changed, SCSIDiskState), 2994 VMSTATE_BOOL(media_event, SCSIDiskState), 2995 VMSTATE_BOOL(eject_request, SCSIDiskState), 2996 VMSTATE_BOOL(tray_open, SCSIDiskState), 2997 VMSTATE_BOOL(tray_locked, SCSIDiskState), 2998 VMSTATE_END_OF_LIST() 2999 } 3000 }; 3001 3002 static void scsi_hd_class_initfn(ObjectClass *klass, void *data) 3003 { 3004 DeviceClass *dc = DEVICE_CLASS(klass); 3005 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); 3006 3007 sc->realize = scsi_hd_realize; 3008 sc->alloc_req = scsi_new_request; 3009 sc->unit_attention_reported = scsi_disk_unit_attention_reported; 3010 dc->desc = "virtual SCSI disk"; 3011 dc->props = scsi_hd_properties; 3012 dc->vmsd = &vmstate_scsi_disk_state; 3013 } 3014 3015 static const TypeInfo scsi_hd_info = { 3016 .name = "scsi-hd", 3017 .parent = TYPE_SCSI_DISK_BASE, 3018 .class_init = scsi_hd_class_initfn, 3019 }; 3020 3021 static Property scsi_cd_properties[] = { 3022 DEFINE_SCSI_DISK_PROPERTIES(), 3023 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0), 3024 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0), 3025 DEFINE_PROP_UINT16("port_index", SCSIDiskState, 
port_index, 0), 3026 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size, 3027 DEFAULT_MAX_IO_SIZE), 3028 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version, 3029 5), 3030 DEFINE_PROP_END_OF_LIST(), 3031 }; 3032 3033 static void scsi_cd_class_initfn(ObjectClass *klass, void *data) 3034 { 3035 DeviceClass *dc = DEVICE_CLASS(klass); 3036 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); 3037 3038 sc->realize = scsi_cd_realize; 3039 sc->alloc_req = scsi_new_request; 3040 sc->unit_attention_reported = scsi_disk_unit_attention_reported; 3041 dc->desc = "virtual SCSI CD-ROM"; 3042 dc->props = scsi_cd_properties; 3043 dc->vmsd = &vmstate_scsi_disk_state; 3044 } 3045 3046 static const TypeInfo scsi_cd_info = { 3047 .name = "scsi-cd", 3048 .parent = TYPE_SCSI_DISK_BASE, 3049 .class_init = scsi_cd_class_initfn, 3050 }; 3051 3052 #ifdef __linux__ 3053 static Property scsi_block_properties[] = { 3054 DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf), \ 3055 DEFINE_PROP_DRIVE("drive", SCSIDiskState, qdev.conf.blk), 3056 DEFINE_PROP_BOOL("share-rw", SCSIDiskState, qdev.conf.share_rw, false), 3057 DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0), 3058 DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size, 3059 DEFAULT_MAX_UNMAP_SIZE), 3060 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size, 3061 DEFAULT_MAX_IO_SIZE), 3062 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version, 3063 -1), 3064 DEFINE_PROP_END_OF_LIST(), 3065 }; 3066 3067 static void scsi_block_class_initfn(ObjectClass *klass, void *data) 3068 { 3069 DeviceClass *dc = DEVICE_CLASS(klass); 3070 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); 3071 SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass); 3072 3073 sc->realize = scsi_block_realize; 3074 sc->alloc_req = scsi_block_new_request; 3075 sc->parse_cdb = scsi_block_parse_cdb; 3076 sdc->dma_readv = scsi_block_dma_readv; 3077 sdc->dma_writev = scsi_block_dma_writev; 3078 sdc->update_sense = scsi_block_update_sense; 3079 sdc->need_fua_emulation = scsi_block_no_fua; 3080 dc->desc = "SCSI block device passthrough"; 3081 dc->props = scsi_block_properties; 3082 dc->vmsd = &vmstate_scsi_disk_state; 3083 } 3084 3085 static const TypeInfo scsi_block_info = { 3086 .name = "scsi-block", 3087 .parent = TYPE_SCSI_DISK_BASE, 3088 .class_init = scsi_block_class_initfn, 3089 }; 3090 #endif 3091 3092 static Property scsi_disk_properties[] = { 3093 DEFINE_SCSI_DISK_PROPERTIES(), 3094 DEFINE_PROP_BIT("removable", SCSIDiskState, features, 3095 SCSI_DISK_F_REMOVABLE, false), 3096 DEFINE_PROP_BIT("dpofua", SCSIDiskState, features, 3097 SCSI_DISK_F_DPOFUA, false), 3098 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0), 3099 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0), 3100 DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0), 3101 DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size, 3102 DEFAULT_MAX_UNMAP_SIZE), 3103 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size, 3104 DEFAULT_MAX_IO_SIZE), 3105 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version, 3106 5), 3107 DEFINE_PROP_END_OF_LIST(), 3108 }; 3109 3110 static void scsi_disk_class_initfn(ObjectClass *klass, void *data) 3111 { 3112 DeviceClass *dc = DEVICE_CLASS(klass); 3113 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); 3114 3115 sc->realize = scsi_disk_realize; 3116 sc->alloc_req = scsi_new_request; 3117 sc->unit_attention_reported = scsi_disk_unit_attention_reported; 3118 
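    /*
     * The legacy "scsi-disk" type is kept for backward compatibility:
     * scsi_disk_realize() above picks scsi_cd_realize() or scsi_hd_realize()
     * at realize time, based on the dinfo->media_cd flag that is typically
     * set via the legacy media=cdrom drive option.
     */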
dc->fw_name = "disk"; 3119 dc->desc = "virtual SCSI disk or CD-ROM (legacy)"; 3120 dc->reset = scsi_disk_reset; 3121 dc->props = scsi_disk_properties; 3122 dc->vmsd = &vmstate_scsi_disk_state; 3123 } 3124 3125 static const TypeInfo scsi_disk_info = { 3126 .name = "scsi-disk", 3127 .parent = TYPE_SCSI_DISK_BASE, 3128 .class_init = scsi_disk_class_initfn, 3129 }; 3130 3131 static void scsi_disk_register_types(void) 3132 { 3133 type_register_static(&scsi_disk_base_info); 3134 type_register_static(&scsi_hd_info); 3135 type_register_static(&scsi_cd_info); 3136 #ifdef __linux__ 3137 type_register_static(&scsi_block_info); 3138 #endif 3139 type_register_static(&scsi_disk_info); 3140 } 3141 3142 type_init(scsi_disk_register_types) 3143
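/*
 * Illustrative usage (assuming a typical invocation, not taken from this
 * file): the device types registered above normally sit behind a SCSI HBA,
 * for example:
 *
 *     qemu-system-x86_64 \
 *         -device virtio-scsi-pci,id=scsi0 \
 *         -drive if=none,id=hd0,file=disk.qcow2 \
 *         -device scsi-hd,drive=hd0,rotation_rate=1,scsi_version=5
 *
 * where "rotation_rate" and "scsi_version" are properties declared in
 * scsi_hd_properties above.
 */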