/*
 * SCSI Device emulation
 *
 * Copyright (c) 2006 CodeSourcery.
 * Based on code by Fabrice Bellard
 *
 * Written by Paul Brook
 * Modifications:
 *  2009-Dec-12 Artyom Tarasenko : implemented standard inquiry for the case
 *                                 when the allocation length of CDB is smaller
 *                                 than 36.
 *  2009-Oct-13 Artyom Tarasenko : implemented the block descriptor in the
 *                                 MODE SENSE response.
 *
 * This code is licensed under the LGPL.
 *
 * Note that this file only handles the SCSI architecture model and device
 * commands.  Emulation of interface/link layer protocols is handled by
 * the host adapter emulator.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
#include "hw/scsi/scsi.h"
#include "migration/qemu-file-types.h"
#include "migration/vmstate.h"
#include "hw/scsi/emulation.h"
#include "scsi/constants.h"
#include "sysemu/block-backend.h"
#include "sysemu/blockdev.h"
#include "hw/block/block.h"
#include "hw/qdev-properties.h"
#include "sysemu/dma.h"
#include "qemu/cutils.h"
#include "trace.h"

#ifdef __linux__
#include <scsi/sg.h>
#endif

#define SCSI_WRITE_SAME_MAX         (512 * KiB)
#define SCSI_DMA_BUF_SIZE           (128 * KiB)
#define SCSI_MAX_INQUIRY_LEN        256
#define SCSI_MAX_MODE_LEN           256

#define DEFAULT_DISCARD_GRANULARITY (4 * KiB)
#define DEFAULT_MAX_UNMAP_SIZE      (1 * GiB)
#define DEFAULT_MAX_IO_SIZE         INT_MAX     /* 2 GB - 1 block */

#define TYPE_SCSI_DISK_BASE         "scsi-disk-base"

#define SCSI_DISK_BASE(obj) \
    OBJECT_CHECK(SCSIDiskState, (obj), TYPE_SCSI_DISK_BASE)
#define SCSI_DISK_BASE_CLASS(klass) \
    OBJECT_CLASS_CHECK(SCSIDiskClass, (klass), TYPE_SCSI_DISK_BASE)
#define SCSI_DISK_BASE_GET_CLASS(obj) \
    OBJECT_GET_CLASS(SCSIDiskClass, (obj), TYPE_SCSI_DISK_BASE)

typedef struct SCSIDiskClass {
    SCSIDeviceClass parent_class;
    DMAIOFunc       *dma_readv;
    DMAIOFunc       *dma_writev;
    bool            (*need_fua_emulation)(SCSICommand *cmd);
    void            (*update_sense)(SCSIRequest *r);
} SCSIDiskClass;
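/*
 * The dma_readv/dma_writev and update_sense hooks let subclasses of
 * scsi-disk-base (for example the scsi-block passthrough device) substitute
 * their own I/O and sense-handling paths while reusing the request state
 * machine below.
 */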
typedef struct SCSIDiskReq {
    SCSIRequest req;
    /* Both sector and sector_count are in terms of qemu 512 byte blocks.  */
    uint64_t sector;
    uint32_t sector_count;
    uint32_t buflen;
    bool started;
    bool need_fua_emulation;
    struct iovec iov;
    QEMUIOVector qiov;
    BlockAcctCookie acct;
    unsigned char *status;
} SCSIDiskReq;

#define SCSI_DISK_F_REMOVABLE             0
#define SCSI_DISK_F_DPOFUA                1
#define SCSI_DISK_F_NO_REMOVABLE_DEVOPS   2

typedef struct SCSIDiskState
{
    SCSIDevice qdev;
    uint32_t features;
    bool media_changed;
    bool media_event;
    bool eject_request;
    uint16_t port_index;
    uint64_t max_unmap_size;
    uint64_t max_io_size;
    QEMUBH *bh;
    char *version;
    char *serial;
    char *vendor;
    char *product;
    char *device_id;
    bool tray_open;
    bool tray_locked;
    /*
     * 0x0000        - rotation rate not reported
     * 0x0001        - non-rotating medium (SSD)
     * 0x0002-0x0400 - reserved
     * 0x0401-0xfffe - rotations per minute
     * 0xffff        - reserved
     */
    uint16_t rotation_rate;
} SCSIDiskState;

static bool scsi_handle_rw_error(SCSIDiskReq *r, int error, bool acct_failed);

static void scsi_free_request(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    qemu_vfree(r->iov.iov_base);
}

/* Helper function for command completion with sense.  */
static void scsi_check_condition(SCSIDiskReq *r, SCSISense sense)
{
    trace_scsi_disk_check_condition(r->req.tag, sense.key, sense.asc,
                                    sense.ascq);
    scsi_req_build_sense(&r->req, sense);
    scsi_req_complete(&r->req, CHECK_CONDITION);
}

static void scsi_init_iovec(SCSIDiskReq *r, size_t size)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    if (!r->iov.iov_base) {
        r->buflen = size;
        r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen);
    }
    r->iov.iov_len = MIN(r->sector_count * 512, r->buflen);
    qemu_iovec_init_external(&r->qiov, &r->iov, 1);
}

static void scsi_disk_save_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    qemu_put_be64s(f, &r->sector);
    qemu_put_be32s(f, &r->sector_count);
    qemu_put_be32s(f, &r->buflen);
    if (r->buflen) {
        if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
            qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len);
        } else if (!req->retry) {
            uint32_t len = r->iov.iov_len;
            qemu_put_be32s(f, &len);
            qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len);
        }
    }
}

static void scsi_disk_load_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    qemu_get_be64s(f, &r->sector);
    qemu_get_be32s(f, &r->sector_count);
    qemu_get_be32s(f, &r->buflen);
    if (r->buflen) {
        scsi_init_iovec(r, r->buflen);
        if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
            qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len);
        } else if (!r->req.retry) {
            uint32_t len;
            qemu_get_be32s(f, &len);
            r->iov.iov_len = len;
            assert(r->iov.iov_len <= r->buflen);
            qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len);
        }
    }

    qemu_iovec_init_external(&r->qiov, &r->iov, 1);
}

static bool scsi_disk_req_check_error(SCSIDiskReq *r, int ret, bool acct_failed)
{
    if (r->req.io_canceled) {
        scsi_req_cancel_complete(&r->req);
        return true;
    }

    if (ret < 0 || (r->status && *r->status)) {
        return scsi_handle_rw_error(r, -ret, acct_failed);
    }

    return false;
}
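/*
 * Generic completion callback for emulated commands that go straight to the
 * BlockBackend (flushes, write zeroes, ...): it finishes the block accounting
 * started by the submitter, completes the request unless an error or a
 * cancellation was detected, and drops the reference that was taken when the
 * AIO was issued.
 */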
static void scsi_aio_complete(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;
    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (scsi_disk_req_check_error(r, ret, true)) {
        goto done;
    }

    block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    scsi_req_complete(&r->req, GOOD);

done:
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
    scsi_req_unref(&r->req);
}

static bool scsi_is_cmd_fua(SCSICommand *cmd)
{
    switch (cmd->buf[0]) {
    case READ_10:
    case READ_12:
    case READ_16:
    case WRITE_10:
    case WRITE_12:
    case WRITE_16:
        return (cmd->buf[1] & 8) != 0;

    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
    case WRITE_VERIFY_10:
    case WRITE_VERIFY_12:
    case WRITE_VERIFY_16:
        return true;

    case READ_6:
    case WRITE_6:
    default:
        return false;
    }
}

static void scsi_write_do_fua(SCSIDiskReq *r)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb == NULL);
    assert(!r->req.io_canceled);

    if (r->need_fua_emulation) {
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
        return;
    }

    scsi_req_complete(&r->req, GOOD);
    scsi_req_unref(&r->req);
}

static void scsi_dma_complete_noio(SCSIDiskReq *r, int ret)
{
    assert(r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    r->sector += r->sector_count;
    r->sector_count = 0;
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        scsi_write_do_fua(r);
        return;
    } else {
        scsi_req_complete(&r->req, GOOD);
    }

done:
    scsi_req_unref(&r->req);
}

static void scsi_dma_complete(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    }
    scsi_dma_complete_noio(r, ret);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}

static void scsi_read_complete_noio(SCSIDiskReq *r, int ret)
{
    uint32_t n;

    assert(r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    n = r->qiov.size / 512;
    r->sector += n;
    r->sector_count -= n;
    scsi_req_data(&r->req, r->qiov.size);

done:
    scsi_req_unref(&r->req);
}

static void scsi_read_complete(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
        trace_scsi_disk_read_complete(r->req.tag, r->qiov.size);
    }
    scsi_read_complete_noio(r, ret);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}
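/*
 * Reads and writes are submitted in one of two ways: if the HBA attached a
 * scatter/gather list (r->req.sg), dma_blk_io() drives the class's
 * dma_readv/dma_writev hook directly over guest memory; otherwise the data
 * is bounced through the r->iov buffer (at most SCSI_DMA_BUF_SIZE per pass)
 * and handed to the HBA with scsi_req_data().
 */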
/* Actually issue a read to the block device.  */
static void scsi_do_read(SCSIDiskReq *r, int ret)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));

    assert(r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    /* The request is used as the AIO opaque value, so add a ref.  */
    scsi_req_ref(&r->req);

    if (r->req.sg) {
        dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_READ);
        r->req.resid -= r->req.sg->size;
        r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
                                  r->req.sg, r->sector << BDRV_SECTOR_BITS,
                                  BDRV_SECTOR_SIZE,
                                  sdc->dma_readv, r, scsi_dma_complete, r,
                                  DMA_DIRECTION_FROM_DEVICE);
    } else {
        scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         r->qiov.size, BLOCK_ACCT_READ);
        r->req.aiocb = sdc->dma_readv(r->sector << BDRV_SECTOR_BITS, &r->qiov,
                                      scsi_read_complete, r, r);
    }

done:
    scsi_req_unref(&r->req);
}

static void scsi_do_read_cb(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    }
    scsi_do_read(opaque, ret);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}

/* Read more data from scsi device into buffer.  */
static void scsi_read_data(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    bool first;

    trace_scsi_disk_read_data_count(r->sector_count);
    if (r->sector_count == 0) {
        /* This also clears the sense buffer for REQUEST SENSE.  */
        scsi_req_complete(&r->req, GOOD);
        return;
    }

    /* No data transfer may already be in progress */
    assert(r->req.aiocb == NULL);

    /* The request is used as the AIO opaque value, so add a ref.  */
    scsi_req_ref(&r->req);
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        trace_scsi_disk_read_data_invalid();
        scsi_read_complete_noio(r, -EINVAL);
        return;
    }

    if (!blk_is_available(req->dev->conf.blk)) {
        scsi_read_complete_noio(r, -ENOMEDIUM);
        return;
    }

    first = !r->started;
    r->started = true;
    if (first && r->need_fua_emulation) {
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_do_read_cb, r);
    } else {
        scsi_do_read(r, 0);
    }
}
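/*
 * The action returned by blk_get_error_action() reflects the drive's
 * rerror/werror policy (report, ignore, stop), so the guest-visible outcome
 * below ultimately depends on how the backend was configured.
 */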
/*
 * scsi_handle_rw_error has two return values.  False means that the error
 * must be ignored, true means that the error has been processed and the
 * caller should not do anything else for this request.  Note that
 * scsi_handle_rw_error always manages its reference counts, independent
 * of the return value.
 */
static bool scsi_handle_rw_error(SCSIDiskReq *r, int error, bool acct_failed)
{
    bool is_read = (r->req.cmd.mode == SCSI_XFER_FROM_DEV);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
    BlockErrorAction action = blk_get_error_action(s->qdev.conf.blk,
                                                   is_read, error);

    if (action == BLOCK_ERROR_ACTION_REPORT) {
        if (acct_failed) {
            block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
        }
        switch (error) {
        case 0:
            /* A passthrough command has run and has produced sense data; check
             * whether the error has to be handled by the guest or should
             * rather pause the host.
             */
            assert(r->status && *r->status);
            if (scsi_sense_buf_is_guest_recoverable(r->req.sense,
                                                    sizeof(r->req.sense))) {
                /* These errors are handled by guest. */
                sdc->update_sense(&r->req);
                scsi_req_complete(&r->req, *r->status);
                return true;
            }
            error = scsi_sense_buf_to_errno(r->req.sense, sizeof(r->req.sense));
            break;
        case ENOMEDIUM:
            scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
            break;
        case ENOMEM:
            scsi_check_condition(r, SENSE_CODE(TARGET_FAILURE));
            break;
        case EINVAL:
            scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
            break;
        case ENOSPC:
            scsi_check_condition(r, SENSE_CODE(SPACE_ALLOC_FAILED));
            break;
        default:
            scsi_check_condition(r, SENSE_CODE(IO_ERROR));
            break;
        }
    }

    blk_error_action(s->qdev.conf.blk, action, is_read, error);
    if (action == BLOCK_ERROR_ACTION_IGNORE) {
        scsi_req_complete(&r->req, 0);
        return true;
    }

    if (action == BLOCK_ERROR_ACTION_STOP) {
        scsi_req_retry(&r->req);
    }
    return true;
}

static void scsi_write_complete_noio(SCSIDiskReq *r, int ret)
{
    uint32_t n;

    assert(r->req.aiocb == NULL);
    if (scsi_disk_req_check_error(r, ret, false)) {
        goto done;
    }

    n = r->qiov.size / 512;
    r->sector += n;
    r->sector_count -= n;
    if (r->sector_count == 0) {
        scsi_write_do_fua(r);
        return;
    } else {
        scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
        trace_scsi_disk_write_complete_noio(r->req.tag, r->qiov.size);
        scsi_req_data(&r->req, r->qiov.size);
    }

done:
    scsi_req_unref(&r->req);
}

static void scsi_write_complete(void *opaque, int ret)
{
    SCSIDiskReq *r = (SCSIDiskReq *)opaque;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (ret < 0) {
        block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
    }
    scsi_write_complete_noio(r, ret);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}
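/*
 * scsi_write_data is called once with an empty buffer to ask the HBA for the
 * first chunk of data, and then again each time a chunk has been transferred.
 * VERIFY commands reuse this path but are completed without writing to the
 * medium.
 */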
static void scsi_write_data(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));

    /* No data transfer may already be in progress */
    assert(r->req.aiocb == NULL);

    /* The request is used as the AIO opaque value, so add a ref.  */
    scsi_req_ref(&r->req);
    if (r->req.cmd.mode != SCSI_XFER_TO_DEV) {
        trace_scsi_disk_write_data_invalid();
        scsi_write_complete_noio(r, -EINVAL);
        return;
    }

    if (!r->req.sg && !r->qiov.size) {
        /* Called for the first time.  Ask the driver to send us more data.  */
        r->started = true;
        scsi_write_complete_noio(r, 0);
        return;
    }
    if (!blk_is_available(req->dev->conf.blk)) {
        scsi_write_complete_noio(r, -ENOMEDIUM);
        return;
    }

    if (r->req.cmd.buf[0] == VERIFY_10 || r->req.cmd.buf[0] == VERIFY_12 ||
        r->req.cmd.buf[0] == VERIFY_16) {
        if (r->req.sg) {
            scsi_dma_complete_noio(r, 0);
        } else {
            scsi_write_complete_noio(r, 0);
        }
        return;
    }

    if (r->req.sg) {
        dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_WRITE);
        r->req.resid -= r->req.sg->size;
        r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
                                  r->req.sg, r->sector << BDRV_SECTOR_BITS,
                                  BDRV_SECTOR_SIZE,
                                  sdc->dma_writev, r, scsi_dma_complete, r,
                                  DMA_DIRECTION_TO_DEVICE);
    } else {
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         r->qiov.size, BLOCK_ACCT_WRITE);
        r->req.aiocb = sdc->dma_writev(r->sector << BDRV_SECTOR_BITS, &r->qiov,
                                       scsi_write_complete, r, r);
    }
}

/* Return a pointer to the data buffer.  */
static uint8_t *scsi_get_buf(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    return (uint8_t *)r->iov.iov_base;
}

static int scsi_disk_emulate_vpd_page(SCSIRequest *req, uint8_t *outbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    uint8_t page_code = req->cmd.buf[2];
    int start, buflen = 0;

    outbuf[buflen++] = s->qdev.type & 0x1f;
    outbuf[buflen++] = page_code;
    outbuf[buflen++] = 0x00;
    outbuf[buflen++] = 0x00;
    start = buflen;

    switch (page_code) {
    case 0x00: /* Supported page codes, mandatory */
    {
        trace_scsi_disk_emulate_vpd_page_00(req->cmd.xfer);
        outbuf[buflen++] = 0x00; /* list of supported pages (this page) */
        if (s->serial) {
            outbuf[buflen++] = 0x80; /* unit serial number */
        }
        outbuf[buflen++] = 0x83; /* device identification */
        if (s->qdev.type == TYPE_DISK) {
            outbuf[buflen++] = 0xb0; /* block limits */
            outbuf[buflen++] = 0xb1; /* block device characteristics */
            outbuf[buflen++] = 0xb2; /* thin provisioning */
        }
        break;
    }
    case 0x80: /* Device serial number, optional */
    {
        int l;

        if (!s->serial) {
            trace_scsi_disk_emulate_vpd_page_80_not_supported();
            return -1;
        }

        l = strlen(s->serial);
        if (l > 36) {
            l = 36;
        }

        trace_scsi_disk_emulate_vpd_page_80(req->cmd.xfer);
        memcpy(outbuf + buflen, s->serial, l);
        buflen += l;
        break;
    }

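    /*
     * The device identification page returns up to four designators:
     * an ASCII vendor-specific one built from the device_id property,
     * an NAA designator from the wwn property, and SAS target port
     * designators (port WWN and relative target port) when port_wwn
     * or port_index are set.
     */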
    case 0x83: /* Device identification page, mandatory */
    {
        int id_len = s->device_id ? MIN(strlen(s->device_id), 255 - 8) : 0;

        trace_scsi_disk_emulate_vpd_page_83(req->cmd.xfer);

        if (id_len) {
            outbuf[buflen++] = 0x2; /* ASCII */
            outbuf[buflen++] = 0;   /* not officially assigned */
            outbuf[buflen++] = 0;   /* reserved */
            outbuf[buflen++] = id_len; /* length of data following */
            memcpy(outbuf + buflen, s->device_id, id_len);
            buflen += id_len;
        }

        if (s->qdev.wwn) {
            outbuf[buflen++] = 0x1; /* Binary */
            outbuf[buflen++] = 0x3; /* NAA */
            outbuf[buflen++] = 0;   /* reserved */
            outbuf[buflen++] = 8;
            stq_be_p(&outbuf[buflen], s->qdev.wwn);
            buflen += 8;
        }

        if (s->qdev.port_wwn) {
            outbuf[buflen++] = 0x61; /* SAS / Binary */
            outbuf[buflen++] = 0x93; /* PIV / Target port / NAA */
            outbuf[buflen++] = 0;    /* reserved */
            outbuf[buflen++] = 8;
            stq_be_p(&outbuf[buflen], s->qdev.port_wwn);
            buflen += 8;
        }

        if (s->port_index) {
            outbuf[buflen++] = 0x61; /* SAS / Binary */

            /* PIV/Target port/relative target port */
            outbuf[buflen++] = 0x94;

            outbuf[buflen++] = 0;    /* reserved */
            outbuf[buflen++] = 4;
            stw_be_p(&outbuf[buflen + 2], s->port_index);
            buflen += 4;
        }
        break;
    }
    case 0xb0: /* block limits */
    {
        SCSIBlockLimits bl = {};

        if (s->qdev.type == TYPE_ROM) {
            trace_scsi_disk_emulate_vpd_page_b0_not_supported();
            return -1;
        }
        bl.wsnz = 1;
        bl.unmap_sectors =
            s->qdev.conf.discard_granularity / s->qdev.blocksize;
        bl.min_io_size =
            s->qdev.conf.min_io_size / s->qdev.blocksize;
        bl.opt_io_size =
            s->qdev.conf.opt_io_size / s->qdev.blocksize;
        bl.max_unmap_sectors =
            s->max_unmap_size / s->qdev.blocksize;
        bl.max_io_sectors =
            s->max_io_size / s->qdev.blocksize;
        /* 255 descriptors fit in 4 KiB with an 8-byte header */
        bl.max_unmap_descr = 255;

        if (s->qdev.type == TYPE_DISK) {
            int max_transfer_blk = blk_get_max_transfer(s->qdev.conf.blk);
            int max_io_sectors_blk =
                max_transfer_blk / s->qdev.blocksize;

            bl.max_io_sectors =
                MIN_NON_ZERO(max_io_sectors_blk, bl.max_io_sectors);
        }
        buflen += scsi_emulate_block_limits(outbuf + buflen, &bl);
        break;
    }
    case 0xb1: /* block device characteristics */
    {
        buflen = 0x40;
        outbuf[4] = (s->rotation_rate >> 8) & 0xff;
        outbuf[5] = s->rotation_rate & 0xff;
        outbuf[6] = 0; /* PRODUCT TYPE */
        outbuf[7] = 0; /* WABEREQ | WACEREQ | NOMINAL FORM FACTOR */
        outbuf[8] = 0; /* VBULS */
        break;
    }
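    /*
     * Logical block provisioning: byte 5 advertises UNMAP and both WRITE SAME
     * variants; the provisioning type in byte 6 is reported as thin (2) when
     * a discard granularity is configured and as resource-provisioned (1)
     * otherwise.
     */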
    case 0xb2: /* thin provisioning */
    {
        buflen = 8;
        outbuf[4] = 0;
        outbuf[5] = 0xe0; /* unmap & write_same 10/16 all supported */
        outbuf[6] = s->qdev.conf.discard_granularity ? 2 : 1;
        outbuf[7] = 0;
        break;
    }
    default:
        return -1;
    }
    /* done with EVPD */
    assert(buflen - start <= 255);
    outbuf[start - 1] = buflen - start;
    return buflen;
}

static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    int buflen = 0;

    if (req->cmd.buf[1] & 0x1) {
        /* Vital product data */
        return scsi_disk_emulate_vpd_page(req, outbuf);
    }

    /* Standard INQUIRY data */
    if (req->cmd.buf[2] != 0) {
        return -1;
    }

    /* PAGE CODE == 0 */
    buflen = req->cmd.xfer;
    if (buflen > SCSI_MAX_INQUIRY_LEN) {
        buflen = SCSI_MAX_INQUIRY_LEN;
    }

    outbuf[0] = s->qdev.type & 0x1f;
    outbuf[1] = (s->features & (1 << SCSI_DISK_F_REMOVABLE)) ? 0x80 : 0;

    strpadcpy((char *) &outbuf[16], 16, s->product, ' ');
    strpadcpy((char *) &outbuf[8], 8, s->vendor, ' ');

    memset(&outbuf[32], 0, 4);
    memcpy(&outbuf[32], s->version, MIN(4, strlen(s->version)));
    /*
     * We claim conformance to SPC-3, which is required for guests
     * to ask for modern features like READ CAPACITY(16) or the
     * block characteristics VPD page by default.  Not all of SPC-3
     * is actually implemented, but we're good enough.
     */
    outbuf[2] = s->qdev.default_scsi_version;
    outbuf[3] = 2 | 0x10; /* Format 2, HiSup */

    if (buflen > 36) {
        outbuf[4] = buflen - 5; /* Additional Length = (Len - 1) - 4 */
    } else {
        /* If the allocation length of the CDB is too small,
         * the additional length is not adjusted.
         */
        outbuf[4] = 36 - 5;
    }

    /* Sync data transfer and TCQ.  */
    outbuf[7] = 0x10 | (req->bus->info->tcq ? 0x02 : 0);
    return buflen;
}

static inline bool media_is_dvd(SCSIDiskState *s)
{
    uint64_t nb_sectors;
    if (s->qdev.type != TYPE_ROM) {
        return false;
    }
    if (!blk_is_available(s->qdev.conf.blk)) {
        return false;
    }
    blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
    return nb_sectors > CD_MAX_SECTORS;
}

static inline bool media_is_cd(SCSIDiskState *s)
{
    uint64_t nb_sectors;
    if (s->qdev.type != TYPE_ROM) {
        return false;
    }
    if (!blk_is_available(s->qdev.conf.blk)) {
        return false;
    }
    blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
    return nb_sectors <= CD_MAX_SECTORS;
}

static int scsi_read_disc_information(SCSIDiskState *s, SCSIDiskReq *r,
                                      uint8_t *outbuf)
{
    uint8_t type = r->req.cmd.buf[1] & 7;

    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }

    /* Types 1/2 are only defined for Blu-Ray.  */
    if (type != 0) {
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        return -1;
    }

    memset(outbuf, 0, 34);
    outbuf[1] = 32;
    outbuf[2] = 0xe;  /* last session complete, disc finalized */
    outbuf[3] = 1;    /* first track on disc */
    outbuf[4] = 1;    /* # of sessions */
    outbuf[5] = 1;    /* first track of last session */
    outbuf[6] = 1;    /* last track of last session */
    outbuf[7] = 0x20; /* unrestricted use */
    outbuf[8] = 0x00; /* CD-ROM or DVD-ROM */
    /* 9-10-11: most significant byte corresponding bytes 4-5-6 */
    /* 12-23: not meaningful for CD-ROM or DVD-ROM */
    /* 24-31: disc bar code */
    /* 32: disc application code */
    /* 33: number of OPC tables */

    return 34;
}

static int scsi_read_dvd_structure(SCSIDiskState *s, SCSIDiskReq *r,
                                   uint8_t *outbuf)
{
    static const int rds_caps_size[5] = {
        [0] = 2048 + 4,
        [1] = 4 + 4,
        [3] = 188 + 4,
        [4] = 2048 + 4,
    };

    uint8_t media = r->req.cmd.buf[1];
    uint8_t layer = r->req.cmd.buf[6];
    uint8_t format = r->req.cmd.buf[7];
    int size = -1;

    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }
    if (media != 0) {
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        return -1;
    }

    if (format != 0xff) {
        if (!blk_is_available(s->qdev.conf.blk)) {
            scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
            return -1;
        }
        if (media_is_cd(s)) {
            scsi_check_condition(r, SENSE_CODE(INCOMPATIBLE_FORMAT));
            return -1;
        }
        if (format >= ARRAY_SIZE(rds_caps_size)) {
            return -1;
        }
        size = rds_caps_size[format];
        memset(outbuf, 0, size);
    }

    switch (format) {
    case 0x00: {
        /* Physical format information */
        uint64_t nb_sectors;
        if (layer != 0) {
            goto fail;
        }
        blk_get_geometry(s->qdev.conf.blk, &nb_sectors);

        outbuf[4] = 1;   /* DVD-ROM, part version 1 */
        outbuf[5] = 0xf; /* 120mm disc, minimum rate unspecified */
        outbuf[6] = 1;   /* one layer, read-only (per MMC-2 spec) */
        outbuf[7] = 0;   /* default densities */

        stl_be_p(&outbuf[12], (nb_sectors >> 2) - 1); /* end sector */
        stl_be_p(&outbuf[16], (nb_sectors >> 2) - 1); /* l0 end sector */
        break;
    }

    case 0x01: /* DVD copyright information, all zeros */
        break;

    case 0x03: /* BCA information - invalid field for no BCA info */
        return -1;

    case 0x04: /* DVD disc manufacturing information, all zeros */
        break;

    case 0xff: { /* List capabilities */
        int i;
        size = 4;
        for (i = 0; i < ARRAY_SIZE(rds_caps_size); i++) {
            if (!rds_caps_size[i]) {
                continue;
            }
            outbuf[size] = i;
            outbuf[size + 1] = 0x40; /* Not writable, readable */
            stw_be_p(&outbuf[size + 2], rds_caps_size[i]);
            size += 4;
        }
        break;
    }

    default:
        return -1;
    }

    /* Size of buffer, not including 2 byte size field */
    stw_be_p(outbuf, size - 2);
    return size;

fail:
    return -1;
}
static int scsi_event_status_media(SCSIDiskState *s, uint8_t *outbuf)
{
    uint8_t event_code, media_status;

    media_status = 0;
    if (s->tray_open) {
        media_status = MS_TRAY_OPEN;
    } else if (blk_is_inserted(s->qdev.conf.blk)) {
        media_status = MS_MEDIA_PRESENT;
    }

    /* Event notification descriptor */
    event_code = MEC_NO_CHANGE;
    if (media_status != MS_TRAY_OPEN) {
        if (s->media_event) {
            event_code = MEC_NEW_MEDIA;
            s->media_event = false;
        } else if (s->eject_request) {
            event_code = MEC_EJECT_REQUESTED;
            s->eject_request = false;
        }
    }

    outbuf[0] = event_code;
    outbuf[1] = media_status;

    /* These fields are reserved, just clear them. */
    outbuf[2] = 0;
    outbuf[3] = 0;
    return 4;
}

static int scsi_get_event_status_notification(SCSIDiskState *s, SCSIDiskReq *r,
                                              uint8_t *outbuf)
{
    int size;
    uint8_t *buf = r->req.cmd.buf;
    uint8_t notification_class_request = buf[4];
    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }
    if ((buf[1] & 1) == 0) {
        /* asynchronous */
        return -1;
    }

    size = 4;
    outbuf[0] = outbuf[1] = 0;
    outbuf[3] = 1 << GESN_MEDIA; /* supported events */
    if (notification_class_request & (1 << GESN_MEDIA)) {
        outbuf[2] = GESN_MEDIA;
        size += scsi_event_status_media(s, &outbuf[size]);
    } else {
        outbuf[2] = 0x80;
    }
    stw_be_p(outbuf, size - 4);
    return size;
}

static int scsi_get_configuration(SCSIDiskState *s, uint8_t *outbuf)
{
    int current;

    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }

    if (media_is_dvd(s)) {
        current = MMC_PROFILE_DVD_ROM;
    } else if (media_is_cd(s)) {
        current = MMC_PROFILE_CD_ROM;
    } else {
        current = MMC_PROFILE_NONE;
    }

    memset(outbuf, 0, 40);
    stl_be_p(&outbuf[0], 36); /* Bytes after the data length field */
    stw_be_p(&outbuf[6], current);
    /* outbuf[8] - outbuf[19]: Feature 0 - Profile list */
    outbuf[10] = 0x03; /* persistent, current */
    outbuf[11] = 8; /* two profiles */
    stw_be_p(&outbuf[12], MMC_PROFILE_DVD_ROM);
    outbuf[14] = (current == MMC_PROFILE_DVD_ROM);
    stw_be_p(&outbuf[16], MMC_PROFILE_CD_ROM);
    outbuf[18] = (current == MMC_PROFILE_CD_ROM);
    /* outbuf[20] - outbuf[31]: Feature 1 - Core feature */
    stw_be_p(&outbuf[20], 1);
    outbuf[22] = 0x08 | 0x03; /* version 2, persistent, current */
    outbuf[23] = 8;
    stl_be_p(&outbuf[24], 1); /* SCSI */
    outbuf[28] = 1; /* DBE = 1, mandatory */
    /* outbuf[32] - outbuf[39]: Feature 3 - Removable media feature */
    stw_be_p(&outbuf[32], 3);
    outbuf[34] = 0x08 | 0x03; /* version 2, persistent, current */
    outbuf[35] = 4;
    outbuf[36] = 0x39; /* tray, load=1, eject=1, unlocked at powerup, lock=1 */
    /* TODO: Random readable, CD read, DVD read, drive serial number,
       power management */
    return 40;
}

static int scsi_emulate_mechanism_status(SCSIDiskState *s, uint8_t *outbuf)
{
    if (s->qdev.type != TYPE_ROM) {
        return -1;
    }
    memset(outbuf, 0, 8);
    outbuf[5] = 1; /* CD-ROM */
    return 8;
}
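/*
 * Example (for illustration only): a MODE SENSE(6) CDB of
 * 1a 08 08 00 20 00 requests the current values of the caching page
 * (page code 0x08) with DBD=1 and a 32-byte allocation length; it ends up
 * here with page_control == 0 once the header has been emitted by
 * scsi_disk_emulate_mode_sense().
 */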
static int mode_sense_page(SCSIDiskState *s, int page, uint8_t **p_outbuf,
                           int page_control)
{
    static const int mode_sense_valid[0x3f] = {
        [MODE_PAGE_HD_GEOMETRY]            = (1 << TYPE_DISK),
        [MODE_PAGE_FLEXIBLE_DISK_GEOMETRY] = (1 << TYPE_DISK),
        [MODE_PAGE_CACHING]                = (1 << TYPE_DISK) | (1 << TYPE_ROM),
        [MODE_PAGE_R_W_ERROR]              = (1 << TYPE_DISK) | (1 << TYPE_ROM),
        [MODE_PAGE_AUDIO_CTL]              = (1 << TYPE_ROM),
        [MODE_PAGE_CAPABILITIES]           = (1 << TYPE_ROM),
    };

    uint8_t *p = *p_outbuf + 2;
    int length;

    if ((mode_sense_valid[page] & (1 << s->qdev.type)) == 0) {
        return -1;
    }

    /*
     * If Changeable Values are requested, a mask denoting those mode
     * parameters that are changeable shall be returned.  As we currently
     * don't support parameter changes via MODE_SELECT all bits are returned
     * set to zero.  The buffer was already memset to zero by the caller of
     * this function.
     *
     * The offsets here are off by two compared to the descriptions in the
     * SCSI specs, because those include a 2-byte header.  This is unfortunate,
     * but it is done so that offsets are consistent within our implementation
     * of MODE SENSE and MODE SELECT.  MODE SELECT has to deal with both
     * 2-byte and 4-byte headers.
     */
    switch (page) {
    case MODE_PAGE_HD_GEOMETRY:
        length = 0x16;
        if (page_control == 1) { /* Changeable Values */
            break;
        }
        /* if a geometry hint is available, use it */
        p[0] = (s->qdev.conf.cyls >> 16) & 0xff;
        p[1] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[2] = s->qdev.conf.cyls & 0xff;
        p[3] = s->qdev.conf.heads & 0xff;
        /* Write precomp start cylinder, disabled */
        p[4] = (s->qdev.conf.cyls >> 16) & 0xff;
        p[5] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[6] = s->qdev.conf.cyls & 0xff;
        /* Reduced current start cylinder, disabled */
        p[7] = (s->qdev.conf.cyls >> 16) & 0xff;
        p[8] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[9] = s->qdev.conf.cyls & 0xff;
        /* Device step rate [ns], 200ns */
        p[10] = 0;
        p[11] = 200;
        /* Landing zone cylinder */
        p[12] = 0xff;
        p[13] = 0xff;
        p[14] = 0xff;
        /* Medium rotation rate [rpm], 5400 rpm */
        p[18] = (5400 >> 8) & 0xff;
        p[19] = 5400 & 0xff;
        break;

    case MODE_PAGE_FLEXIBLE_DISK_GEOMETRY:
        length = 0x1e;
        if (page_control == 1) { /* Changeable Values */
            break;
        }
        /* Transfer rate [kbit/s], 5Mbit/s */
        p[0] = 5000 >> 8;
        p[1] = 5000 & 0xff;
        /* if a geometry hint is available, use it */
        p[2] = s->qdev.conf.heads & 0xff;
        p[3] = s->qdev.conf.secs & 0xff;
        p[4] = s->qdev.blocksize >> 8;
        p[6] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[7] = s->qdev.conf.cyls & 0xff;
        /* Write precomp start cylinder, disabled */
        p[8] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[9] = s->qdev.conf.cyls & 0xff;
        /* Reduced current start cylinder, disabled */
        p[10] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[11] = s->qdev.conf.cyls & 0xff;
        /* Device step rate [100us], 100us */
        p[12] = 0;
        p[13] = 1;
        /* Device step pulse width [us], 1us */
        p[14] = 1;
        /* Device head settle delay [100us], 100us */
        p[15] = 0;
        p[16] = 1;
        /* Motor on delay [0.1s], 0.1s */
        p[17] = 1;
        /* Motor off delay [0.1s], 0.1s */
        p[18] = 1;
        /* Medium rotation rate [rpm], 5400 rpm */
        p[26] = (5400 >> 8) & 0xff;
        p[27] = 5400 & 0xff;
        break;

    case MODE_PAGE_CACHING:
        length = 0x12;
        if (page_control == 1 || /* Changeable Values */
            blk_enable_write_cache(s->qdev.conf.blk)) {
            p[0] = 4; /* WCE */
        }
        break;

    case MODE_PAGE_R_W_ERROR:
        length = 10;
        if (page_control == 1) { /* Changeable Values */
            break;
        }
        p[0] = 0x80; /* Automatic Write Reallocation Enabled */
        if (s->qdev.type == TYPE_ROM) {
            p[1] = 0x20; /* Read Retry Count */
        }
        break;

    case MODE_PAGE_AUDIO_CTL:
        length = 14;
        break;

    case MODE_PAGE_CAPABILITIES:
        length = 0x14;
        if (page_control == 1) { /* Changeable Values */
            break;
        }

        p[0] = 0x3b; /* CD-R & CD-RW read */
        p[1] = 0;    /* Writing not supported */
        p[2] = 0x7f; /* Audio, composite, digital out,
                        mode 2 form 1&2, multi session */
        p[3] = 0xff; /* CD DA, DA accurate, RW supported,
                        RW corrected, C2 errors, ISRC,
                        UPC, Bar code */
        p[4] = 0x2d | (s->tray_locked ? 2 : 0);
        /* Locking supported, jumper present, eject, tray */
        p[5] = 0; /* no volume & mute control, no
                     changer */
        p[6] = (50 * 176) >> 8; /* 50x read speed */
        p[7] = (50 * 176) & 0xff;
        p[8] = 2 >> 8; /* Two volume levels */
        p[9] = 2 & 0xff;
        p[10] = 2048 >> 8; /* 2M buffer */
        p[11] = 2048 & 0xff;
        p[12] = (16 * 176) >> 8; /* 16x read speed current */
        p[13] = (16 * 176) & 0xff;
        p[16] = (16 * 176) >> 8; /* 16x write speed */
        p[17] = (16 * 176) & 0xff;
        p[18] = (16 * 176) >> 8; /* 16x write speed current */
        p[19] = (16 * 176) & 0xff;
        break;

    default:
        return -1;
    }

    assert(length < 256);
    (*p_outbuf)[0] = page;
    (*p_outbuf)[1] = length;
    *p_outbuf += length + 2;
    return length + 2;
}
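/*
 * PC (page control) selects which flavour of a page is returned:
 * 0 = current values, 1 = changeable mask, 2 = default values, 3 = saved
 * values.  Here 2 is reported with the current values, and 3 is rejected
 * below with SAVING PARAMETERS NOT SUPPORTED.
 */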
static int scsi_disk_emulate_mode_sense(SCSIDiskReq *r, uint8_t *outbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    uint64_t nb_sectors;
    bool dbd;
    int page, buflen, ret, page_control;
    uint8_t *p;
    uint8_t dev_specific_param;

    dbd = (r->req.cmd.buf[1] & 0x8) != 0;
    page = r->req.cmd.buf[2] & 0x3f;
    page_control = (r->req.cmd.buf[2] & 0xc0) >> 6;

    trace_scsi_disk_emulate_mode_sense((r->req.cmd.buf[0] == MODE_SENSE) ? 6 :
                                       10, page, r->req.cmd.xfer, page_control);
    memset(outbuf, 0, r->req.cmd.xfer);
    p = outbuf;

    if (s->qdev.type == TYPE_DISK) {
        dev_specific_param = s->features & (1 << SCSI_DISK_F_DPOFUA) ? 0x10 : 0;
        if (blk_is_read_only(s->qdev.conf.blk)) {
            dev_specific_param |= 0x80; /* Readonly.  */
        }
    } else {
        /* MMC prescribes that CD/DVD drives have no block descriptors,
         * and defines no device-specific parameter.  */
        dev_specific_param = 0x00;
        dbd = true;
    }

    if (r->req.cmd.buf[0] == MODE_SENSE) {
        p[1] = 0; /* Default media type.  */
        p[2] = dev_specific_param;
        p[3] = 0; /* Block descriptor length.  */
        p += 4;
    } else { /* MODE_SENSE_10 */
        p[2] = 0; /* Default media type.  */
        p[3] = dev_specific_param;
        p[6] = p[7] = 0; /* Block descriptor length.  */
        p += 8;
    }

    blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
    if (!dbd && nb_sectors) {
        if (r->req.cmd.buf[0] == MODE_SENSE) {
            outbuf[3] = 8; /* Block descriptor length  */
        } else { /* MODE_SENSE_10 */
            outbuf[7] = 8; /* Block descriptor length  */
        }
        nb_sectors /= (s->qdev.blocksize / 512);
        if (nb_sectors > 0xffffff) {
            nb_sectors = 0;
        }
        p[0] = 0; /* media density code */
        p[1] = (nb_sectors >> 16) & 0xff;
        p[2] = (nb_sectors >> 8) & 0xff;
        p[3] = nb_sectors & 0xff;
        p[4] = 0; /* reserved */
        p[5] = 0; /* bytes 5-7 are the sector size in bytes */
        p[6] = s->qdev.blocksize >> 8;
        p[7] = 0;
        p += 8;
    }

    if (page_control == 3) {
        /* Saved Values */
        scsi_check_condition(r, SENSE_CODE(SAVING_PARAMS_NOT_SUPPORTED));
        return -1;
    }

    if (page == 0x3f) {
        for (page = 0; page <= 0x3e; page++) {
            mode_sense_page(s, page, &p, page_control);
        }
    } else {
        ret = mode_sense_page(s, page, &p, page_control);
        if (ret == -1) {
            return -1;
        }
    }

    buflen = p - outbuf;
    /*
     * The mode data length field specifies the length in bytes of the
     * following data that is available to be transferred.  The mode data
     * length does not include itself.
     */
    if (r->req.cmd.buf[0] == MODE_SENSE) {
        outbuf[0] = buflen - 1;
    } else { /* MODE_SENSE_10 */
        outbuf[0] = ((buflen - 2) >> 8) & 0xff;
        outbuf[1] = (buflen - 2) & 0xff;
    }
    return buflen;
}

static int scsi_disk_emulate_read_toc(SCSIRequest *req, uint8_t *outbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    int start_track, format, msf, toclen;
    uint64_t nb_sectors;

    msf = req->cmd.buf[1] & 2;
    format = req->cmd.buf[2] & 0xf;
    start_track = req->cmd.buf[6];
    blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
    trace_scsi_disk_emulate_read_toc(start_track, format, msf >> 1);
    nb_sectors /= s->qdev.blocksize / 512;
    switch (format) {
    case 0:
        toclen = cdrom_read_toc(nb_sectors, outbuf, msf, start_track);
        break;
    case 1:
        /* multi session : only a single session defined */
        toclen = 12;
        memset(outbuf, 0, 12);
        outbuf[1] = 0x0a;
        outbuf[2] = 0x01;
        outbuf[3] = 0x01;
        break;
    case 2:
        toclen = cdrom_read_toc_raw(nb_sectors, outbuf, msf, start_track);
        break;
    default:
        return -1;
    }
    return toclen;
}
static int scsi_disk_emulate_start_stop(SCSIDiskReq *r)
{
    SCSIRequest *req = &r->req;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    bool start = req->cmd.buf[4] & 1;
    bool loej = req->cmd.buf[4] & 2; /* load on start, eject on !start */
    int pwrcnd = req->cmd.buf[4] & 0xf0;

    if (pwrcnd) {
        /* eject/load only happens for power condition == 0 */
        return 0;
    }

    if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) && loej) {
        if (!start && !s->tray_open && s->tray_locked) {
            scsi_check_condition(r,
                                 blk_is_inserted(s->qdev.conf.blk)
                                 ? SENSE_CODE(ILLEGAL_REQ_REMOVAL_PREVENTED)
                                 : SENSE_CODE(NOT_READY_REMOVAL_PREVENTED));
            return -1;
        }

        if (s->tray_open != !start) {
            blk_eject(s->qdev.conf.blk, !start);
            s->tray_open = !start;
        }
    }
    return 0;
}

static void scsi_disk_emulate_read_data(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    int buflen = r->iov.iov_len;

    if (buflen) {
        trace_scsi_disk_emulate_read_data(buflen);
        r->iov.iov_len = 0;
        r->started = true;
        scsi_req_data(&r->req, buflen);
        return;
    }

    /* This also clears the sense buffer for REQUEST SENSE.  */
    scsi_req_complete(&r->req, GOOD);
}

static int scsi_disk_check_mode_select(SCSIDiskState *s, int page,
                                       uint8_t *inbuf, int inlen)
{
    uint8_t mode_current[SCSI_MAX_MODE_LEN];
    uint8_t mode_changeable[SCSI_MAX_MODE_LEN];
    uint8_t *p;
    int len, expected_len, changeable_len, i;

    /* The input buffer does not include the page header, so it is
     * off by 2 bytes.
     */
    expected_len = inlen + 2;
    if (expected_len > SCSI_MAX_MODE_LEN) {
        return -1;
    }

    p = mode_current;
    memset(mode_current, 0, inlen + 2);
    len = mode_sense_page(s, page, &p, 0);
    if (len < 0 || len != expected_len) {
        return -1;
    }

    p = mode_changeable;
    memset(mode_changeable, 0, inlen + 2);
    changeable_len = mode_sense_page(s, page, &p, 1);
    assert(changeable_len == len);

    /* Check that unchangeable bits are the same as what MODE SENSE
     * would return.
     */
    for (i = 2; i < len; i++) {
        if (((mode_current[i] ^ inbuf[i - 2]) & ~mode_changeable[i]) != 0) {
            return -1;
        }
    }
    return 0;
}

static void scsi_disk_apply_mode_select(SCSIDiskState *s, int page, uint8_t *p)
{
    switch (page) {
    case MODE_PAGE_CACHING:
        blk_set_enable_write_cache(s->qdev.conf.blk, (p[0] & 4) != 0);
        break;

    default:
        break;
    }
}
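/*
 * Walk the mode pages in a MODE SELECT parameter list.  With change == false
 * the pages are only validated against the changeable-bits mask reported by
 * MODE SENSE; with change == true they are actually applied.  The caller runs
 * both passes so that an invalid list leaves the device state untouched.
 */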
static int mode_select_pages(SCSIDiskReq *r, uint8_t *p, int len, bool change)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    while (len > 0) {
        int page, subpage, page_len;

        /* Parse both possible formats for the mode page headers.  */
        page = p[0] & 0x3f;
        if (p[0] & 0x40) {
            if (len < 4) {
                goto invalid_param_len;
            }
            subpage = p[1];
            page_len = lduw_be_p(&p[2]);
            p += 4;
            len -= 4;
        } else {
            if (len < 2) {
                goto invalid_param_len;
            }
            subpage = 0;
            page_len = p[1];
            p += 2;
            len -= 2;
        }

        if (subpage) {
            goto invalid_param;
        }
        if (page_len > len) {
            goto invalid_param_len;
        }

        if (!change) {
            if (scsi_disk_check_mode_select(s, page, p, page_len) < 0) {
                goto invalid_param;
            }
        } else {
            scsi_disk_apply_mode_select(s, page, p);
        }

        p += page_len;
        len -= page_len;
    }
    return 0;

invalid_param:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM));
    return -1;

invalid_param_len:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
    return -1;
}

static void scsi_disk_emulate_mode_select(SCSIDiskReq *r, uint8_t *inbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    uint8_t *p = inbuf;
    int cmd = r->req.cmd.buf[0];
    int len = r->req.cmd.xfer;
    int hdr_len = (cmd == MODE_SELECT ? 4 : 8);
    int bd_len;
    int pass;

    /* We only support PF=1, SP=0.  */
    if ((r->req.cmd.buf[1] & 0x11) != 0x10) {
        goto invalid_field;
    }

    if (len < hdr_len) {
        goto invalid_param_len;
    }

    bd_len = (cmd == MODE_SELECT ? p[3] : lduw_be_p(&p[6]));
    len -= hdr_len;
    p += hdr_len;
    if (len < bd_len) {
        goto invalid_param_len;
    }
    if (bd_len != 0 && bd_len != 8) {
        goto invalid_param;
    }

    len -= bd_len;
    p += bd_len;

    /* Ensure no change is made if there is an error!  */
    for (pass = 0; pass < 2; pass++) {
        if (mode_select_pages(r, p, len, pass == 1) < 0) {
            assert(pass == 0);
            return;
        }
    }
    if (!blk_enable_write_cache(s->qdev.conf.blk)) {
        /* The request is used as the AIO opaque value, so add a ref.  */
        scsi_req_ref(&r->req);
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
                         BLOCK_ACCT_FLUSH);
        r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
        return;
    }

    scsi_req_complete(&r->req, GOOD);
    return;

invalid_param:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM));
    return;

invalid_param_len:
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
    return;

invalid_field:
    scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
}
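/*
 * Example: with max_lba == 999 (a 1000-block device), sector_num == 1000 with
 * nb_sectors == 0 is accepted, while sector_num == 999 with nb_sectors == 2
 * is rejected.
 */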
static inline bool check_lba_range(SCSIDiskState *s,
                                   uint64_t sector_num, uint32_t nb_sectors)
{
    /*
     * The first line tests that no overflow happens when computing the last
     * sector.  The second line tests that the last accessed sector is in
     * range.
     *
     * Careful, the computations should not underflow for nb_sectors == 0,
     * and a 0-block read to the first LBA beyond the end of device is
     * valid.
     */
    return (sector_num <= sector_num + nb_sectors &&
            sector_num + nb_sectors <= s->qdev.max_lba + 1);
}

typedef struct UnmapCBData {
    SCSIDiskReq *r;
    uint8_t *inbuf;
    int count;
} UnmapCBData;

static void scsi_unmap_complete(void *opaque, int ret);

static void scsi_unmap_complete_noio(UnmapCBData *data, int ret)
{
    SCSIDiskReq *r = data->r;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb == NULL);

    if (data->count > 0) {
        r->sector = ldq_be_p(&data->inbuf[0])
            * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
        r->sector_count = (ldl_be_p(&data->inbuf[8]) & 0xffffffffULL)
            * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
        if (!check_lba_range(s, r->sector, r->sector_count)) {
            block_acct_invalid(blk_get_stats(s->qdev.conf.blk),
                               BLOCK_ACCT_UNMAP);
            scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
            goto done;
        }

        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         r->sector_count * BDRV_SECTOR_SIZE,
                         BLOCK_ACCT_UNMAP);

        r->req.aiocb = blk_aio_pdiscard(s->qdev.conf.blk,
                                        r->sector * BDRV_SECTOR_SIZE,
                                        r->sector_count * BDRV_SECTOR_SIZE,
                                        scsi_unmap_complete, data);
        data->count--;
        data->inbuf += 16;
        return;
    }

    scsi_req_complete(&r->req, GOOD);

done:
    scsi_req_unref(&r->req);
    g_free(data);
}

static void scsi_unmap_complete(void *opaque, int ret)
{
    UnmapCBData *data = opaque;
    SCSIDiskReq *r = data->r;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (scsi_disk_req_check_error(r, ret, true)) {
        scsi_req_unref(&r->req);
        g_free(data);
    } else {
        block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
        scsi_unmap_complete_noio(data, ret);
    }
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}
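/*
 * UNMAP parameter list layout (SBC): bytes 0-1 hold the data length, bytes
 * 2-3 the block descriptor data length, and the 16-byte descriptors start at
 * byte 8, each carrying an 8-byte LBA and a 4-byte block count.  The checks
 * below enforce exactly that structure before the descriptors are handed to
 * scsi_unmap_complete_noio() one at a time.
 */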
static void scsi_disk_emulate_unmap(SCSIDiskReq *r, uint8_t *inbuf)
{
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
    uint8_t *p = inbuf;
    int len = r->req.cmd.xfer;
    UnmapCBData *data;

    /* Reject ANCHOR=1.  */
    if (r->req.cmd.buf[1] & 0x1) {
        goto invalid_field;
    }

    if (len < 8) {
        goto invalid_param_len;
    }
    if (len < lduw_be_p(&p[0]) + 2) {
        goto invalid_param_len;
    }
    if (len < lduw_be_p(&p[2]) + 8) {
        goto invalid_param_len;
    }
    if (lduw_be_p(&p[2]) & 15) {
        goto invalid_param_len;
    }

    if (blk_is_read_only(s->qdev.conf.blk)) {
        block_acct_invalid(blk_get_stats(s->qdev.conf.blk), BLOCK_ACCT_UNMAP);
        scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
        return;
    }

    data = g_new0(UnmapCBData, 1);
    data->r = r;
    data->inbuf = &p[8];
    data->count = lduw_be_p(&p[2]) >> 4;

    /* The matching unref is in scsi_unmap_complete, before data is freed.  */
    scsi_req_ref(&r->req);
    scsi_unmap_complete_noio(data, 0);
    return;

invalid_param_len:
    block_acct_invalid(blk_get_stats(s->qdev.conf.blk), BLOCK_ACCT_UNMAP);
    scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
    return;

invalid_field:
    block_acct_invalid(blk_get_stats(s->qdev.conf.blk), BLOCK_ACCT_UNMAP);
    scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
}

typedef struct WriteSameCBData {
    SCSIDiskReq *r;
    int64_t sector;
    int nb_sectors;
    QEMUIOVector qiov;
    struct iovec iov;
} WriteSameCBData;

static void scsi_write_same_complete(void *opaque, int ret)
{
    WriteSameCBData *data = opaque;
    SCSIDiskReq *r = data->r;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;
    aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
    if (scsi_disk_req_check_error(r, ret, true)) {
        goto done;
    }

    block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);

    data->nb_sectors -= data->iov.iov_len / 512;
    data->sector += data->iov.iov_len / 512;
    data->iov.iov_len = MIN(data->nb_sectors * 512, data->iov.iov_len);
    if (data->iov.iov_len) {
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         data->iov.iov_len, BLOCK_ACCT_WRITE);
        /* Reinitialize qiov, to handle unaligned WRITE SAME request
         * where final qiov may need smaller size */
        qemu_iovec_init_external(&data->qiov, &data->iov, 1);
        r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk,
                                       data->sector << BDRV_SECTOR_BITS,
                                       &data->qiov, 0,
                                       scsi_write_same_complete, data);
        aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
        return;
    }

    scsi_req_complete(&r->req, GOOD);

done:
    scsi_req_unref(&r->req);
    qemu_vfree(data->iov.iov_base);
    g_free(data);
    aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
}

static void scsi_disk_emulate_write_same(SCSIDiskReq *r, uint8_t *inbuf)
{
    SCSIRequest *req = &r->req;
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    uint32_t nb_sectors = scsi_data_cdb_xfer(r->req.cmd.buf);
    WriteSameCBData *data;
    uint8_t *buf;
    int i;

    /* Fail if PBDATA=1 or LBDATA=1 or ANCHOR=1.  */
    if (nb_sectors == 0 || (req->cmd.buf[1] & 0x16)) {
        scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        return;
    }

    if (blk_is_read_only(s->qdev.conf.blk)) {
        scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
        return;
    }
    if (!check_lba_range(s, r->req.cmd.lba, nb_sectors)) {
        scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
        return;
    }

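    /*
     * Writing zeroes (either because the UNMAP bit is set or because the
     * pattern itself is all zeroes) is offloaded to blk_aio_pwrite_zeroes;
     * any other pattern is replicated into a bounce buffer and written out
     * in chunks of at most SCSI_WRITE_SAME_MAX bytes.
     */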
    if ((req->cmd.buf[1] & 0x1) || buffer_is_zero(inbuf, s->qdev.blocksize)) {
        int flags = (req->cmd.buf[1] & 0x8) ? BDRV_REQ_MAY_UNMAP : 0;

        /* The request is used as the AIO opaque value, so add a ref.  */
        scsi_req_ref(&r->req);
        block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                         nb_sectors * s->qdev.blocksize,
                         BLOCK_ACCT_WRITE);
        r->req.aiocb = blk_aio_pwrite_zeroes(s->qdev.conf.blk,
                                             r->req.cmd.lba * s->qdev.blocksize,
                                             nb_sectors * s->qdev.blocksize,
                                             flags, scsi_aio_complete, r);
        return;
    }

    data = g_new0(WriteSameCBData, 1);
    data->r = r;
    data->sector = r->req.cmd.lba * (s->qdev.blocksize / 512);
    data->nb_sectors = nb_sectors * (s->qdev.blocksize / 512);
    data->iov.iov_len = MIN(data->nb_sectors * 512, SCSI_WRITE_SAME_MAX);
    data->iov.iov_base = buf = blk_blockalign(s->qdev.conf.blk,
                                              data->iov.iov_len);
    qemu_iovec_init_external(&data->qiov, &data->iov, 1);

    for (i = 0; i < data->iov.iov_len; i += s->qdev.blocksize) {
        memcpy(&buf[i], inbuf, s->qdev.blocksize);
    }

    scsi_req_ref(&r->req);
    block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
                     data->iov.iov_len, BLOCK_ACCT_WRITE);
    r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk,
                                   data->sector << BDRV_SECTOR_BITS,
                                   &data->qiov, 0,
                                   scsi_write_same_complete, data);
}

static void scsi_disk_emulate_write_data(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    if (r->iov.iov_len) {
        int buflen = r->iov.iov_len;
        trace_scsi_disk_emulate_write_data(buflen);
        r->iov.iov_len = 0;
        scsi_req_data(&r->req, buflen);
        return;
    }

    switch (req->cmd.buf[0]) {
    case MODE_SELECT:
    case MODE_SELECT_10:
        /* This also clears the sense buffer for REQUEST SENSE.  */
        scsi_disk_emulate_mode_select(r, r->iov.iov_base);
        break;

    case UNMAP:
        scsi_disk_emulate_unmap(r, r->iov.iov_base);
        break;

    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
        if (r->req.status == -1) {
            scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
        }
        break;

    case WRITE_SAME_10:
    case WRITE_SAME_16:
        scsi_disk_emulate_write_same(r, r->iov.iov_base);
        break;

    default:
        abort();
    }
}

static int32_t scsi_disk_emulate_command(SCSIRequest *req, uint8_t *buf)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
    SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
    uint64_t nb_sectors;
    uint8_t *outbuf;
    int buflen;

    switch (req->cmd.buf[0]) {
    case INQUIRY:
    case MODE_SENSE:
    case MODE_SENSE_10:
    case RESERVE:
    case RESERVE_10:
    case RELEASE:
    case RELEASE_10:
    case START_STOP:
    case ALLOW_MEDIUM_REMOVAL:
    case GET_CONFIGURATION:
    case GET_EVENT_STATUS_NOTIFICATION:
    case MECHANISM_STATUS:
    case REQUEST_SENSE:
        break;

    default:
        if (!blk_is_available(s->qdev.conf.blk)) {
            scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
            return 0;
        }
        break;
    }

    /*
     * FIXME: we shouldn't return anything bigger than 4k, but the code
     * requires the buffer to be as big as req->cmd.xfer in several
     * places.  So, do not allow CDBs with a very large ALLOCATION
     * LENGTH.  The real fix would be to modify scsi_read_data and
     * dma_buf_read, so that they return data beyond the buflen
     * as all zeros.
     */
    if (req->cmd.xfer > 65536) {
        goto illegal_request;
    }
    r->buflen = MAX(4096, req->cmd.xfer);

    if (!r->iov.iov_base) {
        r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen);
    }

    buflen = req->cmd.xfer;
    outbuf = r->iov.iov_base;
    memset(outbuf, 0, r->buflen);
    switch (req->cmd.buf[0]) {
    case TEST_UNIT_READY:
        assert(blk_is_available(s->qdev.conf.blk));
        break;
    case INQUIRY:
        buflen = scsi_disk_emulate_inquiry(req, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case MODE_SENSE:
    case MODE_SENSE_10:
        buflen = scsi_disk_emulate_mode_sense(r, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case READ_TOC:
        buflen = scsi_disk_emulate_read_toc(req, outbuf);
        if (buflen < 0) {
            goto illegal_request;
        }
        break;
    case RESERVE:
        if (req->cmd.buf[1] & 1) {
            goto illegal_request;
        }
        break;
    case RESERVE_10:
        if (req->cmd.buf[1] & 3) {
            goto illegal_request;
        }
        break;
    case RELEASE:
        if (req->cmd.buf[1] & 1) {
            goto illegal_request;
        }
        break;
    case RELEASE_10:
        if (req->cmd.buf[1] & 3) {
            goto illegal_request;
        }
        break;
    case START_STOP:
        if (scsi_disk_emulate_start_stop(r) < 0) {
            return 0;
        }
        break;
    case ALLOW_MEDIUM_REMOVAL:
        s->tray_locked = req->cmd.buf[4] & 1;
        blk_lock_medium(s->qdev.conf.blk, req->cmd.buf[4] & 1);
        break;
    case READ_CAPACITY_10:
        /* The normal LEN field for this command is zero.  */
        memset(outbuf, 0, 8);
        blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
        if (!nb_sectors) {
            scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY));
            return 0;
        }
        if ((req->cmd.buf[8] & 1) == 0 && req->cmd.lba) {
            goto illegal_request;
        }
        nb_sectors /= s->qdev.blocksize / 512;
        /* Returned value is the address of the last sector.  */
        nb_sectors--;
        /* Remember the new size for read/write sanity checking. */
        s->qdev.max_lba = nb_sectors;
        /* Clip to 2TB, instead of returning capacity modulo 2TB. */
        if (nb_sectors > UINT32_MAX) {
            nb_sectors = UINT32_MAX;
        }
        outbuf[0] = (nb_sectors >> 24) & 0xff;
        outbuf[1] = (nb_sectors >> 16) & 0xff;
        outbuf[2] = (nb_sectors >> 8) & 0xff;
        outbuf[3] = nb_sectors & 0xff;
        outbuf[4] = 0;
        outbuf[5] = 0;
        outbuf[6] = s->qdev.blocksize >> 8;
        outbuf[7] = 0;
        break;
*/ 2003 buflen = scsi_convert_sense(NULL, 0, outbuf, r->buflen, 2004 (req->cmd.buf[1] & 1) == 0); 2005 if (buflen < 0) { 2006 goto illegal_request; 2007 } 2008 break; 2009 case MECHANISM_STATUS: 2010 buflen = scsi_emulate_mechanism_status(s, outbuf); 2011 if (buflen < 0) { 2012 goto illegal_request; 2013 } 2014 break; 2015 case GET_CONFIGURATION: 2016 buflen = scsi_get_configuration(s, outbuf); 2017 if (buflen < 0) { 2018 goto illegal_request; 2019 } 2020 break; 2021 case GET_EVENT_STATUS_NOTIFICATION: 2022 buflen = scsi_get_event_status_notification(s, r, outbuf); 2023 if (buflen < 0) { 2024 goto illegal_request; 2025 } 2026 break; 2027 case READ_DISC_INFORMATION: 2028 buflen = scsi_read_disc_information(s, r, outbuf); 2029 if (buflen < 0) { 2030 goto illegal_request; 2031 } 2032 break; 2033 case READ_DVD_STRUCTURE: 2034 buflen = scsi_read_dvd_structure(s, r, outbuf); 2035 if (buflen < 0) { 2036 goto illegal_request; 2037 } 2038 break; 2039 case SERVICE_ACTION_IN_16: 2040 /* Service Action In subcommands. */ 2041 if ((req->cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) { 2042 trace_scsi_disk_emulate_command_SAI_16(); 2043 memset(outbuf, 0, req->cmd.xfer); 2044 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 2045 if (!nb_sectors) { 2046 scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY)); 2047 return 0; 2048 } 2049 if ((req->cmd.buf[14] & 1) == 0 && req->cmd.lba) { 2050 goto illegal_request; 2051 } 2052 nb_sectors /= s->qdev.blocksize / 512; 2053 /* Returned value is the address of the last sector. */ 2054 nb_sectors--; 2055 /* Remember the new size for read/write sanity checking. */ 2056 s->qdev.max_lba = nb_sectors; 2057 outbuf[0] = (nb_sectors >> 56) & 0xff; 2058 outbuf[1] = (nb_sectors >> 48) & 0xff; 2059 outbuf[2] = (nb_sectors >> 40) & 0xff; 2060 outbuf[3] = (nb_sectors >> 32) & 0xff; 2061 outbuf[4] = (nb_sectors >> 24) & 0xff; 2062 outbuf[5] = (nb_sectors >> 16) & 0xff; 2063 outbuf[6] = (nb_sectors >> 8) & 0xff; 2064 outbuf[7] = nb_sectors & 0xff; 2065 outbuf[8] = 0; 2066 outbuf[9] = 0; 2067 outbuf[10] = s->qdev.blocksize >> 8; 2068 outbuf[11] = 0; 2069 outbuf[12] = 0; 2070 outbuf[13] = get_physical_block_exp(&s->qdev.conf); 2071 2072 /* set TPE bit if the format supports discard */ 2073 if (s->qdev.conf.discard_granularity) { 2074 outbuf[14] = 0x80; 2075 } 2076 2077 /* Protection, exponent and lowest lba field left blank. */ 2078 break; 2079 } 2080 trace_scsi_disk_emulate_command_SAI_unsupported(); 2081 goto illegal_request; 2082 case SYNCHRONIZE_CACHE: 2083 /* The request is used as the AIO opaque value, so add a ref. 
*/
2084 scsi_req_ref(&r->req);
2085 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
2086 BLOCK_ACCT_FLUSH);
2087 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
2088 return 0;
2089 case SEEK_10:
2090 trace_scsi_disk_emulate_command_SEEK_10(r->req.cmd.lba);
2091 if (r->req.cmd.lba > s->qdev.max_lba) {
2092 goto illegal_lba;
2093 }
2094 break;
2095 case MODE_SELECT:
2096 trace_scsi_disk_emulate_command_MODE_SELECT(r->req.cmd.xfer);
2097 break;
2098 case MODE_SELECT_10:
2099 trace_scsi_disk_emulate_command_MODE_SELECT_10(r->req.cmd.xfer);
2100 break;
2101 case UNMAP:
2102 trace_scsi_disk_emulate_command_UNMAP(r->req.cmd.xfer);
2103 break;
2104 case VERIFY_10:
2105 case VERIFY_12:
2106 case VERIFY_16:
2107 trace_scsi_disk_emulate_command_VERIFY((req->cmd.buf[1] >> 1) & 3);
2108 if (req->cmd.buf[1] & 6) {
2109 goto illegal_request;
2110 }
2111 break;
2112 case WRITE_SAME_10:
2113 case WRITE_SAME_16:
2114 trace_scsi_disk_emulate_command_WRITE_SAME(
2115 req->cmd.buf[0] == WRITE_SAME_10 ? 10 : 16, r->req.cmd.xfer);
2116 break;
2117 default:
2118 trace_scsi_disk_emulate_command_UNKNOWN(buf[0],
2119 scsi_command_name(buf[0]));
2120 scsi_check_condition(r, SENSE_CODE(INVALID_OPCODE));
2121 return 0;
2122 }
2123 assert(!r->req.aiocb);
2124 r->iov.iov_len = MIN(r->buflen, req->cmd.xfer);
2125 if (r->iov.iov_len == 0) {
2126 scsi_req_complete(&r->req, GOOD);
2127 }
2128 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
2129 assert(r->iov.iov_len == req->cmd.xfer);
2130 return -r->iov.iov_len;
2131 } else {
2132 return r->iov.iov_len;
2133 }
2134
2135 illegal_request:
2136 if (r->req.status == -1) {
2137 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
2138 }
2139 return 0;
2140
2141 illegal_lba:
2142 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
2143 return 0;
2144 }
2145
2146 /* Execute a SCSI command. Returns the length of the data expected by the
2147 command. This will be positive for data transfers from the device
2148 (e.g. disk reads), negative for transfers to the device (e.g. disk writes),
2149 and zero if the command does not transfer any data. */
2150
2151 static int32_t scsi_disk_dma_command(SCSIRequest *req, uint8_t *buf)
2152 {
2153 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
2154 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
2155 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
2156 uint32_t len;
2157 uint8_t command;
2158
2159 command = buf[0];
2160
2161 if (!blk_is_available(s->qdev.conf.blk)) {
2162 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
2163 return 0;
2164 }
2165
2166 len = scsi_data_cdb_xfer(r->req.cmd.buf);
2167 switch (command) {
2168 case READ_6:
2169 case READ_10:
2170 case READ_12:
2171 case READ_16:
2172 trace_scsi_disk_dma_command_READ(r->req.cmd.lba, len);
2173 /* Protection information is not supported. For SCSI versions 2 and
2174 * older (as determined by snooping the guest's INQUIRY commands),
2175 * there is no RD/WR/VRPROTECT, so skip this check in these versions.
2176 */ 2177 if (s->qdev.scsi_version > 2 && (r->req.cmd.buf[1] & 0xe0)) { 2178 goto illegal_request; 2179 } 2180 if (!check_lba_range(s, r->req.cmd.lba, len)) { 2181 goto illegal_lba; 2182 } 2183 r->sector = r->req.cmd.lba * (s->qdev.blocksize / 512); 2184 r->sector_count = len * (s->qdev.blocksize / 512); 2185 break; 2186 case WRITE_6: 2187 case WRITE_10: 2188 case WRITE_12: 2189 case WRITE_16: 2190 case WRITE_VERIFY_10: 2191 case WRITE_VERIFY_12: 2192 case WRITE_VERIFY_16: 2193 if (blk_is_read_only(s->qdev.conf.blk)) { 2194 scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED)); 2195 return 0; 2196 } 2197 trace_scsi_disk_dma_command_WRITE( 2198 (command & 0xe) == 0xe ? "And Verify " : "", 2199 r->req.cmd.lba, len); 2200 /* fall through */ 2201 case VERIFY_10: 2202 case VERIFY_12: 2203 case VERIFY_16: 2204 /* We get here only for BYTCHK == 0x01 and only for scsi-block. 2205 * As far as DMA is concerned, we can treat it the same as a write; 2206 * scsi_block_do_sgio will send VERIFY commands. 2207 */ 2208 if (s->qdev.scsi_version > 2 && (r->req.cmd.buf[1] & 0xe0)) { 2209 goto illegal_request; 2210 } 2211 if (!check_lba_range(s, r->req.cmd.lba, len)) { 2212 goto illegal_lba; 2213 } 2214 r->sector = r->req.cmd.lba * (s->qdev.blocksize / 512); 2215 r->sector_count = len * (s->qdev.blocksize / 512); 2216 break; 2217 default: 2218 abort(); 2219 illegal_request: 2220 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 2221 return 0; 2222 illegal_lba: 2223 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE)); 2224 return 0; 2225 } 2226 r->need_fua_emulation = sdc->need_fua_emulation(&r->req.cmd); 2227 if (r->sector_count == 0) { 2228 scsi_req_complete(&r->req, GOOD); 2229 } 2230 assert(r->iov.iov_len == 0); 2231 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { 2232 return -r->sector_count * 512; 2233 } else { 2234 return r->sector_count * 512; 2235 } 2236 } 2237 2238 static void scsi_disk_reset(DeviceState *dev) 2239 { 2240 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev.qdev, dev); 2241 uint64_t nb_sectors; 2242 2243 scsi_device_purge_requests(&s->qdev, SENSE_CODE(RESET)); 2244 2245 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 2246 nb_sectors /= s->qdev.blocksize / 512; 2247 if (nb_sectors) { 2248 nb_sectors--; 2249 } 2250 s->qdev.max_lba = nb_sectors; 2251 /* reset tray statuses */ 2252 s->tray_locked = 0; 2253 s->tray_open = 0; 2254 2255 s->qdev.scsi_version = s->qdev.default_scsi_version; 2256 } 2257 2258 static void scsi_disk_resize_cb(void *opaque) 2259 { 2260 SCSIDiskState *s = opaque; 2261 2262 /* SPC lists this sense code as available only for 2263 * direct-access devices. 2264 */ 2265 if (s->qdev.type == TYPE_DISK) { 2266 scsi_device_report_change(&s->qdev, SENSE_CODE(CAPACITY_CHANGED)); 2267 } 2268 } 2269 2270 static void scsi_cd_change_media_cb(void *opaque, bool load, Error **errp) 2271 { 2272 SCSIDiskState *s = opaque; 2273 2274 /* 2275 * When a CD gets changed, we have to report an ejected state and 2276 * then a loaded state to guests so that they detect tray 2277 * open/close and media change events. Guests that do not use 2278 * GET_EVENT_STATUS_NOTIFICATION to detect such tray open/close 2279 * states rely on this behavior. 2280 * 2281 * media_changed governs the state machine used for unit attention 2282 * report. media_event is used by GET EVENT STATUS NOTIFICATION. 
2283 */ 2284 s->media_changed = load; 2285 s->tray_open = !load; 2286 scsi_device_set_ua(&s->qdev, SENSE_CODE(UNIT_ATTENTION_NO_MEDIUM)); 2287 s->media_event = true; 2288 s->eject_request = false; 2289 } 2290 2291 static void scsi_cd_eject_request_cb(void *opaque, bool force) 2292 { 2293 SCSIDiskState *s = opaque; 2294 2295 s->eject_request = true; 2296 if (force) { 2297 s->tray_locked = false; 2298 } 2299 } 2300 2301 static bool scsi_cd_is_tray_open(void *opaque) 2302 { 2303 return ((SCSIDiskState *)opaque)->tray_open; 2304 } 2305 2306 static bool scsi_cd_is_medium_locked(void *opaque) 2307 { 2308 return ((SCSIDiskState *)opaque)->tray_locked; 2309 } 2310 2311 static const BlockDevOps scsi_disk_removable_block_ops = { 2312 .change_media_cb = scsi_cd_change_media_cb, 2313 .eject_request_cb = scsi_cd_eject_request_cb, 2314 .is_tray_open = scsi_cd_is_tray_open, 2315 .is_medium_locked = scsi_cd_is_medium_locked, 2316 2317 .resize_cb = scsi_disk_resize_cb, 2318 }; 2319 2320 static const BlockDevOps scsi_disk_block_ops = { 2321 .resize_cb = scsi_disk_resize_cb, 2322 }; 2323 2324 static void scsi_disk_unit_attention_reported(SCSIDevice *dev) 2325 { 2326 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2327 if (s->media_changed) { 2328 s->media_changed = false; 2329 scsi_device_set_ua(&s->qdev, SENSE_CODE(MEDIUM_CHANGED)); 2330 } 2331 } 2332 2333 static void scsi_realize(SCSIDevice *dev, Error **errp) 2334 { 2335 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2336 bool read_only; 2337 2338 if (!s->qdev.conf.blk) { 2339 error_setg(errp, "drive property not set"); 2340 return; 2341 } 2342 2343 if (!(s->features & (1 << SCSI_DISK_F_REMOVABLE)) && 2344 !blk_is_inserted(s->qdev.conf.blk)) { 2345 error_setg(errp, "Device needs media, but drive is empty"); 2346 return; 2347 } 2348 2349 blkconf_blocksizes(&s->qdev.conf); 2350 2351 if (s->qdev.conf.logical_block_size > 2352 s->qdev.conf.physical_block_size) { 2353 error_setg(errp, 2354 "logical_block_size > physical_block_size not supported"); 2355 return; 2356 } 2357 2358 if (blk_get_aio_context(s->qdev.conf.blk) != qemu_get_aio_context() && 2359 !s->qdev.hba_supports_iothread) 2360 { 2361 error_setg(errp, "HBA does not support iothreads"); 2362 return; 2363 } 2364 2365 if (dev->type == TYPE_DISK) { 2366 if (!blkconf_geometry(&dev->conf, NULL, 65535, 255, 255, errp)) { 2367 return; 2368 } 2369 } 2370 2371 read_only = blk_is_read_only(s->qdev.conf.blk); 2372 if (dev->type == TYPE_ROM) { 2373 read_only = true; 2374 } 2375 2376 if (!blkconf_apply_backend_options(&dev->conf, read_only, 2377 dev->type == TYPE_DISK, errp)) { 2378 return; 2379 } 2380 2381 if (s->qdev.conf.discard_granularity == -1) { 2382 s->qdev.conf.discard_granularity = 2383 MAX(s->qdev.conf.logical_block_size, DEFAULT_DISCARD_GRANULARITY); 2384 } 2385 2386 if (!s->version) { 2387 s->version = g_strdup(qemu_hw_version()); 2388 } 2389 if (!s->vendor) { 2390 s->vendor = g_strdup("QEMU"); 2391 } 2392 if (!s->device_id) { 2393 if (s->serial) { 2394 s->device_id = g_strdup_printf("%.20s", s->serial); 2395 } else { 2396 const char *str = blk_name(s->qdev.conf.blk); 2397 if (str && *str) { 2398 s->device_id = g_strdup(str); 2399 } 2400 } 2401 } 2402 2403 if (blk_is_sg(s->qdev.conf.blk)) { 2404 error_setg(errp, "unwanted /dev/sg*"); 2405 return; 2406 } 2407 2408 if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) && 2409 !(s->features & (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS))) { 2410 blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_removable_block_ops, s); 2411 } else { 2412 
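/*
 * Non-removable devices, and scsi-block (which sets
 * SCSI_DISK_F_NO_REMOVABLE_DEVOPS in scsi_block_realize), only need to
 * hear about backend resizes; the tray and medium callbacks in
 * scsi_disk_removable_block_ops are not registered for them.
 */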
blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_block_ops, s); 2413 } 2414 blk_set_guest_block_size(s->qdev.conf.blk, s->qdev.blocksize); 2415 2416 blk_iostatus_enable(s->qdev.conf.blk); 2417 } 2418 2419 static void scsi_hd_realize(SCSIDevice *dev, Error **errp) 2420 { 2421 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2422 AioContext *ctx = NULL; 2423 /* can happen for devices without drive. The error message for missing 2424 * backend will be issued in scsi_realize 2425 */ 2426 if (s->qdev.conf.blk) { 2427 ctx = blk_get_aio_context(s->qdev.conf.blk); 2428 aio_context_acquire(ctx); 2429 blkconf_blocksizes(&s->qdev.conf); 2430 } 2431 s->qdev.blocksize = s->qdev.conf.logical_block_size; 2432 s->qdev.type = TYPE_DISK; 2433 if (!s->product) { 2434 s->product = g_strdup("QEMU HARDDISK"); 2435 } 2436 scsi_realize(&s->qdev, errp); 2437 if (ctx) { 2438 aio_context_release(ctx); 2439 } 2440 } 2441 2442 static void scsi_cd_realize(SCSIDevice *dev, Error **errp) 2443 { 2444 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2445 AioContext *ctx; 2446 int ret; 2447 2448 if (!dev->conf.blk) { 2449 /* Anonymous BlockBackend for an empty drive. As we put it into 2450 * dev->conf, qdev takes care of detaching on unplug. */ 2451 dev->conf.blk = blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL); 2452 ret = blk_attach_dev(dev->conf.blk, &dev->qdev); 2453 assert(ret == 0); 2454 } 2455 2456 ctx = blk_get_aio_context(dev->conf.blk); 2457 aio_context_acquire(ctx); 2458 s->qdev.blocksize = 2048; 2459 s->qdev.type = TYPE_ROM; 2460 s->features |= 1 << SCSI_DISK_F_REMOVABLE; 2461 if (!s->product) { 2462 s->product = g_strdup("QEMU CD-ROM"); 2463 } 2464 scsi_realize(&s->qdev, errp); 2465 aio_context_release(ctx); 2466 } 2467 2468 static void scsi_disk_realize(SCSIDevice *dev, Error **errp) 2469 { 2470 DriveInfo *dinfo; 2471 Error *local_err = NULL; 2472 2473 if (!dev->conf.blk) { 2474 scsi_realize(dev, &local_err); 2475 assert(local_err); 2476 error_propagate(errp, local_err); 2477 return; 2478 } 2479 2480 dinfo = blk_legacy_dinfo(dev->conf.blk); 2481 if (dinfo && dinfo->media_cd) { 2482 scsi_cd_realize(dev, errp); 2483 } else { 2484 scsi_hd_realize(dev, errp); 2485 } 2486 } 2487 2488 static const SCSIReqOps scsi_disk_emulate_reqops = { 2489 .size = sizeof(SCSIDiskReq), 2490 .free_req = scsi_free_request, 2491 .send_command = scsi_disk_emulate_command, 2492 .read_data = scsi_disk_emulate_read_data, 2493 .write_data = scsi_disk_emulate_write_data, 2494 .get_buf = scsi_get_buf, 2495 }; 2496 2497 static const SCSIReqOps scsi_disk_dma_reqops = { 2498 .size = sizeof(SCSIDiskReq), 2499 .free_req = scsi_free_request, 2500 .send_command = scsi_disk_dma_command, 2501 .read_data = scsi_read_data, 2502 .write_data = scsi_write_data, 2503 .get_buf = scsi_get_buf, 2504 .load_request = scsi_disk_load_request, 2505 .save_request = scsi_disk_save_request, 2506 }; 2507 2508 static const SCSIReqOps *const scsi_disk_reqops_dispatch[256] = { 2509 [TEST_UNIT_READY] = &scsi_disk_emulate_reqops, 2510 [INQUIRY] = &scsi_disk_emulate_reqops, 2511 [MODE_SENSE] = &scsi_disk_emulate_reqops, 2512 [MODE_SENSE_10] = &scsi_disk_emulate_reqops, 2513 [START_STOP] = &scsi_disk_emulate_reqops, 2514 [ALLOW_MEDIUM_REMOVAL] = &scsi_disk_emulate_reqops, 2515 [READ_CAPACITY_10] = &scsi_disk_emulate_reqops, 2516 [READ_TOC] = &scsi_disk_emulate_reqops, 2517 [READ_DVD_STRUCTURE] = &scsi_disk_emulate_reqops, 2518 [READ_DISC_INFORMATION] = &scsi_disk_emulate_reqops, 2519 [GET_CONFIGURATION] = &scsi_disk_emulate_reqops, 2520 
[GET_EVENT_STATUS_NOTIFICATION] = &scsi_disk_emulate_reqops, 2521 [MECHANISM_STATUS] = &scsi_disk_emulate_reqops, 2522 [SERVICE_ACTION_IN_16] = &scsi_disk_emulate_reqops, 2523 [REQUEST_SENSE] = &scsi_disk_emulate_reqops, 2524 [SYNCHRONIZE_CACHE] = &scsi_disk_emulate_reqops, 2525 [SEEK_10] = &scsi_disk_emulate_reqops, 2526 [MODE_SELECT] = &scsi_disk_emulate_reqops, 2527 [MODE_SELECT_10] = &scsi_disk_emulate_reqops, 2528 [UNMAP] = &scsi_disk_emulate_reqops, 2529 [WRITE_SAME_10] = &scsi_disk_emulate_reqops, 2530 [WRITE_SAME_16] = &scsi_disk_emulate_reqops, 2531 [VERIFY_10] = &scsi_disk_emulate_reqops, 2532 [VERIFY_12] = &scsi_disk_emulate_reqops, 2533 [VERIFY_16] = &scsi_disk_emulate_reqops, 2534 2535 [READ_6] = &scsi_disk_dma_reqops, 2536 [READ_10] = &scsi_disk_dma_reqops, 2537 [READ_12] = &scsi_disk_dma_reqops, 2538 [READ_16] = &scsi_disk_dma_reqops, 2539 [WRITE_6] = &scsi_disk_dma_reqops, 2540 [WRITE_10] = &scsi_disk_dma_reqops, 2541 [WRITE_12] = &scsi_disk_dma_reqops, 2542 [WRITE_16] = &scsi_disk_dma_reqops, 2543 [WRITE_VERIFY_10] = &scsi_disk_dma_reqops, 2544 [WRITE_VERIFY_12] = &scsi_disk_dma_reqops, 2545 [WRITE_VERIFY_16] = &scsi_disk_dma_reqops, 2546 }; 2547 2548 static void scsi_disk_new_request_dump(uint32_t lun, uint32_t tag, uint8_t *buf) 2549 { 2550 int i; 2551 int len = scsi_cdb_length(buf); 2552 char *line_buffer, *p; 2553 2554 line_buffer = g_malloc(len * 5 + 1); 2555 2556 for (i = 0, p = line_buffer; i < len; i++) { 2557 p += sprintf(p, " 0x%02x", buf[i]); 2558 } 2559 trace_scsi_disk_new_request(lun, tag, line_buffer); 2560 2561 g_free(line_buffer); 2562 } 2563 2564 static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun, 2565 uint8_t *buf, void *hba_private) 2566 { 2567 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d); 2568 SCSIRequest *req; 2569 const SCSIReqOps *ops; 2570 uint8_t command; 2571 2572 command = buf[0]; 2573 ops = scsi_disk_reqops_dispatch[command]; 2574 if (!ops) { 2575 ops = &scsi_disk_emulate_reqops; 2576 } 2577 req = scsi_req_alloc(ops, &s->qdev, tag, lun, hba_private); 2578 2579 if (trace_event_get_state_backends(TRACE_SCSI_DISK_NEW_REQUEST)) { 2580 scsi_disk_new_request_dump(lun, tag, buf); 2581 } 2582 2583 return req; 2584 } 2585 2586 #ifdef __linux__ 2587 static int get_device_type(SCSIDiskState *s) 2588 { 2589 uint8_t cmd[16]; 2590 uint8_t buf[36]; 2591 int ret; 2592 2593 memset(cmd, 0, sizeof(cmd)); 2594 memset(buf, 0, sizeof(buf)); 2595 cmd[0] = INQUIRY; 2596 cmd[4] = sizeof(buf); 2597 2598 ret = scsi_SG_IO_FROM_DEV(s->qdev.conf.blk, cmd, sizeof(cmd), 2599 buf, sizeof(buf)); 2600 if (ret < 0) { 2601 return -1; 2602 } 2603 s->qdev.type = buf[0]; 2604 if (buf[1] & 0x80) { 2605 s->features |= 1 << SCSI_DISK_F_REMOVABLE; 2606 } 2607 return 0; 2608 } 2609 2610 static void scsi_block_realize(SCSIDevice *dev, Error **errp) 2611 { 2612 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2613 AioContext *ctx; 2614 int sg_version; 2615 int rc; 2616 2617 if (!s->qdev.conf.blk) { 2618 error_setg(errp, "drive property not set"); 2619 return; 2620 } 2621 2622 if (s->rotation_rate) { 2623 error_report_once("rotation_rate is specified for scsi-block but is " 2624 "not implemented. 
This option is deprecated and will "
2625 "be removed in a future version");
2626 }
2627
2628 ctx = blk_get_aio_context(s->qdev.conf.blk);
2629 aio_context_acquire(ctx);
2630
2631 /* check that we are using a driver that supports SG_IO (version 3 and later) */
2632 rc = blk_ioctl(s->qdev.conf.blk, SG_GET_VERSION_NUM, &sg_version);
2633 if (rc < 0) {
2634 error_setg_errno(errp, -rc, "cannot get SG_IO version number");
2635 if (rc != -EPERM) {
2636 error_append_hint(errp, "Is this a SCSI device?\n");
2637 }
2638 goto out;
2639 }
2640 if (sg_version < 30000) {
2641 error_setg(errp, "scsi generic interface too old");
2642 goto out;
2643 }
2644
2645 /* get device type from INQUIRY data */
2646 rc = get_device_type(s);
2647 if (rc < 0) {
2648 error_setg(errp, "INQUIRY failed");
2649 goto out;
2650 }
2651
2652 /* Make a guess for the block size; we'll fix it when the guest sends
2653 * READ CAPACITY. If they don't, they likely would assume these sizes
2654 * anyway. (TODO: check in /sys).
2655 */
2656 if (s->qdev.type == TYPE_ROM || s->qdev.type == TYPE_WORM) {
2657 s->qdev.blocksize = 2048;
2658 } else {
2659 s->qdev.blocksize = 512;
2660 }
2661
2662 /* Make the scsi-block device not removable via the HMP and QMP eject
2663 * commands.
2664 */
2665 s->features |= (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS);
2666
2667 scsi_realize(&s->qdev, errp);
2668 scsi_generic_read_device_inquiry(&s->qdev);
2669
2670 out:
2671 aio_context_release(ctx);
2672 }
2673
2674 typedef struct SCSIBlockReq {
2675 SCSIDiskReq req;
2676 sg_io_hdr_t io_header;
2677
2678 /* Selected bytes of the original CDB, copied into our own CDB. */
2679 uint8_t cmd, cdb1, group_number;
2680
2681 /* CDB passed to SG_IO. */
2682 uint8_t cdb[16];
2683 } SCSIBlockReq;
2684
2685 static BlockAIOCB *scsi_block_do_sgio(SCSIBlockReq *req,
2686 int64_t offset, QEMUIOVector *iov,
2687 int direction,
2688 BlockCompletionFunc *cb, void *opaque)
2689 {
2690 sg_io_hdr_t *io_header = &req->io_header;
2691 SCSIDiskReq *r = &req->req;
2692 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
2693 int nb_logical_blocks;
2694 uint64_t lba;
2695 BlockAIOCB *aiocb;
2696
2697 /* This is not supported yet. It can only happen if the guest does
2698 * reads and writes that are not aligned to a single logical sector
2699 * _and_ cover multiple MemoryRegions.
2700 */
2701 assert(offset % s->qdev.blocksize == 0);
2702 assert(iov->size % s->qdev.blocksize == 0);
2703
2704 io_header->interface_id = 'S';
2705
2706 /* The data transfer comes from the QEMUIOVector. */
2707 io_header->dxfer_direction = direction;
2708 io_header->dxfer_len = iov->size;
2709 io_header->dxferp = (void *)iov->iov;
2710 io_header->iovec_count = iov->niov;
2711 assert(io_header->iovec_count == iov->niov); /* no overflow! */
2712
2713 /* Build a new CDB with the LBA and length patched in, in case
2714 * DMA helpers split the transfer into multiple segments. Do not
2715 * build a CDB smaller than what the guest wanted, and only build
2716 * a larger one if strictly necessary.
2717 */ 2718 io_header->cmdp = req->cdb; 2719 lba = offset / s->qdev.blocksize; 2720 nb_logical_blocks = io_header->dxfer_len / s->qdev.blocksize; 2721 2722 if ((req->cmd >> 5) == 0 && lba <= 0x1ffff) { 2723 /* 6-byte CDB */ 2724 stl_be_p(&req->cdb[0], lba | (req->cmd << 24)); 2725 req->cdb[4] = nb_logical_blocks; 2726 req->cdb[5] = 0; 2727 io_header->cmd_len = 6; 2728 } else if ((req->cmd >> 5) <= 1 && lba <= 0xffffffffULL) { 2729 /* 10-byte CDB */ 2730 req->cdb[0] = (req->cmd & 0x1f) | 0x20; 2731 req->cdb[1] = req->cdb1; 2732 stl_be_p(&req->cdb[2], lba); 2733 req->cdb[6] = req->group_number; 2734 stw_be_p(&req->cdb[7], nb_logical_blocks); 2735 req->cdb[9] = 0; 2736 io_header->cmd_len = 10; 2737 } else if ((req->cmd >> 5) != 4 && lba <= 0xffffffffULL) { 2738 /* 12-byte CDB */ 2739 req->cdb[0] = (req->cmd & 0x1f) | 0xA0; 2740 req->cdb[1] = req->cdb1; 2741 stl_be_p(&req->cdb[2], lba); 2742 stl_be_p(&req->cdb[6], nb_logical_blocks); 2743 req->cdb[10] = req->group_number; 2744 req->cdb[11] = 0; 2745 io_header->cmd_len = 12; 2746 } else { 2747 /* 16-byte CDB */ 2748 req->cdb[0] = (req->cmd & 0x1f) | 0x80; 2749 req->cdb[1] = req->cdb1; 2750 stq_be_p(&req->cdb[2], lba); 2751 stl_be_p(&req->cdb[10], nb_logical_blocks); 2752 req->cdb[14] = req->group_number; 2753 req->cdb[15] = 0; 2754 io_header->cmd_len = 16; 2755 } 2756 2757 /* The rest is as in scsi-generic.c. */ 2758 io_header->mx_sb_len = sizeof(r->req.sense); 2759 io_header->sbp = r->req.sense; 2760 io_header->timeout = UINT_MAX; 2761 io_header->usr_ptr = r; 2762 io_header->flags |= SG_FLAG_DIRECT_IO; 2763 2764 aiocb = blk_aio_ioctl(s->qdev.conf.blk, SG_IO, io_header, cb, opaque); 2765 assert(aiocb != NULL); 2766 return aiocb; 2767 } 2768 2769 static bool scsi_block_no_fua(SCSICommand *cmd) 2770 { 2771 return false; 2772 } 2773 2774 static BlockAIOCB *scsi_block_dma_readv(int64_t offset, 2775 QEMUIOVector *iov, 2776 BlockCompletionFunc *cb, void *cb_opaque, 2777 void *opaque) 2778 { 2779 SCSIBlockReq *r = opaque; 2780 return scsi_block_do_sgio(r, offset, iov, 2781 SG_DXFER_FROM_DEV, cb, cb_opaque); 2782 } 2783 2784 static BlockAIOCB *scsi_block_dma_writev(int64_t offset, 2785 QEMUIOVector *iov, 2786 BlockCompletionFunc *cb, void *cb_opaque, 2787 void *opaque) 2788 { 2789 SCSIBlockReq *r = opaque; 2790 return scsi_block_do_sgio(r, offset, iov, 2791 SG_DXFER_TO_DEV, cb, cb_opaque); 2792 } 2793 2794 static bool scsi_block_is_passthrough(SCSIDiskState *s, uint8_t *buf) 2795 { 2796 switch (buf[0]) { 2797 case VERIFY_10: 2798 case VERIFY_12: 2799 case VERIFY_16: 2800 /* Check if BYTCHK == 0x01 (data-out buffer contains data 2801 * for the number of logical blocks specified in the length 2802 * field). For other modes, do not use scatter/gather operation. 2803 */ 2804 if ((buf[1] & 6) == 2) { 2805 return false; 2806 } 2807 break; 2808 2809 case READ_6: 2810 case READ_10: 2811 case READ_12: 2812 case READ_16: 2813 case WRITE_6: 2814 case WRITE_10: 2815 case WRITE_12: 2816 case WRITE_16: 2817 case WRITE_VERIFY_10: 2818 case WRITE_VERIFY_12: 2819 case WRITE_VERIFY_16: 2820 /* MMC writing cannot be done via DMA helpers, because it sometimes 2821 * involves writing beyond the maximum LBA or to negative LBA (lead-in). 2822 * We might use scsi_block_dma_reqops as long as no writing commands are 2823 * seen, but performance usually isn't paramount on optical media. So, 2824 * just make scsi-block operate the same as scsi-generic for them. 
2825 */ 2826 if (s->qdev.type != TYPE_ROM) { 2827 return false; 2828 } 2829 break; 2830 2831 default: 2832 break; 2833 } 2834 2835 return true; 2836 } 2837 2838 2839 static int32_t scsi_block_dma_command(SCSIRequest *req, uint8_t *buf) 2840 { 2841 SCSIBlockReq *r = (SCSIBlockReq *)req; 2842 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 2843 2844 r->cmd = req->cmd.buf[0]; 2845 switch (r->cmd >> 5) { 2846 case 0: 2847 /* 6-byte CDB. */ 2848 r->cdb1 = r->group_number = 0; 2849 break; 2850 case 1: 2851 /* 10-byte CDB. */ 2852 r->cdb1 = req->cmd.buf[1]; 2853 r->group_number = req->cmd.buf[6]; 2854 break; 2855 case 4: 2856 /* 12-byte CDB. */ 2857 r->cdb1 = req->cmd.buf[1]; 2858 r->group_number = req->cmd.buf[10]; 2859 break; 2860 case 5: 2861 /* 16-byte CDB. */ 2862 r->cdb1 = req->cmd.buf[1]; 2863 r->group_number = req->cmd.buf[14]; 2864 break; 2865 default: 2866 abort(); 2867 } 2868 2869 /* Protection information is not supported. For SCSI versions 2 and 2870 * older (as determined by snooping the guest's INQUIRY commands), 2871 * there is no RD/WR/VRPROTECT, so skip this check in these versions. 2872 */ 2873 if (s->qdev.scsi_version > 2 && (req->cmd.buf[1] & 0xe0)) { 2874 scsi_check_condition(&r->req, SENSE_CODE(INVALID_FIELD)); 2875 return 0; 2876 } 2877 2878 r->req.status = &r->io_header.status; 2879 return scsi_disk_dma_command(req, buf); 2880 } 2881 2882 static const SCSIReqOps scsi_block_dma_reqops = { 2883 .size = sizeof(SCSIBlockReq), 2884 .free_req = scsi_free_request, 2885 .send_command = scsi_block_dma_command, 2886 .read_data = scsi_read_data, 2887 .write_data = scsi_write_data, 2888 .get_buf = scsi_get_buf, 2889 .load_request = scsi_disk_load_request, 2890 .save_request = scsi_disk_save_request, 2891 }; 2892 2893 static SCSIRequest *scsi_block_new_request(SCSIDevice *d, uint32_t tag, 2894 uint32_t lun, uint8_t *buf, 2895 void *hba_private) 2896 { 2897 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d); 2898 2899 if (scsi_block_is_passthrough(s, buf)) { 2900 return scsi_req_alloc(&scsi_generic_req_ops, &s->qdev, tag, lun, 2901 hba_private); 2902 } else { 2903 return scsi_req_alloc(&scsi_block_dma_reqops, &s->qdev, tag, lun, 2904 hba_private); 2905 } 2906 } 2907 2908 static int scsi_block_parse_cdb(SCSIDevice *d, SCSICommand *cmd, 2909 uint8_t *buf, void *hba_private) 2910 { 2911 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d); 2912 2913 if (scsi_block_is_passthrough(s, buf)) { 2914 return scsi_bus_parse_cdb(&s->qdev, cmd, buf, hba_private); 2915 } else { 2916 return scsi_req_parse_cdb(&s->qdev, cmd, buf); 2917 } 2918 } 2919 2920 static void scsi_block_update_sense(SCSIRequest *req) 2921 { 2922 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 2923 SCSIBlockReq *br = DO_UPCAST(SCSIBlockReq, req, r); 2924 r->req.sense_len = MIN(br->io_header.sb_len_wr, sizeof(r->req.sense)); 2925 } 2926 #endif 2927 2928 static 2929 BlockAIOCB *scsi_dma_readv(int64_t offset, QEMUIOVector *iov, 2930 BlockCompletionFunc *cb, void *cb_opaque, 2931 void *opaque) 2932 { 2933 SCSIDiskReq *r = opaque; 2934 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 2935 return blk_aio_preadv(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque); 2936 } 2937 2938 static 2939 BlockAIOCB *scsi_dma_writev(int64_t offset, QEMUIOVector *iov, 2940 BlockCompletionFunc *cb, void *cb_opaque, 2941 void *opaque) 2942 { 2943 SCSIDiskReq *r = opaque; 2944 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 2945 return blk_aio_pwritev(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque); 2946 } 
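/*
 * scsi_dma_readv/scsi_dma_writev are the default SCSIDiskClass dma_readv
 * and dma_writev hooks (installed in scsi_disk_base_class_initfn below):
 * they submit ordinary asynchronous block-layer I/O. scsi-block replaces
 * them with scsi_block_dma_readv/scsi_block_dma_writev above, which route
 * the same requests through SG_IO instead.
 */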
2947 2948 static void scsi_disk_base_class_initfn(ObjectClass *klass, void *data) 2949 { 2950 DeviceClass *dc = DEVICE_CLASS(klass); 2951 SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass); 2952 2953 dc->fw_name = "disk"; 2954 dc->reset = scsi_disk_reset; 2955 sdc->dma_readv = scsi_dma_readv; 2956 sdc->dma_writev = scsi_dma_writev; 2957 sdc->need_fua_emulation = scsi_is_cmd_fua; 2958 } 2959 2960 static const TypeInfo scsi_disk_base_info = { 2961 .name = TYPE_SCSI_DISK_BASE, 2962 .parent = TYPE_SCSI_DEVICE, 2963 .class_init = scsi_disk_base_class_initfn, 2964 .instance_size = sizeof(SCSIDiskState), 2965 .class_size = sizeof(SCSIDiskClass), 2966 .abstract = true, 2967 }; 2968 2969 #define DEFINE_SCSI_DISK_PROPERTIES() \ 2970 DEFINE_PROP_DRIVE_IOTHREAD("drive", SCSIDiskState, qdev.conf.blk), \ 2971 DEFINE_BLOCK_PROPERTIES_BASE(SCSIDiskState, qdev.conf), \ 2972 DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf), \ 2973 DEFINE_PROP_STRING("ver", SCSIDiskState, version), \ 2974 DEFINE_PROP_STRING("serial", SCSIDiskState, serial), \ 2975 DEFINE_PROP_STRING("vendor", SCSIDiskState, vendor), \ 2976 DEFINE_PROP_STRING("product", SCSIDiskState, product), \ 2977 DEFINE_PROP_STRING("device_id", SCSIDiskState, device_id) 2978 2979 2980 static Property scsi_hd_properties[] = { 2981 DEFINE_SCSI_DISK_PROPERTIES(), 2982 DEFINE_PROP_BIT("removable", SCSIDiskState, features, 2983 SCSI_DISK_F_REMOVABLE, false), 2984 DEFINE_PROP_BIT("dpofua", SCSIDiskState, features, 2985 SCSI_DISK_F_DPOFUA, false), 2986 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0), 2987 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0), 2988 DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0), 2989 DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size, 2990 DEFAULT_MAX_UNMAP_SIZE), 2991 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size, 2992 DEFAULT_MAX_IO_SIZE), 2993 DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0), 2994 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version, 2995 5), 2996 DEFINE_BLOCK_CHS_PROPERTIES(SCSIDiskState, qdev.conf), 2997 DEFINE_PROP_END_OF_LIST(), 2998 }; 2999 3000 static const VMStateDescription vmstate_scsi_disk_state = { 3001 .name = "scsi-disk", 3002 .version_id = 1, 3003 .minimum_version_id = 1, 3004 .fields = (VMStateField[]) { 3005 VMSTATE_SCSI_DEVICE(qdev, SCSIDiskState), 3006 VMSTATE_BOOL(media_changed, SCSIDiskState), 3007 VMSTATE_BOOL(media_event, SCSIDiskState), 3008 VMSTATE_BOOL(eject_request, SCSIDiskState), 3009 VMSTATE_BOOL(tray_open, SCSIDiskState), 3010 VMSTATE_BOOL(tray_locked, SCSIDiskState), 3011 VMSTATE_END_OF_LIST() 3012 } 3013 }; 3014 3015 static void scsi_hd_class_initfn(ObjectClass *klass, void *data) 3016 { 3017 DeviceClass *dc = DEVICE_CLASS(klass); 3018 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); 3019 3020 sc->realize = scsi_hd_realize; 3021 sc->alloc_req = scsi_new_request; 3022 sc->unit_attention_reported = scsi_disk_unit_attention_reported; 3023 dc->desc = "virtual SCSI disk"; 3024 dc->props = scsi_hd_properties; 3025 dc->vmsd = &vmstate_scsi_disk_state; 3026 } 3027 3028 static const TypeInfo scsi_hd_info = { 3029 .name = "scsi-hd", 3030 .parent = TYPE_SCSI_DISK_BASE, 3031 .class_init = scsi_hd_class_initfn, 3032 }; 3033 3034 static Property scsi_cd_properties[] = { 3035 DEFINE_SCSI_DISK_PROPERTIES(), 3036 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0), 3037 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0), 3038 DEFINE_PROP_UINT16("port_index", SCSIDiskState, 
port_index, 0), 3039 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size, 3040 DEFAULT_MAX_IO_SIZE), 3041 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version, 3042 5), 3043 DEFINE_PROP_END_OF_LIST(), 3044 }; 3045 3046 static void scsi_cd_class_initfn(ObjectClass *klass, void *data) 3047 { 3048 DeviceClass *dc = DEVICE_CLASS(klass); 3049 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); 3050 3051 sc->realize = scsi_cd_realize; 3052 sc->alloc_req = scsi_new_request; 3053 sc->unit_attention_reported = scsi_disk_unit_attention_reported; 3054 dc->desc = "virtual SCSI CD-ROM"; 3055 dc->props = scsi_cd_properties; 3056 dc->vmsd = &vmstate_scsi_disk_state; 3057 } 3058 3059 static const TypeInfo scsi_cd_info = { 3060 .name = "scsi-cd", 3061 .parent = TYPE_SCSI_DISK_BASE, 3062 .class_init = scsi_cd_class_initfn, 3063 }; 3064 3065 #ifdef __linux__ 3066 static Property scsi_block_properties[] = { 3067 DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf), \ 3068 DEFINE_PROP_DRIVE("drive", SCSIDiskState, qdev.conf.blk), 3069 DEFINE_PROP_BOOL("share-rw", SCSIDiskState, qdev.conf.share_rw, false), 3070 DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0), 3071 DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size, 3072 DEFAULT_MAX_UNMAP_SIZE), 3073 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size, 3074 DEFAULT_MAX_IO_SIZE), 3075 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version, 3076 -1), 3077 DEFINE_PROP_END_OF_LIST(), 3078 }; 3079 3080 static void scsi_block_class_initfn(ObjectClass *klass, void *data) 3081 { 3082 DeviceClass *dc = DEVICE_CLASS(klass); 3083 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); 3084 SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass); 3085 3086 sc->realize = scsi_block_realize; 3087 sc->alloc_req = scsi_block_new_request; 3088 sc->parse_cdb = scsi_block_parse_cdb; 3089 sdc->dma_readv = scsi_block_dma_readv; 3090 sdc->dma_writev = scsi_block_dma_writev; 3091 sdc->update_sense = scsi_block_update_sense; 3092 sdc->need_fua_emulation = scsi_block_no_fua; 3093 dc->desc = "SCSI block device passthrough"; 3094 dc->props = scsi_block_properties; 3095 dc->vmsd = &vmstate_scsi_disk_state; 3096 } 3097 3098 static const TypeInfo scsi_block_info = { 3099 .name = "scsi-block", 3100 .parent = TYPE_SCSI_DISK_BASE, 3101 .class_init = scsi_block_class_initfn, 3102 }; 3103 #endif 3104 3105 static Property scsi_disk_properties[] = { 3106 DEFINE_SCSI_DISK_PROPERTIES(), 3107 DEFINE_PROP_BIT("removable", SCSIDiskState, features, 3108 SCSI_DISK_F_REMOVABLE, false), 3109 DEFINE_PROP_BIT("dpofua", SCSIDiskState, features, 3110 SCSI_DISK_F_DPOFUA, false), 3111 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0), 3112 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0), 3113 DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0), 3114 DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size, 3115 DEFAULT_MAX_UNMAP_SIZE), 3116 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size, 3117 DEFAULT_MAX_IO_SIZE), 3118 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version, 3119 5), 3120 DEFINE_PROP_END_OF_LIST(), 3121 }; 3122 3123 static void scsi_disk_class_initfn(ObjectClass *klass, void *data) 3124 { 3125 DeviceClass *dc = DEVICE_CLASS(klass); 3126 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); 3127 3128 sc->realize = scsi_disk_realize; 3129 sc->alloc_req = scsi_new_request; 3130 sc->unit_attention_reported = scsi_disk_unit_attention_reported; 3131 
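/*
 * The legacy scsi-disk device re-assigns fw_name and reset (already set by
 * the base class) and defers the choice between disk and CD-ROM behaviour
 * to scsi_disk_realize, which checks the legacy drive's media_cd flag
 * (media=cdrom).
 */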
dc->fw_name = "disk"; 3132 dc->desc = "virtual SCSI disk or CD-ROM (legacy)"; 3133 dc->reset = scsi_disk_reset; 3134 dc->props = scsi_disk_properties; 3135 dc->vmsd = &vmstate_scsi_disk_state; 3136 } 3137 3138 static const TypeInfo scsi_disk_info = { 3139 .name = "scsi-disk", 3140 .parent = TYPE_SCSI_DISK_BASE, 3141 .class_init = scsi_disk_class_initfn, 3142 }; 3143 3144 static void scsi_disk_register_types(void) 3145 { 3146 type_register_static(&scsi_disk_base_info); 3147 type_register_static(&scsi_hd_info); 3148 type_register_static(&scsi_cd_info); 3149 #ifdef __linux__ 3150 type_register_static(&scsi_block_info); 3151 #endif 3152 type_register_static(&scsi_disk_info); 3153 } 3154 3155 type_init(scsi_disk_register_types) 3156