/*
 * SCSI Device emulation
 *
 * Copyright (c) 2006 CodeSourcery.
 * Based on code by Fabrice Bellard
 *
 * Written by Paul Brook
 * Modifications:
 *  2009-Dec-12 Artyom Tarasenko : implemented standard inquiry for the case
 *                                 when the allocation length of CDB is smaller
 *                                 than 36.
 *  2009-Oct-13 Artyom Tarasenko : implemented the block descriptor in the
 *                                 MODE SENSE response.
 *
 * This code is licensed under the LGPL.
 *
 * Note that this file only handles the SCSI architecture model and device
 * commands.  Emulation of interface/link layer protocols is handled by
 * the host adapter emulator.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "hw/scsi/scsi.h"
#include "migration/qemu-file-types.h"
#include "hw/scsi/emulation.h"
#include "scsi/constants.h"
#include "sysemu/sysemu.h"
#include "sysemu/block-backend.h"
#include "sysemu/blockdev.h"
#include "hw/block/block.h"
#include "sysemu/dma.h"
#include "qemu/cutils.h"
#include "trace.h"

#ifdef __linux__
#include <scsi/sg.h>
#endif

#define SCSI_WRITE_SAME_MAX         (512 * KiB)
#define SCSI_DMA_BUF_SIZE           (128 * KiB)
#define SCSI_MAX_INQUIRY_LEN        256
#define SCSI_MAX_MODE_LEN           256

#define DEFAULT_DISCARD_GRANULARITY (4 * KiB)
#define DEFAULT_MAX_UNMAP_SIZE      (1 * GiB)
#define DEFAULT_MAX_IO_SIZE         INT_MAX     /* 2 GB - 1 block */

#define TYPE_SCSI_DISK_BASE         "scsi-disk-base"

#define SCSI_DISK_BASE(obj) \
     OBJECT_CHECK(SCSIDiskState, (obj), TYPE_SCSI_DISK_BASE)
#define SCSI_DISK_BASE_CLASS(klass) \
     OBJECT_CLASS_CHECK(SCSIDiskClass, (klass), TYPE_SCSI_DISK_BASE)
#define SCSI_DISK_BASE_GET_CLASS(obj) \
     OBJECT_GET_CLASS(SCSIDiskClass, (obj), TYPE_SCSI_DISK_BASE)

typedef struct SCSIDiskClass {
    SCSIDeviceClass parent_class;
    DMAIOFunc *dma_readv;
    DMAIOFunc *dma_writev;
    bool (*need_fua_emulation)(SCSICommand *cmd);
    void (*update_sense)(SCSIRequest *r);
} SCSIDiskClass;

typedef struct SCSIDiskReq {
    SCSIRequest req;
    /* Both sector and sector_count are in terms of qemu 512 byte blocks. */
    uint64_t sector;
    uint32_t sector_count;
    uint32_t buflen;
    bool started;
    bool need_fua_emulation;
    struct iovec iov;
    QEMUIOVector qiov;
    BlockAcctCookie acct;
    unsigned char *status;
} SCSIDiskReq;

#define SCSI_DISK_F_REMOVABLE             0
#define SCSI_DISK_F_DPOFUA                1
#define SCSI_DISK_F_NO_REMOVABLE_DEVOPS   2

typedef struct SCSIDiskState
{
    SCSIDevice qdev;
    uint32_t features;
    bool media_changed;
    bool media_event;
    bool eject_request;
    uint16_t port_index;
    uint64_t max_unmap_size;
    uint64_t max_io_size;
    QEMUBH *bh;
    char *version;
    char *serial;
    char *vendor;
    char *product;
    char *device_id;
    bool tray_open;
    bool tray_locked;
    /*
     * 0x0000        - rotation rate not reported
     * 0x0001        - non-rotating medium (SSD)
     * 0x0002-0x0400 - reserved
     * 0x0401-0xfffe - rotations per minute
     * 0xffff        - reserved
     */
    uint16_t rotation_rate;
} SCSIDiskState;

static bool scsi_handle_rw_error(SCSIDiskReq *r, int error, bool acct_failed);

static void scsi_free_request(SCSIRequest *req)
{
    SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);

    qemu_vfree(r->iov.iov_base);
}

/* Helper function for command completion with sense.
*/ 125 static void scsi_check_condition(SCSIDiskReq *r, SCSISense sense) 126 { 127 trace_scsi_disk_check_condition(r->req.tag, sense.key, sense.asc, 128 sense.ascq); 129 scsi_req_build_sense(&r->req, sense); 130 scsi_req_complete(&r->req, CHECK_CONDITION); 131 } 132 133 static void scsi_init_iovec(SCSIDiskReq *r, size_t size) 134 { 135 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 136 137 if (!r->iov.iov_base) { 138 r->buflen = size; 139 r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen); 140 } 141 r->iov.iov_len = MIN(r->sector_count * 512, r->buflen); 142 qemu_iovec_init_external(&r->qiov, &r->iov, 1); 143 } 144 145 static void scsi_disk_save_request(QEMUFile *f, SCSIRequest *req) 146 { 147 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 148 149 qemu_put_be64s(f, &r->sector); 150 qemu_put_be32s(f, &r->sector_count); 151 qemu_put_be32s(f, &r->buflen); 152 if (r->buflen) { 153 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { 154 qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len); 155 } else if (!req->retry) { 156 uint32_t len = r->iov.iov_len; 157 qemu_put_be32s(f, &len); 158 qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len); 159 } 160 } 161 } 162 163 static void scsi_disk_load_request(QEMUFile *f, SCSIRequest *req) 164 { 165 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 166 167 qemu_get_be64s(f, &r->sector); 168 qemu_get_be32s(f, &r->sector_count); 169 qemu_get_be32s(f, &r->buflen); 170 if (r->buflen) { 171 scsi_init_iovec(r, r->buflen); 172 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { 173 qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len); 174 } else if (!r->req.retry) { 175 uint32_t len; 176 qemu_get_be32s(f, &len); 177 r->iov.iov_len = len; 178 assert(r->iov.iov_len <= r->buflen); 179 qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len); 180 } 181 } 182 183 qemu_iovec_init_external(&r->qiov, &r->iov, 1); 184 } 185 186 static bool scsi_disk_req_check_error(SCSIDiskReq *r, int ret, bool acct_failed) 187 { 188 if (r->req.io_canceled) { 189 scsi_req_cancel_complete(&r->req); 190 return true; 191 } 192 193 if (ret < 0 || (r->status && *r->status)) { 194 return scsi_handle_rw_error(r, -ret, acct_failed); 195 } 196 197 return false; 198 } 199 200 static void scsi_aio_complete(void *opaque, int ret) 201 { 202 SCSIDiskReq *r = (SCSIDiskReq *)opaque; 203 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 204 205 assert(r->req.aiocb != NULL); 206 r->req.aiocb = NULL; 207 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); 208 if (scsi_disk_req_check_error(r, ret, true)) { 209 goto done; 210 } 211 212 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); 213 scsi_req_complete(&r->req, GOOD); 214 215 done: 216 aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); 217 scsi_req_unref(&r->req); 218 } 219 220 static bool scsi_is_cmd_fua(SCSICommand *cmd) 221 { 222 switch (cmd->buf[0]) { 223 case READ_10: 224 case READ_12: 225 case READ_16: 226 case WRITE_10: 227 case WRITE_12: 228 case WRITE_16: 229 return (cmd->buf[1] & 8) != 0; 230 231 case VERIFY_10: 232 case VERIFY_12: 233 case VERIFY_16: 234 case WRITE_VERIFY_10: 235 case WRITE_VERIFY_12: 236 case WRITE_VERIFY_16: 237 return true; 238 239 case READ_6: 240 case WRITE_6: 241 default: 242 return false; 243 } 244 } 245 246 static void scsi_write_do_fua(SCSIDiskReq *r) 247 { 248 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 249 250 assert(r->req.aiocb == NULL); 251 assert(!r->req.io_canceled); 252 253 if (r->need_fua_emulation) { 254 
block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0, 255 BLOCK_ACCT_FLUSH); 256 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r); 257 return; 258 } 259 260 scsi_req_complete(&r->req, GOOD); 261 scsi_req_unref(&r->req); 262 } 263 264 static void scsi_dma_complete_noio(SCSIDiskReq *r, int ret) 265 { 266 assert(r->req.aiocb == NULL); 267 if (scsi_disk_req_check_error(r, ret, false)) { 268 goto done; 269 } 270 271 r->sector += r->sector_count; 272 r->sector_count = 0; 273 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { 274 scsi_write_do_fua(r); 275 return; 276 } else { 277 scsi_req_complete(&r->req, GOOD); 278 } 279 280 done: 281 scsi_req_unref(&r->req); 282 } 283 284 static void scsi_dma_complete(void *opaque, int ret) 285 { 286 SCSIDiskReq *r = (SCSIDiskReq *)opaque; 287 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 288 289 assert(r->req.aiocb != NULL); 290 r->req.aiocb = NULL; 291 292 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); 293 if (ret < 0) { 294 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct); 295 } else { 296 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); 297 } 298 scsi_dma_complete_noio(r, ret); 299 aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); 300 } 301 302 static void scsi_read_complete_noio(SCSIDiskReq *r, int ret) 303 { 304 uint32_t n; 305 306 assert(r->req.aiocb == NULL); 307 if (scsi_disk_req_check_error(r, ret, false)) { 308 goto done; 309 } 310 311 n = r->qiov.size / 512; 312 r->sector += n; 313 r->sector_count -= n; 314 scsi_req_data(&r->req, r->qiov.size); 315 316 done: 317 scsi_req_unref(&r->req); 318 } 319 320 static void scsi_read_complete(void *opaque, int ret) 321 { 322 SCSIDiskReq *r = (SCSIDiskReq *)opaque; 323 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 324 325 assert(r->req.aiocb != NULL); 326 r->req.aiocb = NULL; 327 328 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); 329 if (ret < 0) { 330 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct); 331 } else { 332 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); 333 trace_scsi_disk_read_complete(r->req.tag, r->qiov.size); 334 } 335 scsi_read_complete_noio(r, ret); 336 aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); 337 } 338 339 /* Actually issue a read to the block device. */ 340 static void scsi_do_read(SCSIDiskReq *r, int ret) 341 { 342 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 343 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s)); 344 345 assert (r->req.aiocb == NULL); 346 if (scsi_disk_req_check_error(r, ret, false)) { 347 goto done; 348 } 349 350 /* The request is used as the AIO opaque value, so add a ref. 
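 *
 * Two submission paths follow: with a guest scatter/gather list
 * (r->req.sg) the read goes straight to guest memory through
 * dma_blk_io() and sdc->dma_readv, completing in scsi_dma_complete();
 * without one, a bounce buffer of at most SCSI_DMA_BUF_SIZE is set up
 * by scsi_init_iovec() and completion goes through scsi_read_complete().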
*/ 351 scsi_req_ref(&r->req); 352 353 if (r->req.sg) { 354 dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_READ); 355 r->req.resid -= r->req.sg->size; 356 r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk), 357 r->req.sg, r->sector << BDRV_SECTOR_BITS, 358 BDRV_SECTOR_SIZE, 359 sdc->dma_readv, r, scsi_dma_complete, r, 360 DMA_DIRECTION_FROM_DEVICE); 361 } else { 362 scsi_init_iovec(r, SCSI_DMA_BUF_SIZE); 363 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 364 r->qiov.size, BLOCK_ACCT_READ); 365 r->req.aiocb = sdc->dma_readv(r->sector << BDRV_SECTOR_BITS, &r->qiov, 366 scsi_read_complete, r, r); 367 } 368 369 done: 370 scsi_req_unref(&r->req); 371 } 372 373 static void scsi_do_read_cb(void *opaque, int ret) 374 { 375 SCSIDiskReq *r = (SCSIDiskReq *)opaque; 376 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 377 378 assert (r->req.aiocb != NULL); 379 r->req.aiocb = NULL; 380 381 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); 382 if (ret < 0) { 383 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct); 384 } else { 385 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); 386 } 387 scsi_do_read(opaque, ret); 388 aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); 389 } 390 391 /* Read more data from scsi device into buffer. */ 392 static void scsi_read_data(SCSIRequest *req) 393 { 394 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 395 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 396 bool first; 397 398 trace_scsi_disk_read_data_count(r->sector_count); 399 if (r->sector_count == 0) { 400 /* This also clears the sense buffer for REQUEST SENSE. */ 401 scsi_req_complete(&r->req, GOOD); 402 return; 403 } 404 405 /* No data transfer may already be in progress */ 406 assert(r->req.aiocb == NULL); 407 408 /* The request is used as the AIO opaque value, so add a ref. */ 409 scsi_req_ref(&r->req); 410 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { 411 trace_scsi_disk_read_data_invalid(); 412 scsi_read_complete_noio(r, -EINVAL); 413 return; 414 } 415 416 if (!blk_is_available(req->dev->conf.blk)) { 417 scsi_read_complete_noio(r, -ENOMEDIUM); 418 return; 419 } 420 421 first = !r->started; 422 r->started = true; 423 if (first && r->need_fua_emulation) { 424 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0, 425 BLOCK_ACCT_FLUSH); 426 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_do_read_cb, r); 427 } else { 428 scsi_do_read(r, 0); 429 } 430 } 431 432 /* 433 * scsi_handle_rw_error has two return values. False means that the error 434 * must be ignored, true means that the error has been processed and the 435 * caller should not do anything else for this request. Note that 436 * scsi_handle_rw_error always manages its reference counts, independent 437 * of the return value. 
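 *
 * A typical caller therefore looks roughly like the check in
 * scsi_disk_req_check_error() above (illustrative sketch only):
 *
 *     if (ret < 0 || (r->status && *r->status)) {
 *         return scsi_handle_rw_error(r, -ret, acct_failed);
 *     }
 *     ... on success, keep processing the request ...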
438 */ 439 static bool scsi_handle_rw_error(SCSIDiskReq *r, int error, bool acct_failed) 440 { 441 bool is_read = (r->req.cmd.mode == SCSI_XFER_FROM_DEV); 442 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 443 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s)); 444 BlockErrorAction action = blk_get_error_action(s->qdev.conf.blk, 445 is_read, error); 446 447 if (action == BLOCK_ERROR_ACTION_REPORT) { 448 if (acct_failed) { 449 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct); 450 } 451 switch (error) { 452 case 0: 453 /* A passthrough command has run and has produced sense data; check 454 * whether the error has to be handled by the guest or should rather 455 * pause the host. 456 */ 457 assert(r->status && *r->status); 458 if (scsi_sense_buf_is_guest_recoverable(r->req.sense, sizeof(r->req.sense))) { 459 /* These errors are handled by guest. */ 460 sdc->update_sense(&r->req); 461 scsi_req_complete(&r->req, *r->status); 462 return true; 463 } 464 error = scsi_sense_buf_to_errno(r->req.sense, sizeof(r->req.sense)); 465 break; 466 case ENOMEDIUM: 467 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM)); 468 break; 469 case ENOMEM: 470 scsi_check_condition(r, SENSE_CODE(TARGET_FAILURE)); 471 break; 472 case EINVAL: 473 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 474 break; 475 case ENOSPC: 476 scsi_check_condition(r, SENSE_CODE(SPACE_ALLOC_FAILED)); 477 break; 478 default: 479 scsi_check_condition(r, SENSE_CODE(IO_ERROR)); 480 break; 481 } 482 } 483 484 blk_error_action(s->qdev.conf.blk, action, is_read, error); 485 if (action == BLOCK_ERROR_ACTION_IGNORE) { 486 scsi_req_complete(&r->req, 0); 487 return true; 488 } 489 490 if (action == BLOCK_ERROR_ACTION_STOP) { 491 scsi_req_retry(&r->req); 492 } 493 return true; 494 } 495 496 static void scsi_write_complete_noio(SCSIDiskReq *r, int ret) 497 { 498 uint32_t n; 499 500 assert (r->req.aiocb == NULL); 501 if (scsi_disk_req_check_error(r, ret, false)) { 502 goto done; 503 } 504 505 n = r->qiov.size / 512; 506 r->sector += n; 507 r->sector_count -= n; 508 if (r->sector_count == 0) { 509 scsi_write_do_fua(r); 510 return; 511 } else { 512 scsi_init_iovec(r, SCSI_DMA_BUF_SIZE); 513 trace_scsi_disk_write_complete_noio(r->req.tag, r->qiov.size); 514 scsi_req_data(&r->req, r->qiov.size); 515 } 516 517 done: 518 scsi_req_unref(&r->req); 519 } 520 521 static void scsi_write_complete(void * opaque, int ret) 522 { 523 SCSIDiskReq *r = (SCSIDiskReq *)opaque; 524 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 525 526 assert (r->req.aiocb != NULL); 527 r->req.aiocb = NULL; 528 529 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); 530 if (ret < 0) { 531 block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct); 532 } else { 533 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); 534 } 535 scsi_write_complete_noio(r, ret); 536 aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); 537 } 538 539 static void scsi_write_data(SCSIRequest *req) 540 { 541 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 542 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 543 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s)); 544 545 /* No data transfer may already be in progress */ 546 assert(r->req.aiocb == NULL); 547 548 /* The request is used as the AIO opaque value, so add a ref. 
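 *
 * Calling convention of the code below: the first call arrives with
 * neither a scatter/gather list nor buffered data and only asks the HBA
 * for more data; VERIFY commands consume the data without issuing a
 * write; everything else is submitted via dma_blk_io() when a guest
 * s/g list is present, or via sdc->dma_writev on the bounce buffer
 * otherwise.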
*/ 549 scsi_req_ref(&r->req); 550 if (r->req.cmd.mode != SCSI_XFER_TO_DEV) { 551 trace_scsi_disk_write_data_invalid(); 552 scsi_write_complete_noio(r, -EINVAL); 553 return; 554 } 555 556 if (!r->req.sg && !r->qiov.size) { 557 /* Called for the first time. Ask the driver to send us more data. */ 558 r->started = true; 559 scsi_write_complete_noio(r, 0); 560 return; 561 } 562 if (!blk_is_available(req->dev->conf.blk)) { 563 scsi_write_complete_noio(r, -ENOMEDIUM); 564 return; 565 } 566 567 if (r->req.cmd.buf[0] == VERIFY_10 || r->req.cmd.buf[0] == VERIFY_12 || 568 r->req.cmd.buf[0] == VERIFY_16) { 569 if (r->req.sg) { 570 scsi_dma_complete_noio(r, 0); 571 } else { 572 scsi_write_complete_noio(r, 0); 573 } 574 return; 575 } 576 577 if (r->req.sg) { 578 dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_WRITE); 579 r->req.resid -= r->req.sg->size; 580 r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk), 581 r->req.sg, r->sector << BDRV_SECTOR_BITS, 582 BDRV_SECTOR_SIZE, 583 sdc->dma_writev, r, scsi_dma_complete, r, 584 DMA_DIRECTION_TO_DEVICE); 585 } else { 586 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 587 r->qiov.size, BLOCK_ACCT_WRITE); 588 r->req.aiocb = sdc->dma_writev(r->sector << BDRV_SECTOR_BITS, &r->qiov, 589 scsi_write_complete, r, r); 590 } 591 } 592 593 /* Return a pointer to the data buffer. */ 594 static uint8_t *scsi_get_buf(SCSIRequest *req) 595 { 596 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 597 598 return (uint8_t *)r->iov.iov_base; 599 } 600 601 static int scsi_disk_emulate_vpd_page(SCSIRequest *req, uint8_t *outbuf) 602 { 603 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 604 uint8_t page_code = req->cmd.buf[2]; 605 int start, buflen = 0; 606 607 outbuf[buflen++] = s->qdev.type & 0x1f; 608 outbuf[buflen++] = page_code; 609 outbuf[buflen++] = 0x00; 610 outbuf[buflen++] = 0x00; 611 start = buflen; 612 613 switch (page_code) { 614 case 0x00: /* Supported page codes, mandatory */ 615 { 616 trace_scsi_disk_emulate_vpd_page_00(req->cmd.xfer); 617 outbuf[buflen++] = 0x00; /* list of supported pages (this page) */ 618 if (s->serial) { 619 outbuf[buflen++] = 0x80; /* unit serial number */ 620 } 621 outbuf[buflen++] = 0x83; /* device identification */ 622 if (s->qdev.type == TYPE_DISK) { 623 outbuf[buflen++] = 0xb0; /* block limits */ 624 outbuf[buflen++] = 0xb1; /* block device characteristics */ 625 outbuf[buflen++] = 0xb2; /* thin provisioning */ 626 } 627 break; 628 } 629 case 0x80: /* Device serial number, optional */ 630 { 631 int l; 632 633 if (!s->serial) { 634 trace_scsi_disk_emulate_vpd_page_80_not_supported(); 635 return -1; 636 } 637 638 l = strlen(s->serial); 639 if (l > 36) { 640 l = 36; 641 } 642 643 trace_scsi_disk_emulate_vpd_page_80(req->cmd.xfer); 644 memcpy(outbuf + buflen, s->serial, l); 645 buflen += l; 646 break; 647 } 648 649 case 0x83: /* Device identification page, mandatory */ 650 { 651 int id_len = s->device_id ? 
MIN(strlen(s->device_id), 255 - 8) : 0; 652 653 trace_scsi_disk_emulate_vpd_page_83(req->cmd.xfer); 654 655 if (id_len) { 656 outbuf[buflen++] = 0x2; /* ASCII */ 657 outbuf[buflen++] = 0; /* not officially assigned */ 658 outbuf[buflen++] = 0; /* reserved */ 659 outbuf[buflen++] = id_len; /* length of data following */ 660 memcpy(outbuf + buflen, s->device_id, id_len); 661 buflen += id_len; 662 } 663 664 if (s->qdev.wwn) { 665 outbuf[buflen++] = 0x1; /* Binary */ 666 outbuf[buflen++] = 0x3; /* NAA */ 667 outbuf[buflen++] = 0; /* reserved */ 668 outbuf[buflen++] = 8; 669 stq_be_p(&outbuf[buflen], s->qdev.wwn); 670 buflen += 8; 671 } 672 673 if (s->qdev.port_wwn) { 674 outbuf[buflen++] = 0x61; /* SAS / Binary */ 675 outbuf[buflen++] = 0x93; /* PIV / Target port / NAA */ 676 outbuf[buflen++] = 0; /* reserved */ 677 outbuf[buflen++] = 8; 678 stq_be_p(&outbuf[buflen], s->qdev.port_wwn); 679 buflen += 8; 680 } 681 682 if (s->port_index) { 683 outbuf[buflen++] = 0x61; /* SAS / Binary */ 684 685 /* PIV/Target port/relative target port */ 686 outbuf[buflen++] = 0x94; 687 688 outbuf[buflen++] = 0; /* reserved */ 689 outbuf[buflen++] = 4; 690 stw_be_p(&outbuf[buflen + 2], s->port_index); 691 buflen += 4; 692 } 693 break; 694 } 695 case 0xb0: /* block limits */ 696 { 697 SCSIBlockLimits bl = {}; 698 699 if (s->qdev.type == TYPE_ROM) { 700 trace_scsi_disk_emulate_vpd_page_b0_not_supported(); 701 return -1; 702 } 703 bl.wsnz = 1; 704 bl.unmap_sectors = 705 s->qdev.conf.discard_granularity / s->qdev.blocksize; 706 bl.min_io_size = 707 s->qdev.conf.min_io_size / s->qdev.blocksize; 708 bl.opt_io_size = 709 s->qdev.conf.opt_io_size / s->qdev.blocksize; 710 bl.max_unmap_sectors = 711 s->max_unmap_size / s->qdev.blocksize; 712 bl.max_io_sectors = 713 s->max_io_size / s->qdev.blocksize; 714 /* 255 descriptors fit in 4 KiB with an 8-byte header */ 715 bl.max_unmap_descr = 255; 716 717 if (s->qdev.type == TYPE_DISK) { 718 int max_transfer_blk = blk_get_max_transfer(s->qdev.conf.blk); 719 int max_io_sectors_blk = 720 max_transfer_blk / s->qdev.blocksize; 721 722 bl.max_io_sectors = 723 MIN_NON_ZERO(max_io_sectors_blk, bl.max_io_sectors); 724 } 725 buflen += scsi_emulate_block_limits(outbuf + buflen, &bl); 726 break; 727 } 728 case 0xb1: /* block device characteristics */ 729 { 730 buflen = 0x40; 731 outbuf[4] = (s->rotation_rate >> 8) & 0xff; 732 outbuf[5] = s->rotation_rate & 0xff; 733 outbuf[6] = 0; /* PRODUCT TYPE */ 734 outbuf[7] = 0; /* WABEREQ | WACEREQ | NOMINAL FORM FACTOR */ 735 outbuf[8] = 0; /* VBULS */ 736 break; 737 } 738 case 0xb2: /* thin provisioning */ 739 { 740 buflen = 8; 741 outbuf[4] = 0; 742 outbuf[5] = 0xe0; /* unmap & write_same 10/16 all supported */ 743 outbuf[6] = s->qdev.conf.discard_granularity ? 
2 : 1; 744 outbuf[7] = 0; 745 break; 746 } 747 default: 748 return -1; 749 } 750 /* done with EVPD */ 751 assert(buflen - start <= 255); 752 outbuf[start - 1] = buflen - start; 753 return buflen; 754 } 755 756 static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf) 757 { 758 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 759 int buflen = 0; 760 761 if (req->cmd.buf[1] & 0x1) { 762 /* Vital product data */ 763 return scsi_disk_emulate_vpd_page(req, outbuf); 764 } 765 766 /* Standard INQUIRY data */ 767 if (req->cmd.buf[2] != 0) { 768 return -1; 769 } 770 771 /* PAGE CODE == 0 */ 772 buflen = req->cmd.xfer; 773 if (buflen > SCSI_MAX_INQUIRY_LEN) { 774 buflen = SCSI_MAX_INQUIRY_LEN; 775 } 776 777 outbuf[0] = s->qdev.type & 0x1f; 778 outbuf[1] = (s->features & (1 << SCSI_DISK_F_REMOVABLE)) ? 0x80 : 0; 779 780 strpadcpy((char *) &outbuf[16], 16, s->product, ' '); 781 strpadcpy((char *) &outbuf[8], 8, s->vendor, ' '); 782 783 memset(&outbuf[32], 0, 4); 784 memcpy(&outbuf[32], s->version, MIN(4, strlen(s->version))); 785 /* 786 * We claim conformance to SPC-3, which is required for guests 787 * to ask for modern features like READ CAPACITY(16) or the 788 * block characteristics VPD page by default. Not all of SPC-3 789 * is actually implemented, but we're good enough. 790 */ 791 outbuf[2] = s->qdev.default_scsi_version; 792 outbuf[3] = 2 | 0x10; /* Format 2, HiSup */ 793 794 if (buflen > 36) { 795 outbuf[4] = buflen - 5; /* Additional Length = (Len - 1) - 4 */ 796 } else { 797 /* If the allocation length of CDB is too small, 798 the additional length is not adjusted */ 799 outbuf[4] = 36 - 5; 800 } 801 802 /* Sync data transfer and TCQ. */ 803 outbuf[7] = 0x10 | (req->bus->info->tcq ? 0x02 : 0); 804 return buflen; 805 } 806 807 static inline bool media_is_dvd(SCSIDiskState *s) 808 { 809 uint64_t nb_sectors; 810 if (s->qdev.type != TYPE_ROM) { 811 return false; 812 } 813 if (!blk_is_available(s->qdev.conf.blk)) { 814 return false; 815 } 816 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 817 return nb_sectors > CD_MAX_SECTORS; 818 } 819 820 static inline bool media_is_cd(SCSIDiskState *s) 821 { 822 uint64_t nb_sectors; 823 if (s->qdev.type != TYPE_ROM) { 824 return false; 825 } 826 if (!blk_is_available(s->qdev.conf.blk)) { 827 return false; 828 } 829 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 830 return nb_sectors <= CD_MAX_SECTORS; 831 } 832 833 static int scsi_read_disc_information(SCSIDiskState *s, SCSIDiskReq *r, 834 uint8_t *outbuf) 835 { 836 uint8_t type = r->req.cmd.buf[1] & 7; 837 838 if (s->qdev.type != TYPE_ROM) { 839 return -1; 840 } 841 842 /* Types 1/2 are only defined for Blu-Ray. 
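 * (The data type field is CDB byte 1, bits 0..2; per MMC, 000b requests
 * the standard Disc Information block, 001b Track Resources and 010b POW
 * Resources, and only type 0 is emulated here.)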
*/ 843 if (type != 0) { 844 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 845 return -1; 846 } 847 848 memset(outbuf, 0, 34); 849 outbuf[1] = 32; 850 outbuf[2] = 0xe; /* last session complete, disc finalized */ 851 outbuf[3] = 1; /* first track on disc */ 852 outbuf[4] = 1; /* # of sessions */ 853 outbuf[5] = 1; /* first track of last session */ 854 outbuf[6] = 1; /* last track of last session */ 855 outbuf[7] = 0x20; /* unrestricted use */ 856 outbuf[8] = 0x00; /* CD-ROM or DVD-ROM */ 857 /* 9-10-11: most significant byte corresponding bytes 4-5-6 */ 858 /* 12-23: not meaningful for CD-ROM or DVD-ROM */ 859 /* 24-31: disc bar code */ 860 /* 32: disc application code */ 861 /* 33: number of OPC tables */ 862 863 return 34; 864 } 865 866 static int scsi_read_dvd_structure(SCSIDiskState *s, SCSIDiskReq *r, 867 uint8_t *outbuf) 868 { 869 static const int rds_caps_size[5] = { 870 [0] = 2048 + 4, 871 [1] = 4 + 4, 872 [3] = 188 + 4, 873 [4] = 2048 + 4, 874 }; 875 876 uint8_t media = r->req.cmd.buf[1]; 877 uint8_t layer = r->req.cmd.buf[6]; 878 uint8_t format = r->req.cmd.buf[7]; 879 int size = -1; 880 881 if (s->qdev.type != TYPE_ROM) { 882 return -1; 883 } 884 if (media != 0) { 885 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 886 return -1; 887 } 888 889 if (format != 0xff) { 890 if (!blk_is_available(s->qdev.conf.blk)) { 891 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM)); 892 return -1; 893 } 894 if (media_is_cd(s)) { 895 scsi_check_condition(r, SENSE_CODE(INCOMPATIBLE_FORMAT)); 896 return -1; 897 } 898 if (format >= ARRAY_SIZE(rds_caps_size)) { 899 return -1; 900 } 901 size = rds_caps_size[format]; 902 memset(outbuf, 0, size); 903 } 904 905 switch (format) { 906 case 0x00: { 907 /* Physical format information */ 908 uint64_t nb_sectors; 909 if (layer != 0) { 910 goto fail; 911 } 912 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 913 914 outbuf[4] = 1; /* DVD-ROM, part version 1 */ 915 outbuf[5] = 0xf; /* 120mm disc, minimum rate unspecified */ 916 outbuf[6] = 1; /* one layer, read-only (per MMC-2 spec) */ 917 outbuf[7] = 0; /* default densities */ 918 919 stl_be_p(&outbuf[12], (nb_sectors >> 2) - 1); /* end sector */ 920 stl_be_p(&outbuf[16], (nb_sectors >> 2) - 1); /* l0 end sector */ 921 break; 922 } 923 924 case 0x01: /* DVD copyright information, all zeros */ 925 break; 926 927 case 0x03: /* BCA information - invalid field for no BCA info */ 928 return -1; 929 930 case 0x04: /* DVD disc manufacturing information, all zeros */ 931 break; 932 933 case 0xff: { /* List capabilities */ 934 int i; 935 size = 4; 936 for (i = 0; i < ARRAY_SIZE(rds_caps_size); i++) { 937 if (!rds_caps_size[i]) { 938 continue; 939 } 940 outbuf[size] = i; 941 outbuf[size + 1] = 0x40; /* Not writable, readable */ 942 stw_be_p(&outbuf[size + 2], rds_caps_size[i]); 943 size += 4; 944 } 945 break; 946 } 947 948 default: 949 return -1; 950 } 951 952 /* Size of buffer, not including 2 byte size field */ 953 stw_be_p(outbuf, size - 2); 954 return size; 955 956 fail: 957 return -1; 958 } 959 960 static int scsi_event_status_media(SCSIDiskState *s, uint8_t *outbuf) 961 { 962 uint8_t event_code, media_status; 963 964 media_status = 0; 965 if (s->tray_open) { 966 media_status = MS_TRAY_OPEN; 967 } else if (blk_is_inserted(s->qdev.conf.blk)) { 968 media_status = MS_MEDIA_PRESENT; 969 } 970 971 /* Event notification descriptor */ 972 event_code = MEC_NO_CHANGE; 973 if (media_status != MS_TRAY_OPEN) { 974 if (s->media_event) { 975 event_code = MEC_NEW_MEDIA; 976 s->media_event = false; 977 } else if 
(s->eject_request) { 978 event_code = MEC_EJECT_REQUESTED; 979 s->eject_request = false; 980 } 981 } 982 983 outbuf[0] = event_code; 984 outbuf[1] = media_status; 985 986 /* These fields are reserved, just clear them. */ 987 outbuf[2] = 0; 988 outbuf[3] = 0; 989 return 4; 990 } 991 992 static int scsi_get_event_status_notification(SCSIDiskState *s, SCSIDiskReq *r, 993 uint8_t *outbuf) 994 { 995 int size; 996 uint8_t *buf = r->req.cmd.buf; 997 uint8_t notification_class_request = buf[4]; 998 if (s->qdev.type != TYPE_ROM) { 999 return -1; 1000 } 1001 if ((buf[1] & 1) == 0) { 1002 /* asynchronous */ 1003 return -1; 1004 } 1005 1006 size = 4; 1007 outbuf[0] = outbuf[1] = 0; 1008 outbuf[3] = 1 << GESN_MEDIA; /* supported events */ 1009 if (notification_class_request & (1 << GESN_MEDIA)) { 1010 outbuf[2] = GESN_MEDIA; 1011 size += scsi_event_status_media(s, &outbuf[size]); 1012 } else { 1013 outbuf[2] = 0x80; 1014 } 1015 stw_be_p(outbuf, size - 4); 1016 return size; 1017 } 1018 1019 static int scsi_get_configuration(SCSIDiskState *s, uint8_t *outbuf) 1020 { 1021 int current; 1022 1023 if (s->qdev.type != TYPE_ROM) { 1024 return -1; 1025 } 1026 1027 if (media_is_dvd(s)) { 1028 current = MMC_PROFILE_DVD_ROM; 1029 } else if (media_is_cd(s)) { 1030 current = MMC_PROFILE_CD_ROM; 1031 } else { 1032 current = MMC_PROFILE_NONE; 1033 } 1034 1035 memset(outbuf, 0, 40); 1036 stl_be_p(&outbuf[0], 36); /* Bytes after the data length field */ 1037 stw_be_p(&outbuf[6], current); 1038 /* outbuf[8] - outbuf[19]: Feature 0 - Profile list */ 1039 outbuf[10] = 0x03; /* persistent, current */ 1040 outbuf[11] = 8; /* two profiles */ 1041 stw_be_p(&outbuf[12], MMC_PROFILE_DVD_ROM); 1042 outbuf[14] = (current == MMC_PROFILE_DVD_ROM); 1043 stw_be_p(&outbuf[16], MMC_PROFILE_CD_ROM); 1044 outbuf[18] = (current == MMC_PROFILE_CD_ROM); 1045 /* outbuf[20] - outbuf[31]: Feature 1 - Core feature */ 1046 stw_be_p(&outbuf[20], 1); 1047 outbuf[22] = 0x08 | 0x03; /* version 2, persistent, current */ 1048 outbuf[23] = 8; 1049 stl_be_p(&outbuf[24], 1); /* SCSI */ 1050 outbuf[28] = 1; /* DBE = 1, mandatory */ 1051 /* outbuf[32] - outbuf[39]: Feature 3 - Removable media feature */ 1052 stw_be_p(&outbuf[32], 3); 1053 outbuf[34] = 0x08 | 0x03; /* version 2, persistent, current */ 1054 outbuf[35] = 4; 1055 outbuf[36] = 0x39; /* tray, load=1, eject=1, unlocked at powerup, lock=1 */ 1056 /* TODO: Random readable, CD read, DVD read, drive serial number, 1057 power management */ 1058 return 40; 1059 } 1060 1061 static int scsi_emulate_mechanism_status(SCSIDiskState *s, uint8_t *outbuf) 1062 { 1063 if (s->qdev.type != TYPE_ROM) { 1064 return -1; 1065 } 1066 memset(outbuf, 0, 8); 1067 outbuf[5] = 1; /* CD-ROM */ 1068 return 8; 1069 } 1070 1071 static int mode_sense_page(SCSIDiskState *s, int page, uint8_t **p_outbuf, 1072 int page_control) 1073 { 1074 static const int mode_sense_valid[0x3f] = { 1075 [MODE_PAGE_HD_GEOMETRY] = (1 << TYPE_DISK), 1076 [MODE_PAGE_FLEXIBLE_DISK_GEOMETRY] = (1 << TYPE_DISK), 1077 [MODE_PAGE_CACHING] = (1 << TYPE_DISK) | (1 << TYPE_ROM), 1078 [MODE_PAGE_R_W_ERROR] = (1 << TYPE_DISK) | (1 << TYPE_ROM), 1079 [MODE_PAGE_AUDIO_CTL] = (1 << TYPE_ROM), 1080 [MODE_PAGE_CAPABILITIES] = (1 << TYPE_ROM), 1081 }; 1082 1083 uint8_t *p = *p_outbuf + 2; 1084 int length; 1085 1086 if ((mode_sense_valid[page] & (1 << s->qdev.type)) == 0) { 1087 return -1; 1088 } 1089 1090 /* 1091 * If Changeable Values are requested, a mask denoting those mode parameters 1092 * that are changeable shall be returned. 
As we currently don't support
 * parameter changes via MODE_SELECT all bits are returned set to zero.
 * The buffer was already memset to zero by the caller of this function.
 *
 * The offsets here are off by two compared to the descriptions in the
 * SCSI specs, because those include a 2-byte header.  This is unfortunate,
 * but it is done so that offsets are consistent within our implementation
 * of MODE SENSE and MODE SELECT.  MODE SELECT has to deal with both
 * 2-byte and 4-byte headers.
 */
    switch (page) {
    case MODE_PAGE_HD_GEOMETRY:
        length = 0x16;
        if (page_control == 1) { /* Changeable Values */
            break;
        }
        /* if a geometry hint is available, use it */
        p[0] = (s->qdev.conf.cyls >> 16) & 0xff;
        p[1] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[2] = s->qdev.conf.cyls & 0xff;
        p[3] = s->qdev.conf.heads & 0xff;
        /* Write precomp start cylinder, disabled */
        p[4] = (s->qdev.conf.cyls >> 16) & 0xff;
        p[5] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[6] = s->qdev.conf.cyls & 0xff;
        /* Reduced current start cylinder, disabled */
        p[7] = (s->qdev.conf.cyls >> 16) & 0xff;
        p[8] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[9] = s->qdev.conf.cyls & 0xff;
        /* Device step rate [ns], 200ns */
        p[10] = 0;
        p[11] = 200;
        /* Landing zone cylinder */
        p[12] = 0xff;
        p[13] = 0xff;
        p[14] = 0xff;
        /* Medium rotation rate [rpm], 5400 rpm */
        p[18] = (5400 >> 8) & 0xff;
        p[19] = 5400 & 0xff;
        break;

    case MODE_PAGE_FLEXIBLE_DISK_GEOMETRY:
        length = 0x1e;
        if (page_control == 1) { /* Changeable Values */
            break;
        }
        /* Transfer rate [kbit/s], 5Mbit/s */
        p[0] = 5000 >> 8;
        p[1] = 5000 & 0xff;
        /* if a geometry hint is available, use it */
        p[2] = s->qdev.conf.heads & 0xff;
        p[3] = s->qdev.conf.secs & 0xff;
        p[4] = s->qdev.blocksize >> 8;
        p[6] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[7] = s->qdev.conf.cyls & 0xff;
        /* Write precomp start cylinder, disabled */
        p[8] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[9] = s->qdev.conf.cyls & 0xff;
        /* Reduced current start cylinder, disabled */
        p[10] = (s->qdev.conf.cyls >> 8) & 0xff;
        p[11] = s->qdev.conf.cyls & 0xff;
        /* Device step rate [100us], 100us */
        p[12] = 0;
        p[13] = 1;
        /* Device step pulse width [us], 1us */
        p[14] = 1;
        /* Device head settle delay [100us], 100us */
        p[15] = 0;
        p[16] = 1;
        /* Motor on delay [0.1s], 0.1s */
        p[17] = 1;
        /* Motor off delay [0.1s], 0.1s */
        p[18] = 1;
        /* Medium rotation rate [rpm], 5400 rpm */
        p[26] = (5400 >> 8) & 0xff;
        p[27] = 5400 & 0xff;
        break;

    case MODE_PAGE_CACHING:
        length = 0x12;
        if (page_control == 1 || /* Changeable Values */
            blk_enable_write_cache(s->qdev.conf.blk)) {
            p[0] = 4; /* WCE */
        }
        break;

    case MODE_PAGE_R_W_ERROR:
        length = 10;
        if (page_control == 1) { /* Changeable Values */
            break;
        }
        p[0] = 0x80; /* Automatic Write Reallocation Enabled */
        if (s->qdev.type == TYPE_ROM) {
            p[1] = 0x20; /* Read Retry Count */
        }
        break;

    case MODE_PAGE_AUDIO_CTL:
        length = 14;
        break;

    case MODE_PAGE_CAPABILITIES:
        length = 0x14;
        if (page_control == 1) { /* Changeable Values */
            break;
        }

        p[0] = 0x3b; /* CD-R & CD-RW read */
        p[1] = 0; /* Writing not supported */
        p[2] = 0x7f; /* Audio,
composite, digital out, 1202 mode 2 form 1&2, multi session */ 1203 p[3] = 0xff; /* CD DA, DA accurate, RW supported, 1204 RW corrected, C2 errors, ISRC, 1205 UPC, Bar code */ 1206 p[4] = 0x2d | (s->tray_locked ? 2 : 0); 1207 /* Locking supported, jumper present, eject, tray */ 1208 p[5] = 0; /* no volume & mute control, no 1209 changer */ 1210 p[6] = (50 * 176) >> 8; /* 50x read speed */ 1211 p[7] = (50 * 176) & 0xff; 1212 p[8] = 2 >> 8; /* Two volume levels */ 1213 p[9] = 2 & 0xff; 1214 p[10] = 2048 >> 8; /* 2M buffer */ 1215 p[11] = 2048 & 0xff; 1216 p[12] = (16 * 176) >> 8; /* 16x read speed current */ 1217 p[13] = (16 * 176) & 0xff; 1218 p[16] = (16 * 176) >> 8; /* 16x write speed */ 1219 p[17] = (16 * 176) & 0xff; 1220 p[18] = (16 * 176) >> 8; /* 16x write speed current */ 1221 p[19] = (16 * 176) & 0xff; 1222 break; 1223 1224 default: 1225 return -1; 1226 } 1227 1228 assert(length < 256); 1229 (*p_outbuf)[0] = page; 1230 (*p_outbuf)[1] = length; 1231 *p_outbuf += length + 2; 1232 return length + 2; 1233 } 1234 1235 static int scsi_disk_emulate_mode_sense(SCSIDiskReq *r, uint8_t *outbuf) 1236 { 1237 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1238 uint64_t nb_sectors; 1239 bool dbd; 1240 int page, buflen, ret, page_control; 1241 uint8_t *p; 1242 uint8_t dev_specific_param; 1243 1244 dbd = (r->req.cmd.buf[1] & 0x8) != 0; 1245 page = r->req.cmd.buf[2] & 0x3f; 1246 page_control = (r->req.cmd.buf[2] & 0xc0) >> 6; 1247 1248 trace_scsi_disk_emulate_mode_sense((r->req.cmd.buf[0] == MODE_SENSE) ? 6 : 1249 10, page, r->req.cmd.xfer, page_control); 1250 memset(outbuf, 0, r->req.cmd.xfer); 1251 p = outbuf; 1252 1253 if (s->qdev.type == TYPE_DISK) { 1254 dev_specific_param = s->features & (1 << SCSI_DISK_F_DPOFUA) ? 0x10 : 0; 1255 if (blk_is_read_only(s->qdev.conf.blk)) { 1256 dev_specific_param |= 0x80; /* Readonly. */ 1257 } 1258 } else { 1259 /* MMC prescribes that CD/DVD drives have no block descriptors, 1260 * and defines no device-specific parameter. */ 1261 dev_specific_param = 0x00; 1262 dbd = true; 1263 } 1264 1265 if (r->req.cmd.buf[0] == MODE_SENSE) { 1266 p[1] = 0; /* Default media type. */ 1267 p[2] = dev_specific_param; 1268 p[3] = 0; /* Block descriptor length. */ 1269 p += 4; 1270 } else { /* MODE_SENSE_10 */ 1271 p[2] = 0; /* Default media type. */ 1272 p[3] = dev_specific_param; 1273 p[6] = p[7] = 0; /* Block descriptor length. 
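 *
 * When a block descriptor is appended below (!dbd and a medium is
 * present), it uses the 8-byte short-LBA form: a density code, a 3-byte
 * big-endian number of blocks (zeroed when the medium exceeds 0xffffff
 * blocks), a reserved byte and a 3-byte block length.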
*/ 1274 p += 8; 1275 } 1276 1277 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 1278 if (!dbd && nb_sectors) { 1279 if (r->req.cmd.buf[0] == MODE_SENSE) { 1280 outbuf[3] = 8; /* Block descriptor length */ 1281 } else { /* MODE_SENSE_10 */ 1282 outbuf[7] = 8; /* Block descriptor length */ 1283 } 1284 nb_sectors /= (s->qdev.blocksize / 512); 1285 if (nb_sectors > 0xffffff) { 1286 nb_sectors = 0; 1287 } 1288 p[0] = 0; /* media density code */ 1289 p[1] = (nb_sectors >> 16) & 0xff; 1290 p[2] = (nb_sectors >> 8) & 0xff; 1291 p[3] = nb_sectors & 0xff; 1292 p[4] = 0; /* reserved */ 1293 p[5] = 0; /* bytes 5-7 are the sector size in bytes */ 1294 p[6] = s->qdev.blocksize >> 8; 1295 p[7] = 0; 1296 p += 8; 1297 } 1298 1299 if (page_control == 3) { 1300 /* Saved Values */ 1301 scsi_check_condition(r, SENSE_CODE(SAVING_PARAMS_NOT_SUPPORTED)); 1302 return -1; 1303 } 1304 1305 if (page == 0x3f) { 1306 for (page = 0; page <= 0x3e; page++) { 1307 mode_sense_page(s, page, &p, page_control); 1308 } 1309 } else { 1310 ret = mode_sense_page(s, page, &p, page_control); 1311 if (ret == -1) { 1312 return -1; 1313 } 1314 } 1315 1316 buflen = p - outbuf; 1317 /* 1318 * The mode data length field specifies the length in bytes of the 1319 * following data that is available to be transferred. The mode data 1320 * length does not include itself. 1321 */ 1322 if (r->req.cmd.buf[0] == MODE_SENSE) { 1323 outbuf[0] = buflen - 1; 1324 } else { /* MODE_SENSE_10 */ 1325 outbuf[0] = ((buflen - 2) >> 8) & 0xff; 1326 outbuf[1] = (buflen - 2) & 0xff; 1327 } 1328 return buflen; 1329 } 1330 1331 static int scsi_disk_emulate_read_toc(SCSIRequest *req, uint8_t *outbuf) 1332 { 1333 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 1334 int start_track, format, msf, toclen; 1335 uint64_t nb_sectors; 1336 1337 msf = req->cmd.buf[1] & 2; 1338 format = req->cmd.buf[2] & 0xf; 1339 start_track = req->cmd.buf[6]; 1340 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 1341 trace_scsi_disk_emulate_read_toc(start_track, format, msf >> 1); 1342 nb_sectors /= s->qdev.blocksize / 512; 1343 switch (format) { 1344 case 0: 1345 toclen = cdrom_read_toc(nb_sectors, outbuf, msf, start_track); 1346 break; 1347 case 1: 1348 /* multi session : only a single session defined */ 1349 toclen = 12; 1350 memset(outbuf, 0, 12); 1351 outbuf[1] = 0x0a; 1352 outbuf[2] = 0x01; 1353 outbuf[3] = 0x01; 1354 break; 1355 case 2: 1356 toclen = cdrom_read_toc_raw(nb_sectors, outbuf, msf, start_track); 1357 break; 1358 default: 1359 return -1; 1360 } 1361 return toclen; 1362 } 1363 1364 static int scsi_disk_emulate_start_stop(SCSIDiskReq *r) 1365 { 1366 SCSIRequest *req = &r->req; 1367 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 1368 bool start = req->cmd.buf[4] & 1; 1369 bool loej = req->cmd.buf[4] & 2; /* load on start, eject on !start */ 1370 int pwrcnd = req->cmd.buf[4] & 0xf0; 1371 1372 if (pwrcnd) { 1373 /* eject/load only happens for power condition == 0 */ 1374 return 0; 1375 } 1376 1377 if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) && loej) { 1378 if (!start && !s->tray_open && s->tray_locked) { 1379 scsi_check_condition(r, 1380 blk_is_inserted(s->qdev.conf.blk) 1381 ? 
SENSE_CODE(ILLEGAL_REQ_REMOVAL_PREVENTED) 1382 : SENSE_CODE(NOT_READY_REMOVAL_PREVENTED)); 1383 return -1; 1384 } 1385 1386 if (s->tray_open != !start) { 1387 blk_eject(s->qdev.conf.blk, !start); 1388 s->tray_open = !start; 1389 } 1390 } 1391 return 0; 1392 } 1393 1394 static void scsi_disk_emulate_read_data(SCSIRequest *req) 1395 { 1396 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 1397 int buflen = r->iov.iov_len; 1398 1399 if (buflen) { 1400 trace_scsi_disk_emulate_read_data(buflen); 1401 r->iov.iov_len = 0; 1402 r->started = true; 1403 scsi_req_data(&r->req, buflen); 1404 return; 1405 } 1406 1407 /* This also clears the sense buffer for REQUEST SENSE. */ 1408 scsi_req_complete(&r->req, GOOD); 1409 } 1410 1411 static int scsi_disk_check_mode_select(SCSIDiskState *s, int page, 1412 uint8_t *inbuf, int inlen) 1413 { 1414 uint8_t mode_current[SCSI_MAX_MODE_LEN]; 1415 uint8_t mode_changeable[SCSI_MAX_MODE_LEN]; 1416 uint8_t *p; 1417 int len, expected_len, changeable_len, i; 1418 1419 /* The input buffer does not include the page header, so it is 1420 * off by 2 bytes. 1421 */ 1422 expected_len = inlen + 2; 1423 if (expected_len > SCSI_MAX_MODE_LEN) { 1424 return -1; 1425 } 1426 1427 p = mode_current; 1428 memset(mode_current, 0, inlen + 2); 1429 len = mode_sense_page(s, page, &p, 0); 1430 if (len < 0 || len != expected_len) { 1431 return -1; 1432 } 1433 1434 p = mode_changeable; 1435 memset(mode_changeable, 0, inlen + 2); 1436 changeable_len = mode_sense_page(s, page, &p, 1); 1437 assert(changeable_len == len); 1438 1439 /* Check that unchangeable bits are the same as what MODE SENSE 1440 * would return. 1441 */ 1442 for (i = 2; i < len; i++) { 1443 if (((mode_current[i] ^ inbuf[i - 2]) & ~mode_changeable[i]) != 0) { 1444 return -1; 1445 } 1446 } 1447 return 0; 1448 } 1449 1450 static void scsi_disk_apply_mode_select(SCSIDiskState *s, int page, uint8_t *p) 1451 { 1452 switch (page) { 1453 case MODE_PAGE_CACHING: 1454 blk_set_enable_write_cache(s->qdev.conf.blk, (p[0] & 4) != 0); 1455 break; 1456 1457 default: 1458 break; 1459 } 1460 } 1461 1462 static int mode_select_pages(SCSIDiskReq *r, uint8_t *p, int len, bool change) 1463 { 1464 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1465 1466 while (len > 0) { 1467 int page, subpage, page_len; 1468 1469 /* Parse both possible formats for the mode page headers. 
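 *
 * Per SPC these are the page_0 format (a page code byte with bit 0x40
 * clear followed by a one-byte page length) and the sub_page format
 * (bit 0x40 set, a subpage code byte and a 2-byte big-endian page
 * length), which is exactly what the two branches below decode.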
*/ 1470 page = p[0] & 0x3f; 1471 if (p[0] & 0x40) { 1472 if (len < 4) { 1473 goto invalid_param_len; 1474 } 1475 subpage = p[1]; 1476 page_len = lduw_be_p(&p[2]); 1477 p += 4; 1478 len -= 4; 1479 } else { 1480 if (len < 2) { 1481 goto invalid_param_len; 1482 } 1483 subpage = 0; 1484 page_len = p[1]; 1485 p += 2; 1486 len -= 2; 1487 } 1488 1489 if (subpage) { 1490 goto invalid_param; 1491 } 1492 if (page_len > len) { 1493 goto invalid_param_len; 1494 } 1495 1496 if (!change) { 1497 if (scsi_disk_check_mode_select(s, page, p, page_len) < 0) { 1498 goto invalid_param; 1499 } 1500 } else { 1501 scsi_disk_apply_mode_select(s, page, p); 1502 } 1503 1504 p += page_len; 1505 len -= page_len; 1506 } 1507 return 0; 1508 1509 invalid_param: 1510 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM)); 1511 return -1; 1512 1513 invalid_param_len: 1514 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN)); 1515 return -1; 1516 } 1517 1518 static void scsi_disk_emulate_mode_select(SCSIDiskReq *r, uint8_t *inbuf) 1519 { 1520 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1521 uint8_t *p = inbuf; 1522 int cmd = r->req.cmd.buf[0]; 1523 int len = r->req.cmd.xfer; 1524 int hdr_len = (cmd == MODE_SELECT ? 4 : 8); 1525 int bd_len; 1526 int pass; 1527 1528 /* We only support PF=1, SP=0. */ 1529 if ((r->req.cmd.buf[1] & 0x11) != 0x10) { 1530 goto invalid_field; 1531 } 1532 1533 if (len < hdr_len) { 1534 goto invalid_param_len; 1535 } 1536 1537 bd_len = (cmd == MODE_SELECT ? p[3] : lduw_be_p(&p[6])); 1538 len -= hdr_len; 1539 p += hdr_len; 1540 if (len < bd_len) { 1541 goto invalid_param_len; 1542 } 1543 if (bd_len != 0 && bd_len != 8) { 1544 goto invalid_param; 1545 } 1546 1547 len -= bd_len; 1548 p += bd_len; 1549 1550 /* Ensure no change is made if there is an error! */ 1551 for (pass = 0; pass < 2; pass++) { 1552 if (mode_select_pages(r, p, len, pass == 1) < 0) { 1553 assert(pass == 0); 1554 return; 1555 } 1556 } 1557 if (!blk_enable_write_cache(s->qdev.conf.blk)) { 1558 /* The request is used as the AIO opaque value, so add a ref. */ 1559 scsi_req_ref(&r->req); 1560 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0, 1561 BLOCK_ACCT_FLUSH); 1562 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r); 1563 return; 1564 } 1565 1566 scsi_req_complete(&r->req, GOOD); 1567 return; 1568 1569 invalid_param: 1570 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM)); 1571 return; 1572 1573 invalid_param_len: 1574 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN)); 1575 return; 1576 1577 invalid_field: 1578 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 1579 } 1580 1581 static inline bool check_lba_range(SCSIDiskState *s, 1582 uint64_t sector_num, uint32_t nb_sectors) 1583 { 1584 /* 1585 * The first line tests that no overflow happens when computing the last 1586 * sector. The second line tests that the last accessed sector is in 1587 * range. 1588 * 1589 * Careful, the computations should not underflow for nb_sectors == 0, 1590 * and a 0-block read to the first LBA beyond the end of device is 1591 * valid. 
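 *
 * Worked example, assuming a hypothetical 1000-block disk
 * (s->qdev.max_lba == 999): (sector_num=1000, nb_sectors=0) is accepted;
 * (sector_num=999, nb_sectors=2) fails the second test; and
 * (sector_num=UINT64_MAX, nb_sectors=1) wraps around and fails the
 * first test.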
1592 */ 1593 return (sector_num <= sector_num + nb_sectors && 1594 sector_num + nb_sectors <= s->qdev.max_lba + 1); 1595 } 1596 1597 typedef struct UnmapCBData { 1598 SCSIDiskReq *r; 1599 uint8_t *inbuf; 1600 int count; 1601 } UnmapCBData; 1602 1603 static void scsi_unmap_complete(void *opaque, int ret); 1604 1605 static void scsi_unmap_complete_noio(UnmapCBData *data, int ret) 1606 { 1607 SCSIDiskReq *r = data->r; 1608 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1609 uint64_t sector_num; 1610 uint32_t nb_sectors; 1611 1612 assert(r->req.aiocb == NULL); 1613 if (scsi_disk_req_check_error(r, ret, false)) { 1614 goto done; 1615 } 1616 1617 if (data->count > 0) { 1618 sector_num = ldq_be_p(&data->inbuf[0]); 1619 nb_sectors = ldl_be_p(&data->inbuf[8]) & 0xffffffffULL; 1620 if (!check_lba_range(s, sector_num, nb_sectors)) { 1621 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE)); 1622 goto done; 1623 } 1624 1625 r->req.aiocb = blk_aio_pdiscard(s->qdev.conf.blk, 1626 sector_num * s->qdev.blocksize, 1627 nb_sectors * s->qdev.blocksize, 1628 scsi_unmap_complete, data); 1629 data->count--; 1630 data->inbuf += 16; 1631 return; 1632 } 1633 1634 scsi_req_complete(&r->req, GOOD); 1635 1636 done: 1637 scsi_req_unref(&r->req); 1638 g_free(data); 1639 } 1640 1641 static void scsi_unmap_complete(void *opaque, int ret) 1642 { 1643 UnmapCBData *data = opaque; 1644 SCSIDiskReq *r = data->r; 1645 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1646 1647 assert(r->req.aiocb != NULL); 1648 r->req.aiocb = NULL; 1649 1650 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); 1651 scsi_unmap_complete_noio(data, ret); 1652 aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); 1653 } 1654 1655 static void scsi_disk_emulate_unmap(SCSIDiskReq *r, uint8_t *inbuf) 1656 { 1657 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1658 uint8_t *p = inbuf; 1659 int len = r->req.cmd.xfer; 1660 UnmapCBData *data; 1661 1662 /* Reject ANCHOR=1. */ 1663 if (r->req.cmd.buf[1] & 0x1) { 1664 goto invalid_field; 1665 } 1666 1667 if (len < 8) { 1668 goto invalid_param_len; 1669 } 1670 if (len < lduw_be_p(&p[0]) + 2) { 1671 goto invalid_param_len; 1672 } 1673 if (len < lduw_be_p(&p[2]) + 8) { 1674 goto invalid_param_len; 1675 } 1676 if (lduw_be_p(&p[2]) & 15) { 1677 goto invalid_param_len; 1678 } 1679 1680 if (blk_is_read_only(s->qdev.conf.blk)) { 1681 scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED)); 1682 return; 1683 } 1684 1685 data = g_new0(UnmapCBData, 1); 1686 data->r = r; 1687 data->inbuf = &p[8]; 1688 data->count = lduw_be_p(&p[2]) >> 4; 1689 1690 /* The matching unref is in scsi_unmap_complete, before data is freed. 
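 *
 * The parameter list validated above follows the SBC layout: a 2-byte
 * UNMAP data length, a 2-byte block descriptor data length (a multiple
 * of 16), four reserved bytes, then one 16-byte descriptor per extent
 * holding an 8-byte big-endian LBA, a 4-byte big-endian block count and
 * four reserved bytes.  scsi_unmap_complete_noio() walks the descriptors
 * one discard at a time.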
*/ 1691 scsi_req_ref(&r->req); 1692 scsi_unmap_complete_noio(data, 0); 1693 return; 1694 1695 invalid_param_len: 1696 scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN)); 1697 return; 1698 1699 invalid_field: 1700 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 1701 } 1702 1703 typedef struct WriteSameCBData { 1704 SCSIDiskReq *r; 1705 int64_t sector; 1706 int nb_sectors; 1707 QEMUIOVector qiov; 1708 struct iovec iov; 1709 } WriteSameCBData; 1710 1711 static void scsi_write_same_complete(void *opaque, int ret) 1712 { 1713 WriteSameCBData *data = opaque; 1714 SCSIDiskReq *r = data->r; 1715 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 1716 1717 assert(r->req.aiocb != NULL); 1718 r->req.aiocb = NULL; 1719 aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk)); 1720 if (scsi_disk_req_check_error(r, ret, true)) { 1721 goto done; 1722 } 1723 1724 block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); 1725 1726 data->nb_sectors -= data->iov.iov_len / 512; 1727 data->sector += data->iov.iov_len / 512; 1728 data->iov.iov_len = MIN(data->nb_sectors * 512, data->iov.iov_len); 1729 if (data->iov.iov_len) { 1730 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 1731 data->iov.iov_len, BLOCK_ACCT_WRITE); 1732 /* Reinitialize qiov, to handle unaligned WRITE SAME request 1733 * where final qiov may need smaller size */ 1734 qemu_iovec_init_external(&data->qiov, &data->iov, 1); 1735 r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk, 1736 data->sector << BDRV_SECTOR_BITS, 1737 &data->qiov, 0, 1738 scsi_write_same_complete, data); 1739 aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); 1740 return; 1741 } 1742 1743 scsi_req_complete(&r->req, GOOD); 1744 1745 done: 1746 scsi_req_unref(&r->req); 1747 qemu_vfree(data->iov.iov_base); 1748 g_free(data); 1749 aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); 1750 } 1751 1752 static void scsi_disk_emulate_write_same(SCSIDiskReq *r, uint8_t *inbuf) 1753 { 1754 SCSIRequest *req = &r->req; 1755 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 1756 uint32_t nb_sectors = scsi_data_cdb_xfer(r->req.cmd.buf); 1757 WriteSameCBData *data; 1758 uint8_t *buf; 1759 int i; 1760 1761 /* Fail if PBDATA=1 or LBDATA=1 or ANCHOR=1. */ 1762 if (nb_sectors == 0 || (req->cmd.buf[1] & 0x16)) { 1763 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 1764 return; 1765 } 1766 1767 if (blk_is_read_only(s->qdev.conf.blk)) { 1768 scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED)); 1769 return; 1770 } 1771 if (!check_lba_range(s, r->req.cmd.lba, nb_sectors)) { 1772 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE)); 1773 return; 1774 } 1775 1776 if ((req->cmd.buf[1] & 0x1) || buffer_is_zero(inbuf, s->qdev.blocksize)) { 1777 int flags = (req->cmd.buf[1] & 0x8) ? BDRV_REQ_MAY_UNMAP : 0; 1778 1779 /* The request is used as the AIO opaque value, so add a ref. 
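 *
 * A payload known to be all zeroes is translated into a single
 * blk_aio_pwrite_zeroes() call, adding BDRV_REQ_MAY_UNMAP when the
 * UNMAP bit (0x08 in CDB byte 1) was set; any other pattern falls
 * through to the bounce-buffer loop below, which replicates the block
 * and writes at most SCSI_WRITE_SAME_MAX bytes per iteration via
 * scsi_write_same_complete().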
*/ 1780 scsi_req_ref(&r->req); 1781 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 1782 nb_sectors * s->qdev.blocksize, 1783 BLOCK_ACCT_WRITE); 1784 r->req.aiocb = blk_aio_pwrite_zeroes(s->qdev.conf.blk, 1785 r->req.cmd.lba * s->qdev.blocksize, 1786 nb_sectors * s->qdev.blocksize, 1787 flags, scsi_aio_complete, r); 1788 return; 1789 } 1790 1791 data = g_new0(WriteSameCBData, 1); 1792 data->r = r; 1793 data->sector = r->req.cmd.lba * (s->qdev.blocksize / 512); 1794 data->nb_sectors = nb_sectors * (s->qdev.blocksize / 512); 1795 data->iov.iov_len = MIN(data->nb_sectors * 512, SCSI_WRITE_SAME_MAX); 1796 data->iov.iov_base = buf = blk_blockalign(s->qdev.conf.blk, 1797 data->iov.iov_len); 1798 qemu_iovec_init_external(&data->qiov, &data->iov, 1); 1799 1800 for (i = 0; i < data->iov.iov_len; i += s->qdev.blocksize) { 1801 memcpy(&buf[i], inbuf, s->qdev.blocksize); 1802 } 1803 1804 scsi_req_ref(&r->req); 1805 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 1806 data->iov.iov_len, BLOCK_ACCT_WRITE); 1807 r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk, 1808 data->sector << BDRV_SECTOR_BITS, 1809 &data->qiov, 0, 1810 scsi_write_same_complete, data); 1811 } 1812 1813 static void scsi_disk_emulate_write_data(SCSIRequest *req) 1814 { 1815 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 1816 1817 if (r->iov.iov_len) { 1818 int buflen = r->iov.iov_len; 1819 trace_scsi_disk_emulate_write_data(buflen); 1820 r->iov.iov_len = 0; 1821 scsi_req_data(&r->req, buflen); 1822 return; 1823 } 1824 1825 switch (req->cmd.buf[0]) { 1826 case MODE_SELECT: 1827 case MODE_SELECT_10: 1828 /* This also clears the sense buffer for REQUEST SENSE. */ 1829 scsi_disk_emulate_mode_select(r, r->iov.iov_base); 1830 break; 1831 1832 case UNMAP: 1833 scsi_disk_emulate_unmap(r, r->iov.iov_base); 1834 break; 1835 1836 case VERIFY_10: 1837 case VERIFY_12: 1838 case VERIFY_16: 1839 if (r->req.status == -1) { 1840 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 1841 } 1842 break; 1843 1844 case WRITE_SAME_10: 1845 case WRITE_SAME_16: 1846 scsi_disk_emulate_write_same(r, r->iov.iov_base); 1847 break; 1848 1849 default: 1850 abort(); 1851 } 1852 } 1853 1854 static int32_t scsi_disk_emulate_command(SCSIRequest *req, uint8_t *buf) 1855 { 1856 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 1857 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 1858 uint64_t nb_sectors; 1859 uint8_t *outbuf; 1860 int buflen; 1861 1862 switch (req->cmd.buf[0]) { 1863 case INQUIRY: 1864 case MODE_SENSE: 1865 case MODE_SENSE_10: 1866 case RESERVE: 1867 case RESERVE_10: 1868 case RELEASE: 1869 case RELEASE_10: 1870 case START_STOP: 1871 case ALLOW_MEDIUM_REMOVAL: 1872 case GET_CONFIGURATION: 1873 case GET_EVENT_STATUS_NOTIFICATION: 1874 case MECHANISM_STATUS: 1875 case REQUEST_SENSE: 1876 break; 1877 1878 default: 1879 if (!blk_is_available(s->qdev.conf.blk)) { 1880 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM)); 1881 return 0; 1882 } 1883 break; 1884 } 1885 1886 /* 1887 * FIXME: we shouldn't return anything bigger than 4k, but the code 1888 * requires the buffer to be as big as req->cmd.xfer in several 1889 * places. So, do not allow CDBs with a very large ALLOCATION 1890 * LENGTH. The real fix would be to modify scsi_read_data and 1891 * dma_buf_read, so that they return data beyond the buflen 1892 * as all zeros. 
1893 */ 1894 if (req->cmd.xfer > 65536) { 1895 goto illegal_request; 1896 } 1897 r->buflen = MAX(4096, req->cmd.xfer); 1898 1899 if (!r->iov.iov_base) { 1900 r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen); 1901 } 1902 1903 buflen = req->cmd.xfer; 1904 outbuf = r->iov.iov_base; 1905 memset(outbuf, 0, r->buflen); 1906 switch (req->cmd.buf[0]) { 1907 case TEST_UNIT_READY: 1908 assert(blk_is_available(s->qdev.conf.blk)); 1909 break; 1910 case INQUIRY: 1911 buflen = scsi_disk_emulate_inquiry(req, outbuf); 1912 if (buflen < 0) { 1913 goto illegal_request; 1914 } 1915 break; 1916 case MODE_SENSE: 1917 case MODE_SENSE_10: 1918 buflen = scsi_disk_emulate_mode_sense(r, outbuf); 1919 if (buflen < 0) { 1920 goto illegal_request; 1921 } 1922 break; 1923 case READ_TOC: 1924 buflen = scsi_disk_emulate_read_toc(req, outbuf); 1925 if (buflen < 0) { 1926 goto illegal_request; 1927 } 1928 break; 1929 case RESERVE: 1930 if (req->cmd.buf[1] & 1) { 1931 goto illegal_request; 1932 } 1933 break; 1934 case RESERVE_10: 1935 if (req->cmd.buf[1] & 3) { 1936 goto illegal_request; 1937 } 1938 break; 1939 case RELEASE: 1940 if (req->cmd.buf[1] & 1) { 1941 goto illegal_request; 1942 } 1943 break; 1944 case RELEASE_10: 1945 if (req->cmd.buf[1] & 3) { 1946 goto illegal_request; 1947 } 1948 break; 1949 case START_STOP: 1950 if (scsi_disk_emulate_start_stop(r) < 0) { 1951 return 0; 1952 } 1953 break; 1954 case ALLOW_MEDIUM_REMOVAL: 1955 s->tray_locked = req->cmd.buf[4] & 1; 1956 blk_lock_medium(s->qdev.conf.blk, req->cmd.buf[4] & 1); 1957 break; 1958 case READ_CAPACITY_10: 1959 /* The normal LEN field for this command is zero. */ 1960 memset(outbuf, 0, 8); 1961 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 1962 if (!nb_sectors) { 1963 scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY)); 1964 return 0; 1965 } 1966 if ((req->cmd.buf[8] & 1) == 0 && req->cmd.lba) { 1967 goto illegal_request; 1968 } 1969 nb_sectors /= s->qdev.blocksize / 512; 1970 /* Returned value is the address of the last sector. */ 1971 nb_sectors--; 1972 /* Remember the new size for read/write sanity checking. */ 1973 s->qdev.max_lba = nb_sectors; 1974 /* Clip to 2TB, instead of returning capacity modulo 2TB. */ 1975 if (nb_sectors > UINT32_MAX) { 1976 nb_sectors = UINT32_MAX; 1977 } 1978 outbuf[0] = (nb_sectors >> 24) & 0xff; 1979 outbuf[1] = (nb_sectors >> 16) & 0xff; 1980 outbuf[2] = (nb_sectors >> 8) & 0xff; 1981 outbuf[3] = nb_sectors & 0xff; 1982 outbuf[4] = 0; 1983 outbuf[5] = 0; 1984 outbuf[6] = s->qdev.blocksize >> 8; 1985 outbuf[7] = 0; 1986 break; 1987 case REQUEST_SENSE: 1988 /* Just return "NO SENSE". 
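 * The DESC bit (CDB byte 1, bit 0) selects the sense format:
 * scsi_convert_sense() below is asked for fixed-format sense when DESC
 * is clear and for descriptor-format sense when it is set.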
*/ 1989 buflen = scsi_convert_sense(NULL, 0, outbuf, r->buflen, 1990 (req->cmd.buf[1] & 1) == 0); 1991 if (buflen < 0) { 1992 goto illegal_request; 1993 } 1994 break; 1995 case MECHANISM_STATUS: 1996 buflen = scsi_emulate_mechanism_status(s, outbuf); 1997 if (buflen < 0) { 1998 goto illegal_request; 1999 } 2000 break; 2001 case GET_CONFIGURATION: 2002 buflen = scsi_get_configuration(s, outbuf); 2003 if (buflen < 0) { 2004 goto illegal_request; 2005 } 2006 break; 2007 case GET_EVENT_STATUS_NOTIFICATION: 2008 buflen = scsi_get_event_status_notification(s, r, outbuf); 2009 if (buflen < 0) { 2010 goto illegal_request; 2011 } 2012 break; 2013 case READ_DISC_INFORMATION: 2014 buflen = scsi_read_disc_information(s, r, outbuf); 2015 if (buflen < 0) { 2016 goto illegal_request; 2017 } 2018 break; 2019 case READ_DVD_STRUCTURE: 2020 buflen = scsi_read_dvd_structure(s, r, outbuf); 2021 if (buflen < 0) { 2022 goto illegal_request; 2023 } 2024 break; 2025 case SERVICE_ACTION_IN_16: 2026 /* Service Action In subcommands. */ 2027 if ((req->cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) { 2028 trace_scsi_disk_emulate_command_SAI_16(); 2029 memset(outbuf, 0, req->cmd.xfer); 2030 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 2031 if (!nb_sectors) { 2032 scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY)); 2033 return 0; 2034 } 2035 if ((req->cmd.buf[14] & 1) == 0 && req->cmd.lba) { 2036 goto illegal_request; 2037 } 2038 nb_sectors /= s->qdev.blocksize / 512; 2039 /* Returned value is the address of the last sector. */ 2040 nb_sectors--; 2041 /* Remember the new size for read/write sanity checking. */ 2042 s->qdev.max_lba = nb_sectors; 2043 outbuf[0] = (nb_sectors >> 56) & 0xff; 2044 outbuf[1] = (nb_sectors >> 48) & 0xff; 2045 outbuf[2] = (nb_sectors >> 40) & 0xff; 2046 outbuf[3] = (nb_sectors >> 32) & 0xff; 2047 outbuf[4] = (nb_sectors >> 24) & 0xff; 2048 outbuf[5] = (nb_sectors >> 16) & 0xff; 2049 outbuf[6] = (nb_sectors >> 8) & 0xff; 2050 outbuf[7] = nb_sectors & 0xff; 2051 outbuf[8] = 0; 2052 outbuf[9] = 0; 2053 outbuf[10] = s->qdev.blocksize >> 8; 2054 outbuf[11] = 0; 2055 outbuf[12] = 0; 2056 outbuf[13] = get_physical_block_exp(&s->qdev.conf); 2057 2058 /* set TPE bit if the format supports discard */ 2059 if (s->qdev.conf.discard_granularity) { 2060 outbuf[14] = 0x80; 2061 } 2062 2063 /* Protection, exponent and lowest lba field left blank. */ 2064 break; 2065 } 2066 trace_scsi_disk_emulate_command_SAI_unsupported(); 2067 goto illegal_request; 2068 case SYNCHRONIZE_CACHE: 2069 /* The request is used as the AIO opaque value, so add a ref. 
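 * The reference is dropped again from the AIO completion callback.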
*/ 2070 scsi_req_ref(&r->req); 2071 block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0, 2072 BLOCK_ACCT_FLUSH); 2073 r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r); 2074 return 0; 2075 case SEEK_10: 2076 trace_scsi_disk_emulate_command_SEEK_10(r->req.cmd.lba); 2077 if (r->req.cmd.lba > s->qdev.max_lba) { 2078 goto illegal_lba; 2079 } 2080 break; 2081 case MODE_SELECT: 2082 trace_scsi_disk_emulate_command_MODE_SELECT(r->req.cmd.xfer); 2083 break; 2084 case MODE_SELECT_10: 2085 trace_scsi_disk_emulate_command_MODE_SELECT_10(r->req.cmd.xfer); 2086 break; 2087 case UNMAP: 2088 trace_scsi_disk_emulate_command_UNMAP(r->req.cmd.xfer); 2089 break; 2090 case VERIFY_10: 2091 case VERIFY_12: 2092 case VERIFY_16: 2093 trace_scsi_disk_emulate_command_VERIFY((req->cmd.buf[1] >> 1) & 3); 2094 if (req->cmd.buf[1] & 6) { 2095 goto illegal_request; 2096 } 2097 break; 2098 case WRITE_SAME_10: 2099 case WRITE_SAME_16: 2100 trace_scsi_disk_emulate_command_WRITE_SAME( 2101 req->cmd.buf[0] == WRITE_SAME_10 ? 10 : 16, r->req.cmd.xfer); 2102 break; 2103 default: 2104 trace_scsi_disk_emulate_command_UNKNOWN(buf[0], 2105 scsi_command_name(buf[0])); 2106 scsi_check_condition(r, SENSE_CODE(INVALID_OPCODE)); 2107 return 0; 2108 } 2109 assert(!r->req.aiocb); 2110 r->iov.iov_len = MIN(r->buflen, req->cmd.xfer); 2111 if (r->iov.iov_len == 0) { 2112 scsi_req_complete(&r->req, GOOD); 2113 } 2114 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { 2115 assert(r->iov.iov_len == req->cmd.xfer); 2116 return -r->iov.iov_len; 2117 } else { 2118 return r->iov.iov_len; 2119 } 2120 2121 illegal_request: 2122 if (r->req.status == -1) { 2123 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 2124 } 2125 return 0; 2126 2127 illegal_lba: 2128 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE)); 2129 return 0; 2130 } 2131 2132 /* Execute a scsi command. Returns the length of the data expected by the 2133 command. This will be Positive for data transfers from the device 2134 (eg. disk reads), negative for transfers to the device (eg. disk writes), 2135 and zero if the command does not transfer any data. */ 2136 2137 static int32_t scsi_disk_dma_command(SCSIRequest *req, uint8_t *buf) 2138 { 2139 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 2140 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 2141 SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s)); 2142 uint32_t len; 2143 uint8_t command; 2144 2145 command = buf[0]; 2146 2147 if (!blk_is_available(s->qdev.conf.blk)) { 2148 scsi_check_condition(r, SENSE_CODE(NO_MEDIUM)); 2149 return 0; 2150 } 2151 2152 len = scsi_data_cdb_xfer(r->req.cmd.buf); 2153 switch (command) { 2154 case READ_6: 2155 case READ_10: 2156 case READ_12: 2157 case READ_16: 2158 trace_scsi_disk_dma_command_READ(r->req.cmd.lba, len); 2159 /* Protection information is not supported. For SCSI versions 2 and 2160 * older (as determined by snooping the guest's INQUIRY commands), 2161 * there is no RD/WR/VRPROTECT, so skip this check in these versions. 
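 * (RDPROTECT occupies bits 7..5 of CDB byte 1, hence the 0xe0 mask below;
 * any non-zero value is rejected because protection information is not
 * supported.)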
2162 */ 2163 if (s->qdev.scsi_version > 2 && (r->req.cmd.buf[1] & 0xe0)) { 2164 goto illegal_request; 2165 } 2166 if (!check_lba_range(s, r->req.cmd.lba, len)) { 2167 goto illegal_lba; 2168 } 2169 r->sector = r->req.cmd.lba * (s->qdev.blocksize / 512); 2170 r->sector_count = len * (s->qdev.blocksize / 512); 2171 break; 2172 case WRITE_6: 2173 case WRITE_10: 2174 case WRITE_12: 2175 case WRITE_16: 2176 case WRITE_VERIFY_10: 2177 case WRITE_VERIFY_12: 2178 case WRITE_VERIFY_16: 2179 if (blk_is_read_only(s->qdev.conf.blk)) { 2180 scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED)); 2181 return 0; 2182 } 2183 trace_scsi_disk_dma_command_WRITE( 2184 (command & 0xe) == 0xe ? "And Verify " : "", 2185 r->req.cmd.lba, len); 2186 /* fall through */ 2187 case VERIFY_10: 2188 case VERIFY_12: 2189 case VERIFY_16: 2190 /* We get here only for BYTCHK == 0x01 and only for scsi-block. 2191 * As far as DMA is concerned, we can treat it the same as a write; 2192 * scsi_block_do_sgio will send VERIFY commands. 2193 */ 2194 if (s->qdev.scsi_version > 2 && (r->req.cmd.buf[1] & 0xe0)) { 2195 goto illegal_request; 2196 } 2197 if (!check_lba_range(s, r->req.cmd.lba, len)) { 2198 goto illegal_lba; 2199 } 2200 r->sector = r->req.cmd.lba * (s->qdev.blocksize / 512); 2201 r->sector_count = len * (s->qdev.blocksize / 512); 2202 break; 2203 default: 2204 abort(); 2205 illegal_request: 2206 scsi_check_condition(r, SENSE_CODE(INVALID_FIELD)); 2207 return 0; 2208 illegal_lba: 2209 scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE)); 2210 return 0; 2211 } 2212 r->need_fua_emulation = sdc->need_fua_emulation(&r->req.cmd); 2213 if (r->sector_count == 0) { 2214 scsi_req_complete(&r->req, GOOD); 2215 } 2216 assert(r->iov.iov_len == 0); 2217 if (r->req.cmd.mode == SCSI_XFER_TO_DEV) { 2218 return -r->sector_count * 512; 2219 } else { 2220 return r->sector_count * 512; 2221 } 2222 } 2223 2224 static void scsi_disk_reset(DeviceState *dev) 2225 { 2226 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev.qdev, dev); 2227 uint64_t nb_sectors; 2228 2229 scsi_device_purge_requests(&s->qdev, SENSE_CODE(RESET)); 2230 2231 blk_get_geometry(s->qdev.conf.blk, &nb_sectors); 2232 nb_sectors /= s->qdev.blocksize / 512; 2233 if (nb_sectors) { 2234 nb_sectors--; 2235 } 2236 s->qdev.max_lba = nb_sectors; 2237 /* reset tray statuses */ 2238 s->tray_locked = 0; 2239 s->tray_open = 0; 2240 2241 s->qdev.scsi_version = s->qdev.default_scsi_version; 2242 } 2243 2244 static void scsi_disk_resize_cb(void *opaque) 2245 { 2246 SCSIDiskState *s = opaque; 2247 2248 /* SPC lists this sense code as available only for 2249 * direct-access devices. 2250 */ 2251 if (s->qdev.type == TYPE_DISK) { 2252 scsi_device_report_change(&s->qdev, SENSE_CODE(CAPACITY_CHANGED)); 2253 } 2254 } 2255 2256 static void scsi_cd_change_media_cb(void *opaque, bool load, Error **errp) 2257 { 2258 SCSIDiskState *s = opaque; 2259 2260 /* 2261 * When a CD gets changed, we have to report an ejected state and 2262 * then a loaded state to guests so that they detect tray 2263 * open/close and media change events. Guests that do not use 2264 * GET_EVENT_STATUS_NOTIFICATION to detect such tray open/close 2265 * states rely on this behavior. 2266 * 2267 * media_changed governs the state machine used for unit attention 2268 * report. media_event is used by GET EVENT STATUS NOTIFICATION. 
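 * eject_request is cleared here as well, since a completed tray change
 * supersedes any eject request that was still pending.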
2269 */ 2270 s->media_changed = load; 2271 s->tray_open = !load; 2272 scsi_device_set_ua(&s->qdev, SENSE_CODE(UNIT_ATTENTION_NO_MEDIUM)); 2273 s->media_event = true; 2274 s->eject_request = false; 2275 } 2276 2277 static void scsi_cd_eject_request_cb(void *opaque, bool force) 2278 { 2279 SCSIDiskState *s = opaque; 2280 2281 s->eject_request = true; 2282 if (force) { 2283 s->tray_locked = false; 2284 } 2285 } 2286 2287 static bool scsi_cd_is_tray_open(void *opaque) 2288 { 2289 return ((SCSIDiskState *)opaque)->tray_open; 2290 } 2291 2292 static bool scsi_cd_is_medium_locked(void *opaque) 2293 { 2294 return ((SCSIDiskState *)opaque)->tray_locked; 2295 } 2296 2297 static const BlockDevOps scsi_disk_removable_block_ops = { 2298 .change_media_cb = scsi_cd_change_media_cb, 2299 .eject_request_cb = scsi_cd_eject_request_cb, 2300 .is_tray_open = scsi_cd_is_tray_open, 2301 .is_medium_locked = scsi_cd_is_medium_locked, 2302 2303 .resize_cb = scsi_disk_resize_cb, 2304 }; 2305 2306 static const BlockDevOps scsi_disk_block_ops = { 2307 .resize_cb = scsi_disk_resize_cb, 2308 }; 2309 2310 static void scsi_disk_unit_attention_reported(SCSIDevice *dev) 2311 { 2312 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2313 if (s->media_changed) { 2314 s->media_changed = false; 2315 scsi_device_set_ua(&s->qdev, SENSE_CODE(MEDIUM_CHANGED)); 2316 } 2317 } 2318 2319 static void scsi_realize(SCSIDevice *dev, Error **errp) 2320 { 2321 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2322 bool read_only; 2323 2324 if (!s->qdev.conf.blk) { 2325 error_setg(errp, "drive property not set"); 2326 return; 2327 } 2328 2329 if (!(s->features & (1 << SCSI_DISK_F_REMOVABLE)) && 2330 !blk_is_inserted(s->qdev.conf.blk)) { 2331 error_setg(errp, "Device needs media, but drive is empty"); 2332 return; 2333 } 2334 2335 blkconf_blocksizes(&s->qdev.conf); 2336 2337 if (s->qdev.conf.logical_block_size > 2338 s->qdev.conf.physical_block_size) { 2339 error_setg(errp, 2340 "logical_block_size > physical_block_size not supported"); 2341 return; 2342 } 2343 2344 if (blk_get_aio_context(s->qdev.conf.blk) != qemu_get_aio_context() && 2345 !s->qdev.hba_supports_iothread) 2346 { 2347 error_setg(errp, "HBA does not support iothreads"); 2348 return; 2349 } 2350 2351 if (dev->type == TYPE_DISK) { 2352 if (!blkconf_geometry(&dev->conf, NULL, 65535, 255, 255, errp)) { 2353 return; 2354 } 2355 } 2356 2357 read_only = blk_is_read_only(s->qdev.conf.blk); 2358 if (dev->type == TYPE_ROM) { 2359 read_only = true; 2360 } 2361 2362 if (!blkconf_apply_backend_options(&dev->conf, read_only, 2363 dev->type == TYPE_DISK, errp)) { 2364 return; 2365 } 2366 2367 if (s->qdev.conf.discard_granularity == -1) { 2368 s->qdev.conf.discard_granularity = 2369 MAX(s->qdev.conf.logical_block_size, DEFAULT_DISCARD_GRANULARITY); 2370 } 2371 2372 if (!s->version) { 2373 s->version = g_strdup(qemu_hw_version()); 2374 } 2375 if (!s->vendor) { 2376 s->vendor = g_strdup("QEMU"); 2377 } 2378 if (!s->device_id) { 2379 if (s->serial) { 2380 s->device_id = g_strdup_printf("%.20s", s->serial); 2381 } else { 2382 const char *str = blk_name(s->qdev.conf.blk); 2383 if (str && *str) { 2384 s->device_id = g_strdup(str); 2385 } 2386 } 2387 } 2388 2389 if (blk_is_sg(s->qdev.conf.blk)) { 2390 error_setg(errp, "unwanted /dev/sg*"); 2391 return; 2392 } 2393 2394 if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) && 2395 !(s->features & (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS))) { 2396 blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_removable_block_ops, s); 2397 } else { 2398 
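/* Fixed media, or scsi-block (which sets SCSI_DISK_F_NO_REMOVABLE_DEVOPS):
 * only resize notifications are needed. */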
blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_block_ops, s); 2399 } 2400 blk_set_guest_block_size(s->qdev.conf.blk, s->qdev.blocksize); 2401 2402 blk_iostatus_enable(s->qdev.conf.blk); 2403 } 2404 2405 static void scsi_hd_realize(SCSIDevice *dev, Error **errp) 2406 { 2407 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2408 AioContext *ctx = NULL; 2409 /* can happen for devices without drive. The error message for missing 2410 * backend will be issued in scsi_realize 2411 */ 2412 if (s->qdev.conf.blk) { 2413 ctx = blk_get_aio_context(s->qdev.conf.blk); 2414 aio_context_acquire(ctx); 2415 blkconf_blocksizes(&s->qdev.conf); 2416 } 2417 s->qdev.blocksize = s->qdev.conf.logical_block_size; 2418 s->qdev.type = TYPE_DISK; 2419 if (!s->product) { 2420 s->product = g_strdup("QEMU HARDDISK"); 2421 } 2422 scsi_realize(&s->qdev, errp); 2423 if (ctx) { 2424 aio_context_release(ctx); 2425 } 2426 } 2427 2428 static void scsi_cd_realize(SCSIDevice *dev, Error **errp) 2429 { 2430 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2431 AioContext *ctx; 2432 int ret; 2433 2434 if (!dev->conf.blk) { 2435 /* Anonymous BlockBackend for an empty drive. As we put it into 2436 * dev->conf, qdev takes care of detaching on unplug. */ 2437 dev->conf.blk = blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL); 2438 ret = blk_attach_dev(dev->conf.blk, &dev->qdev); 2439 assert(ret == 0); 2440 } 2441 2442 ctx = blk_get_aio_context(dev->conf.blk); 2443 aio_context_acquire(ctx); 2444 s->qdev.blocksize = 2048; 2445 s->qdev.type = TYPE_ROM; 2446 s->features |= 1 << SCSI_DISK_F_REMOVABLE; 2447 if (!s->product) { 2448 s->product = g_strdup("QEMU CD-ROM"); 2449 } 2450 scsi_realize(&s->qdev, errp); 2451 aio_context_release(ctx); 2452 } 2453 2454 static void scsi_disk_realize(SCSIDevice *dev, Error **errp) 2455 { 2456 DriveInfo *dinfo; 2457 Error *local_err = NULL; 2458 2459 if (!dev->conf.blk) { 2460 scsi_realize(dev, &local_err); 2461 assert(local_err); 2462 error_propagate(errp, local_err); 2463 return; 2464 } 2465 2466 dinfo = blk_legacy_dinfo(dev->conf.blk); 2467 if (dinfo && dinfo->media_cd) { 2468 scsi_cd_realize(dev, errp); 2469 } else { 2470 scsi_hd_realize(dev, errp); 2471 } 2472 } 2473 2474 static const SCSIReqOps scsi_disk_emulate_reqops = { 2475 .size = sizeof(SCSIDiskReq), 2476 .free_req = scsi_free_request, 2477 .send_command = scsi_disk_emulate_command, 2478 .read_data = scsi_disk_emulate_read_data, 2479 .write_data = scsi_disk_emulate_write_data, 2480 .get_buf = scsi_get_buf, 2481 }; 2482 2483 static const SCSIReqOps scsi_disk_dma_reqops = { 2484 .size = sizeof(SCSIDiskReq), 2485 .free_req = scsi_free_request, 2486 .send_command = scsi_disk_dma_command, 2487 .read_data = scsi_read_data, 2488 .write_data = scsi_write_data, 2489 .get_buf = scsi_get_buf, 2490 .load_request = scsi_disk_load_request, 2491 .save_request = scsi_disk_save_request, 2492 }; 2493 2494 static const SCSIReqOps *const scsi_disk_reqops_dispatch[256] = { 2495 [TEST_UNIT_READY] = &scsi_disk_emulate_reqops, 2496 [INQUIRY] = &scsi_disk_emulate_reqops, 2497 [MODE_SENSE] = &scsi_disk_emulate_reqops, 2498 [MODE_SENSE_10] = &scsi_disk_emulate_reqops, 2499 [START_STOP] = &scsi_disk_emulate_reqops, 2500 [ALLOW_MEDIUM_REMOVAL] = &scsi_disk_emulate_reqops, 2501 [READ_CAPACITY_10] = &scsi_disk_emulate_reqops, 2502 [READ_TOC] = &scsi_disk_emulate_reqops, 2503 [READ_DVD_STRUCTURE] = &scsi_disk_emulate_reqops, 2504 [READ_DISC_INFORMATION] = &scsi_disk_emulate_reqops, 2505 [GET_CONFIGURATION] = &scsi_disk_emulate_reqops, 2506 
[GET_EVENT_STATUS_NOTIFICATION] = &scsi_disk_emulate_reqops, 2507 [MECHANISM_STATUS] = &scsi_disk_emulate_reqops, 2508 [SERVICE_ACTION_IN_16] = &scsi_disk_emulate_reqops, 2509 [REQUEST_SENSE] = &scsi_disk_emulate_reqops, 2510 [SYNCHRONIZE_CACHE] = &scsi_disk_emulate_reqops, 2511 [SEEK_10] = &scsi_disk_emulate_reqops, 2512 [MODE_SELECT] = &scsi_disk_emulate_reqops, 2513 [MODE_SELECT_10] = &scsi_disk_emulate_reqops, 2514 [UNMAP] = &scsi_disk_emulate_reqops, 2515 [WRITE_SAME_10] = &scsi_disk_emulate_reqops, 2516 [WRITE_SAME_16] = &scsi_disk_emulate_reqops, 2517 [VERIFY_10] = &scsi_disk_emulate_reqops, 2518 [VERIFY_12] = &scsi_disk_emulate_reqops, 2519 [VERIFY_16] = &scsi_disk_emulate_reqops, 2520 2521 [READ_6] = &scsi_disk_dma_reqops, 2522 [READ_10] = &scsi_disk_dma_reqops, 2523 [READ_12] = &scsi_disk_dma_reqops, 2524 [READ_16] = &scsi_disk_dma_reqops, 2525 [WRITE_6] = &scsi_disk_dma_reqops, 2526 [WRITE_10] = &scsi_disk_dma_reqops, 2527 [WRITE_12] = &scsi_disk_dma_reqops, 2528 [WRITE_16] = &scsi_disk_dma_reqops, 2529 [WRITE_VERIFY_10] = &scsi_disk_dma_reqops, 2530 [WRITE_VERIFY_12] = &scsi_disk_dma_reqops, 2531 [WRITE_VERIFY_16] = &scsi_disk_dma_reqops, 2532 }; 2533 2534 static void scsi_disk_new_request_dump(uint32_t lun, uint32_t tag, uint8_t *buf) 2535 { 2536 int i; 2537 int len = scsi_cdb_length(buf); 2538 char *line_buffer, *p; 2539 2540 line_buffer = g_malloc(len * 5 + 1); 2541 2542 for (i = 0, p = line_buffer; i < len; i++) { 2543 p += sprintf(p, " 0x%02x", buf[i]); 2544 } 2545 trace_scsi_disk_new_request(lun, tag, line_buffer); 2546 2547 g_free(line_buffer); 2548 } 2549 2550 static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun, 2551 uint8_t *buf, void *hba_private) 2552 { 2553 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d); 2554 SCSIRequest *req; 2555 const SCSIReqOps *ops; 2556 uint8_t command; 2557 2558 command = buf[0]; 2559 ops = scsi_disk_reqops_dispatch[command]; 2560 if (!ops) { 2561 ops = &scsi_disk_emulate_reqops; 2562 } 2563 req = scsi_req_alloc(ops, &s->qdev, tag, lun, hba_private); 2564 2565 if (trace_event_get_state_backends(TRACE_SCSI_DISK_NEW_REQUEST)) { 2566 scsi_disk_new_request_dump(lun, tag, buf); 2567 } 2568 2569 return req; 2570 } 2571 2572 #ifdef __linux__ 2573 static int get_device_type(SCSIDiskState *s) 2574 { 2575 uint8_t cmd[16]; 2576 uint8_t buf[36]; 2577 int ret; 2578 2579 memset(cmd, 0, sizeof(cmd)); 2580 memset(buf, 0, sizeof(buf)); 2581 cmd[0] = INQUIRY; 2582 cmd[4] = sizeof(buf); 2583 2584 ret = scsi_SG_IO_FROM_DEV(s->qdev.conf.blk, cmd, sizeof(cmd), 2585 buf, sizeof(buf)); 2586 if (ret < 0) { 2587 return -1; 2588 } 2589 s->qdev.type = buf[0]; 2590 if (buf[1] & 0x80) { 2591 s->features |= 1 << SCSI_DISK_F_REMOVABLE; 2592 } 2593 return 0; 2594 } 2595 2596 static void scsi_block_realize(SCSIDevice *dev, Error **errp) 2597 { 2598 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev); 2599 AioContext *ctx; 2600 int sg_version; 2601 int rc; 2602 2603 if (!s->qdev.conf.blk) { 2604 error_setg(errp, "drive property not set"); 2605 return; 2606 } 2607 2608 if (s->rotation_rate) { 2609 error_report_once("rotation_rate is specified for scsi-block but is " 2610 "not implemented. 
This option is deprecated and will " 2611 "be removed in a future version"); 2612 } 2613 2614 ctx = blk_get_aio_context(s->qdev.conf.blk); 2615 aio_context_acquire(ctx); 2616 2617 /* check that we are using a driver that supports SG_IO (version 3 and later) */ 2618 rc = blk_ioctl(s->qdev.conf.blk, SG_GET_VERSION_NUM, &sg_version); 2619 if (rc < 0) { 2620 error_setg_errno(errp, -rc, "cannot get SG_IO version number"); 2621 if (rc != -EPERM) { 2622 error_append_hint(errp, "Is this a SCSI device?\n"); 2623 } 2624 goto out; 2625 } 2626 if (sg_version < 30000) { 2627 error_setg(errp, "scsi generic interface too old"); 2628 goto out; 2629 } 2630 2631 /* get device type from INQUIRY data */ 2632 rc = get_device_type(s); 2633 if (rc < 0) { 2634 error_setg(errp, "INQUIRY failed"); 2635 goto out; 2636 } 2637 2638 /* Make a guess for the block size; we'll fix it when the guest sends 2639 * READ CAPACITY. If it doesn't, it would likely assume these sizes 2640 * anyway. (TODO: check in /sys). 2641 */ 2642 if (s->qdev.type == TYPE_ROM || s->qdev.type == TYPE_WORM) { 2643 s->qdev.blocksize = 2048; 2644 } else { 2645 s->qdev.blocksize = 512; 2646 } 2647 2648 /* Make the scsi-block device non-removable via the HMP and QMP eject 2649 * commands. 2650 */ 2651 s->features |= (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS); 2652 2653 scsi_realize(&s->qdev, errp); 2654 scsi_generic_read_device_inquiry(&s->qdev); 2655 2656 out: 2657 aio_context_release(ctx); 2658 } 2659 2660 typedef struct SCSIBlockReq { 2661 SCSIDiskReq req; 2662 sg_io_hdr_t io_header; 2663 2664 /* Selected bytes of the original CDB, copied into our own CDB. */ 2665 uint8_t cmd, cdb1, group_number; 2666 2667 /* CDB passed to SG_IO. */ 2668 uint8_t cdb[16]; 2669 } SCSIBlockReq; 2670 2671 static BlockAIOCB *scsi_block_do_sgio(SCSIBlockReq *req, 2672 int64_t offset, QEMUIOVector *iov, 2673 int direction, 2674 BlockCompletionFunc *cb, void *opaque) 2675 { 2676 sg_io_hdr_t *io_header = &req->io_header; 2677 SCSIDiskReq *r = &req->req; 2678 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 2679 int nb_logical_blocks; 2680 uint64_t lba; 2681 BlockAIOCB *aiocb; 2682 2683 /* This is not supported yet. It can only happen if the guest does 2684 * reads and writes that are not aligned to the logical block size 2685 * _and_ cover multiple MemoryRegions. 2686 */ 2687 assert(offset % s->qdev.blocksize == 0); 2688 assert(iov->size % s->qdev.blocksize == 0); 2689 2690 io_header->interface_id = 'S'; 2691 2692 /* The data transfer comes from the QEMUIOVector. */ 2693 io_header->dxfer_direction = direction; 2694 io_header->dxfer_len = iov->size; 2695 io_header->dxferp = (void *)iov->iov; 2696 io_header->iovec_count = iov->niov; 2697 assert(io_header->iovec_count == iov->niov); /* no overflow! */ 2698 2699 /* Build a new CDB with the LBA and length patched in, in case 2700 * DMA helpers split the transfer in multiple segments. Do not 2701 * build a CDB smaller than what the guest wanted, and only build 2702 * a larger one if strictly necessary.
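 * For instance, with 512-byte blocks a segment at byte offset 0x10000
 * covering 8 blocks becomes LBA 0x80, length 8, which still fits the
 * 6-byte form of a guest READ(6): 08 00 00 80 08 00. A segment whose
 * LBA exceeds 0xffffffff is promoted to the 16-byte form even if the
 * guest sent READ(10).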
2703 */ 2704 io_header->cmdp = req->cdb; 2705 lba = offset / s->qdev.blocksize; 2706 nb_logical_blocks = io_header->dxfer_len / s->qdev.blocksize; 2707 2708 if ((req->cmd >> 5) == 0 && lba <= 0x1ffff) { 2709 /* 6-byte CDB */ 2710 stl_be_p(&req->cdb[0], lba | (req->cmd << 24)); 2711 req->cdb[4] = nb_logical_blocks; 2712 req->cdb[5] = 0; 2713 io_header->cmd_len = 6; 2714 } else if ((req->cmd >> 5) <= 1 && lba <= 0xffffffffULL) { 2715 /* 10-byte CDB */ 2716 req->cdb[0] = (req->cmd & 0x1f) | 0x20; 2717 req->cdb[1] = req->cdb1; 2718 stl_be_p(&req->cdb[2], lba); 2719 req->cdb[6] = req->group_number; 2720 stw_be_p(&req->cdb[7], nb_logical_blocks); 2721 req->cdb[9] = 0; 2722 io_header->cmd_len = 10; 2723 } else if ((req->cmd >> 5) != 4 && lba <= 0xffffffffULL) { 2724 /* 12-byte CDB */ 2725 req->cdb[0] = (req->cmd & 0x1f) | 0xA0; 2726 req->cdb[1] = req->cdb1; 2727 stl_be_p(&req->cdb[2], lba); 2728 stl_be_p(&req->cdb[6], nb_logical_blocks); 2729 req->cdb[10] = req->group_number; 2730 req->cdb[11] = 0; 2731 io_header->cmd_len = 12; 2732 } else { 2733 /* 16-byte CDB */ 2734 req->cdb[0] = (req->cmd & 0x1f) | 0x80; 2735 req->cdb[1] = req->cdb1; 2736 stq_be_p(&req->cdb[2], lba); 2737 stl_be_p(&req->cdb[10], nb_logical_blocks); 2738 req->cdb[14] = req->group_number; 2739 req->cdb[15] = 0; 2740 io_header->cmd_len = 16; 2741 } 2742 2743 /* The rest is as in scsi-generic.c. */ 2744 io_header->mx_sb_len = sizeof(r->req.sense); 2745 io_header->sbp = r->req.sense; 2746 io_header->timeout = UINT_MAX; 2747 io_header->usr_ptr = r; 2748 io_header->flags |= SG_FLAG_DIRECT_IO; 2749 2750 aiocb = blk_aio_ioctl(s->qdev.conf.blk, SG_IO, io_header, cb, opaque); 2751 assert(aiocb != NULL); 2752 return aiocb; 2753 } 2754 2755 static bool scsi_block_no_fua(SCSICommand *cmd) 2756 { 2757 return false; 2758 } 2759 2760 static BlockAIOCB *scsi_block_dma_readv(int64_t offset, 2761 QEMUIOVector *iov, 2762 BlockCompletionFunc *cb, void *cb_opaque, 2763 void *opaque) 2764 { 2765 SCSIBlockReq *r = opaque; 2766 return scsi_block_do_sgio(r, offset, iov, 2767 SG_DXFER_FROM_DEV, cb, cb_opaque); 2768 } 2769 2770 static BlockAIOCB *scsi_block_dma_writev(int64_t offset, 2771 QEMUIOVector *iov, 2772 BlockCompletionFunc *cb, void *cb_opaque, 2773 void *opaque) 2774 { 2775 SCSIBlockReq *r = opaque; 2776 return scsi_block_do_sgio(r, offset, iov, 2777 SG_DXFER_TO_DEV, cb, cb_opaque); 2778 } 2779 2780 static bool scsi_block_is_passthrough(SCSIDiskState *s, uint8_t *buf) 2781 { 2782 switch (buf[0]) { 2783 case VERIFY_10: 2784 case VERIFY_12: 2785 case VERIFY_16: 2786 /* Check if BYTCHK == 0x01 (data-out buffer contains data 2787 * for the number of logical blocks specified in the length 2788 * field). For other modes, do not use scatter/gather operation. 2789 */ 2790 if ((buf[1] & 6) == 2) { 2791 return false; 2792 } 2793 break; 2794 2795 case READ_6: 2796 case READ_10: 2797 case READ_12: 2798 case READ_16: 2799 case WRITE_6: 2800 case WRITE_10: 2801 case WRITE_12: 2802 case WRITE_16: 2803 case WRITE_VERIFY_10: 2804 case WRITE_VERIFY_12: 2805 case WRITE_VERIFY_16: 2806 /* MMC writing cannot be done via DMA helpers, because it sometimes 2807 * involves writing beyond the maximum LBA or to negative LBA (lead-in). 2808 * We might use scsi_block_dma_reqops as long as no writing commands are 2809 * seen, but performance usually isn't paramount on optical media. So, 2810 * just make scsi-block operate the same as scsi-generic for them. 
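 * (Returning true routes the request through scsi_generic_req_ops, exactly
 * as a plain scsi-generic device would handle it.)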
2811 */ 2812 if (s->qdev.type != TYPE_ROM) { 2813 return false; 2814 } 2815 break; 2816 2817 default: 2818 break; 2819 } 2820 2821 return true; 2822 } 2823 2824 2825 static int32_t scsi_block_dma_command(SCSIRequest *req, uint8_t *buf) 2826 { 2827 SCSIBlockReq *r = (SCSIBlockReq *)req; 2828 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev); 2829 2830 r->cmd = req->cmd.buf[0]; 2831 switch (r->cmd >> 5) { 2832 case 0: 2833 /* 6-byte CDB. */ 2834 r->cdb1 = r->group_number = 0; 2835 break; 2836 case 1: 2837 /* 10-byte CDB. */ 2838 r->cdb1 = req->cmd.buf[1]; 2839 r->group_number = req->cmd.buf[6]; 2840 break; 2841 case 4: 2842 /* 12-byte CDB. */ 2843 r->cdb1 = req->cmd.buf[1]; 2844 r->group_number = req->cmd.buf[10]; 2845 break; 2846 case 5: 2847 /* 16-byte CDB. */ 2848 r->cdb1 = req->cmd.buf[1]; 2849 r->group_number = req->cmd.buf[14]; 2850 break; 2851 default: 2852 abort(); 2853 } 2854 2855 /* Protection information is not supported. For SCSI versions 2 and 2856 * older (as determined by snooping the guest's INQUIRY commands), 2857 * there is no RD/WR/VRPROTECT, so skip this check in these versions. 2858 */ 2859 if (s->qdev.scsi_version > 2 && (req->cmd.buf[1] & 0xe0)) { 2860 scsi_check_condition(&r->req, SENSE_CODE(INVALID_FIELD)); 2861 return 0; 2862 } 2863 2864 r->req.status = &r->io_header.status; 2865 return scsi_disk_dma_command(req, buf); 2866 } 2867 2868 static const SCSIReqOps scsi_block_dma_reqops = { 2869 .size = sizeof(SCSIBlockReq), 2870 .free_req = scsi_free_request, 2871 .send_command = scsi_block_dma_command, 2872 .read_data = scsi_read_data, 2873 .write_data = scsi_write_data, 2874 .get_buf = scsi_get_buf, 2875 .load_request = scsi_disk_load_request, 2876 .save_request = scsi_disk_save_request, 2877 }; 2878 2879 static SCSIRequest *scsi_block_new_request(SCSIDevice *d, uint32_t tag, 2880 uint32_t lun, uint8_t *buf, 2881 void *hba_private) 2882 { 2883 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d); 2884 2885 if (scsi_block_is_passthrough(s, buf)) { 2886 return scsi_req_alloc(&scsi_generic_req_ops, &s->qdev, tag, lun, 2887 hba_private); 2888 } else { 2889 return scsi_req_alloc(&scsi_block_dma_reqops, &s->qdev, tag, lun, 2890 hba_private); 2891 } 2892 } 2893 2894 static int scsi_block_parse_cdb(SCSIDevice *d, SCSICommand *cmd, 2895 uint8_t *buf, void *hba_private) 2896 { 2897 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d); 2898 2899 if (scsi_block_is_passthrough(s, buf)) { 2900 return scsi_bus_parse_cdb(&s->qdev, cmd, buf, hba_private); 2901 } else { 2902 return scsi_req_parse_cdb(&s->qdev, cmd, buf); 2903 } 2904 } 2905 2906 static void scsi_block_update_sense(SCSIRequest *req) 2907 { 2908 SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req); 2909 SCSIBlockReq *br = DO_UPCAST(SCSIBlockReq, req, r); 2910 r->req.sense_len = MIN(br->io_header.sb_len_wr, sizeof(r->req.sense)); 2911 } 2912 #endif 2913 2914 static 2915 BlockAIOCB *scsi_dma_readv(int64_t offset, QEMUIOVector *iov, 2916 BlockCompletionFunc *cb, void *cb_opaque, 2917 void *opaque) 2918 { 2919 SCSIDiskReq *r = opaque; 2920 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 2921 return blk_aio_preadv(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque); 2922 } 2923 2924 static 2925 BlockAIOCB *scsi_dma_writev(int64_t offset, QEMUIOVector *iov, 2926 BlockCompletionFunc *cb, void *cb_opaque, 2927 void *opaque) 2928 { 2929 SCSIDiskReq *r = opaque; 2930 SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); 2931 return blk_aio_pwritev(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque); 2932 } 
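/*
 * scsi_dma_readv/scsi_dma_writev above are the default DMAIOFunc
 * implementations used by scsi-hd and scsi-cd: they receive a byte offset
 * and a QEMUIOVector and map directly onto the BlockBackend's
 * preadv/pwritev. scsi-block overrides them with the SG_IO-based variants
 * so that a rebuilt CDB reaches the real device.
 */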
2933 2934 static void scsi_disk_base_class_initfn(ObjectClass *klass, void *data) 2935 { 2936 DeviceClass *dc = DEVICE_CLASS(klass); 2937 SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass); 2938 2939 dc->fw_name = "disk"; 2940 dc->reset = scsi_disk_reset; 2941 sdc->dma_readv = scsi_dma_readv; 2942 sdc->dma_writev = scsi_dma_writev; 2943 sdc->need_fua_emulation = scsi_is_cmd_fua; 2944 } 2945 2946 static const TypeInfo scsi_disk_base_info = { 2947 .name = TYPE_SCSI_DISK_BASE, 2948 .parent = TYPE_SCSI_DEVICE, 2949 .class_init = scsi_disk_base_class_initfn, 2950 .instance_size = sizeof(SCSIDiskState), 2951 .class_size = sizeof(SCSIDiskClass), 2952 .abstract = true, 2953 }; 2954 2955 #define DEFINE_SCSI_DISK_PROPERTIES() \ 2956 DEFINE_PROP_DRIVE_IOTHREAD("drive", SCSIDiskState, qdev.conf.blk), \ 2957 DEFINE_BLOCK_PROPERTIES_BASE(SCSIDiskState, qdev.conf), \ 2958 DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf), \ 2959 DEFINE_PROP_STRING("ver", SCSIDiskState, version), \ 2960 DEFINE_PROP_STRING("serial", SCSIDiskState, serial), \ 2961 DEFINE_PROP_STRING("vendor", SCSIDiskState, vendor), \ 2962 DEFINE_PROP_STRING("product", SCSIDiskState, product), \ 2963 DEFINE_PROP_STRING("device_id", SCSIDiskState, device_id) 2964 2965 2966 static Property scsi_hd_properties[] = { 2967 DEFINE_SCSI_DISK_PROPERTIES(), 2968 DEFINE_PROP_BIT("removable", SCSIDiskState, features, 2969 SCSI_DISK_F_REMOVABLE, false), 2970 DEFINE_PROP_BIT("dpofua", SCSIDiskState, features, 2971 SCSI_DISK_F_DPOFUA, false), 2972 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0), 2973 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0), 2974 DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0), 2975 DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size, 2976 DEFAULT_MAX_UNMAP_SIZE), 2977 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size, 2978 DEFAULT_MAX_IO_SIZE), 2979 DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0), 2980 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version, 2981 5), 2982 DEFINE_BLOCK_CHS_PROPERTIES(SCSIDiskState, qdev.conf), 2983 DEFINE_PROP_END_OF_LIST(), 2984 }; 2985 2986 static const VMStateDescription vmstate_scsi_disk_state = { 2987 .name = "scsi-disk", 2988 .version_id = 1, 2989 .minimum_version_id = 1, 2990 .fields = (VMStateField[]) { 2991 VMSTATE_SCSI_DEVICE(qdev, SCSIDiskState), 2992 VMSTATE_BOOL(media_changed, SCSIDiskState), 2993 VMSTATE_BOOL(media_event, SCSIDiskState), 2994 VMSTATE_BOOL(eject_request, SCSIDiskState), 2995 VMSTATE_BOOL(tray_open, SCSIDiskState), 2996 VMSTATE_BOOL(tray_locked, SCSIDiskState), 2997 VMSTATE_END_OF_LIST() 2998 } 2999 }; 3000 3001 static void scsi_hd_class_initfn(ObjectClass *klass, void *data) 3002 { 3003 DeviceClass *dc = DEVICE_CLASS(klass); 3004 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); 3005 3006 sc->realize = scsi_hd_realize; 3007 sc->alloc_req = scsi_new_request; 3008 sc->unit_attention_reported = scsi_disk_unit_attention_reported; 3009 dc->desc = "virtual SCSI disk"; 3010 dc->props = scsi_hd_properties; 3011 dc->vmsd = &vmstate_scsi_disk_state; 3012 } 3013 3014 static const TypeInfo scsi_hd_info = { 3015 .name = "scsi-hd", 3016 .parent = TYPE_SCSI_DISK_BASE, 3017 .class_init = scsi_hd_class_initfn, 3018 }; 3019 3020 static Property scsi_cd_properties[] = { 3021 DEFINE_SCSI_DISK_PROPERTIES(), 3022 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0), 3023 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0), 3024 DEFINE_PROP_UINT16("port_index", SCSIDiskState, 
port_index, 0), 3025 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size, 3026 DEFAULT_MAX_IO_SIZE), 3027 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version, 3028 5), 3029 DEFINE_PROP_END_OF_LIST(), 3030 }; 3031 3032 static void scsi_cd_class_initfn(ObjectClass *klass, void *data) 3033 { 3034 DeviceClass *dc = DEVICE_CLASS(klass); 3035 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); 3036 3037 sc->realize = scsi_cd_realize; 3038 sc->alloc_req = scsi_new_request; 3039 sc->unit_attention_reported = scsi_disk_unit_attention_reported; 3040 dc->desc = "virtual SCSI CD-ROM"; 3041 dc->props = scsi_cd_properties; 3042 dc->vmsd = &vmstate_scsi_disk_state; 3043 } 3044 3045 static const TypeInfo scsi_cd_info = { 3046 .name = "scsi-cd", 3047 .parent = TYPE_SCSI_DISK_BASE, 3048 .class_init = scsi_cd_class_initfn, 3049 }; 3050 3051 #ifdef __linux__ 3052 static Property scsi_block_properties[] = { 3053 DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf), \ 3054 DEFINE_PROP_DRIVE("drive", SCSIDiskState, qdev.conf.blk), 3055 DEFINE_PROP_BOOL("share-rw", SCSIDiskState, qdev.conf.share_rw, false), 3056 DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0), 3057 DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size, 3058 DEFAULT_MAX_UNMAP_SIZE), 3059 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size, 3060 DEFAULT_MAX_IO_SIZE), 3061 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version, 3062 -1), 3063 DEFINE_PROP_END_OF_LIST(), 3064 }; 3065 3066 static void scsi_block_class_initfn(ObjectClass *klass, void *data) 3067 { 3068 DeviceClass *dc = DEVICE_CLASS(klass); 3069 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); 3070 SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass); 3071 3072 sc->realize = scsi_block_realize; 3073 sc->alloc_req = scsi_block_new_request; 3074 sc->parse_cdb = scsi_block_parse_cdb; 3075 sdc->dma_readv = scsi_block_dma_readv; 3076 sdc->dma_writev = scsi_block_dma_writev; 3077 sdc->update_sense = scsi_block_update_sense; 3078 sdc->need_fua_emulation = scsi_block_no_fua; 3079 dc->desc = "SCSI block device passthrough"; 3080 dc->props = scsi_block_properties; 3081 dc->vmsd = &vmstate_scsi_disk_state; 3082 } 3083 3084 static const TypeInfo scsi_block_info = { 3085 .name = "scsi-block", 3086 .parent = TYPE_SCSI_DISK_BASE, 3087 .class_init = scsi_block_class_initfn, 3088 }; 3089 #endif 3090 3091 static Property scsi_disk_properties[] = { 3092 DEFINE_SCSI_DISK_PROPERTIES(), 3093 DEFINE_PROP_BIT("removable", SCSIDiskState, features, 3094 SCSI_DISK_F_REMOVABLE, false), 3095 DEFINE_PROP_BIT("dpofua", SCSIDiskState, features, 3096 SCSI_DISK_F_DPOFUA, false), 3097 DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0), 3098 DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0), 3099 DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0), 3100 DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size, 3101 DEFAULT_MAX_UNMAP_SIZE), 3102 DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size, 3103 DEFAULT_MAX_IO_SIZE), 3104 DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version, 3105 5), 3106 DEFINE_PROP_END_OF_LIST(), 3107 }; 3108 3109 static void scsi_disk_class_initfn(ObjectClass *klass, void *data) 3110 { 3111 DeviceClass *dc = DEVICE_CLASS(klass); 3112 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); 3113 3114 sc->realize = scsi_disk_realize; 3115 sc->alloc_req = scsi_new_request; 3116 sc->unit_attention_reported = scsi_disk_unit_attention_reported; 3117 
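/* Legacy catch-all device: its realize callback picks scsi-hd or scsi-cd
 * behaviour based on the media type of the attached drive. */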
dc->fw_name = "disk"; 3118 dc->desc = "virtual SCSI disk or CD-ROM (legacy)"; 3119 dc->reset = scsi_disk_reset; 3120 dc->props = scsi_disk_properties; 3121 dc->vmsd = &vmstate_scsi_disk_state; 3122 } 3123 3124 static const TypeInfo scsi_disk_info = { 3125 .name = "scsi-disk", 3126 .parent = TYPE_SCSI_DISK_BASE, 3127 .class_init = scsi_disk_class_initfn, 3128 }; 3129 3130 static void scsi_disk_register_types(void) 3131 { 3132 type_register_static(&scsi_disk_base_info); 3133 type_register_static(&scsi_hd_info); 3134 type_register_static(&scsi_cd_info); 3135 #ifdef __linux__ 3136 type_register_static(&scsi_block_info); 3137 #endif 3138 type_register_static(&scsi_disk_info); 3139 } 3140 3141 type_init(scsi_disk_register_types) 3142
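/*
 * Usage sketch (illustrative only, not part of the device model): these
 * types are typically instantiated from the QEMU command line, e.g.
 *
 *   -device virtio-scsi-pci,id=scsi0
 *   -blockdev node-name=hd0,driver=qcow2,file.driver=file,file.filename=disk.qcow2
 *   -device scsi-hd,drive=hd0,bus=scsi0.0,serial=QEMU0001
 *
 * The HBA (virtio-scsi-pci) and blockdev options shown are assumptions for
 * the example; any adapter that creates a SCSI bus can host the scsi-hd,
 * scsi-cd, scsi-block and legacy scsi-disk devices registered above.
 */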