#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "qemu/option.h"
#include "qemu/hw-version.h"
#include "hw/qdev-properties.h"
#include "hw/scsi/scsi.h"
#include "migration/qemu-file-types.h"
#include "migration/vmstate.h"
#include "scsi/constants.h"
#include "sysemu/block-backend.h"
#include "sysemu/blockdev.h"
#include "sysemu/sysemu.h"
#include "sysemu/runstate.h"
#include "trace.h"
#include "sysemu/dma.h"
#include "qemu/cutils.h"

static char *scsibus_get_dev_path(DeviceState *dev);
static char *scsibus_get_fw_dev_path(DeviceState *dev);
static void scsi_req_dequeue(SCSIRequest *req);
static uint8_t *scsi_target_alloc_buf(SCSIRequest *req, size_t len);
static void scsi_target_free_buf(SCSIRequest *req);
static void scsi_clear_reported_luns_changed(SCSIRequest *req);

static int next_scsi_bus;

static SCSIDevice *do_scsi_device_find(SCSIBus *bus,
                                       int channel, int id, int lun,
                                       bool include_unrealized)
{
    BusChild *kid;
    SCSIDevice *retval = NULL;

    QTAILQ_FOREACH_RCU(kid, &bus->qbus.children, sibling) {
        DeviceState *qdev = kid->child;
        SCSIDevice *dev = SCSI_DEVICE(qdev);

        if (dev->channel == channel && dev->id == id) {
            if (dev->lun == lun) {
                retval = dev;
                break;
            }

            /*
             * If we don't find an exact match (channel/id/lun),
             * we will return the first device which matches channel/id.
             */
            if (!retval) {
                retval = dev;
            }
        }
    }

    /*
     * This function might run on the IO thread and we might race against
     * main thread hot-plugging the device.
     * We assume that as soon as .realized is set to true we can let
     * the user access the device.
     */
    if (retval && !include_unrealized && !qdev_is_realized(&retval->qdev)) {
        retval = NULL;
    }

    return retval;
}

SCSIDevice *scsi_device_find(SCSIBus *bus, int channel, int id, int lun)
{
    RCU_READ_LOCK_GUARD();
    return do_scsi_device_find(bus, channel, id, lun, false);
}

SCSIDevice *scsi_device_get(SCSIBus *bus, int channel, int id, int lun)
{
    SCSIDevice *d;
    RCU_READ_LOCK_GUARD();
    d = do_scsi_device_find(bus, channel, id, lun, false);
    if (d) {
        object_ref(d);
    }
    return d;
}

/*
 * Invoke @fn() for each enqueued request in device @s. Must be called from the
 * main loop thread while the guest is stopped. This is only suitable for
 * vmstate ->put(), use scsi_device_for_each_req_async() for other cases.
 */
static void scsi_device_for_each_req_sync(SCSIDevice *s,
                                          void (*fn)(SCSIRequest *, void *),
                                          void *opaque)
{
    SCSIRequest *req;
    SCSIRequest *next_req;

    assert(!runstate_is_running());
    assert(qemu_in_main_thread());

    QTAILQ_FOREACH_SAFE(req, &s->requests, next, next_req) {
        fn(req, opaque);
    }
}

typedef struct {
    SCSIDevice *s;
    void (*fn)(SCSIRequest *, void *);
    void *fn_opaque;
} SCSIDeviceForEachReqAsyncData;
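
/*
 * Bottom half scheduled by scsi_device_for_each_req_async(): invokes the
 * callback for each enqueued request in the AioContext of the device's
 * BlockBackend, rescheduling itself if that context changed in the meantime.
 */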
static void scsi_device_for_each_req_async_bh(void *opaque)
{
    g_autofree SCSIDeviceForEachReqAsyncData *data = opaque;
    SCSIDevice *s = data->s;
    AioContext *ctx;
    SCSIRequest *req;
    SCSIRequest *next;

    /*
     * If the AioContext changed before this BH was called then reschedule into
     * the new AioContext before accessing ->requests. This can happen when
     * scsi_device_for_each_req_async() is called and then the AioContext is
     * changed before BHs are run.
     */
    ctx = blk_get_aio_context(s->conf.blk);
    if (ctx != qemu_get_current_aio_context()) {
        aio_bh_schedule_oneshot(ctx, scsi_device_for_each_req_async_bh,
                                g_steal_pointer(&data));
        return;
    }

    QTAILQ_FOREACH_SAFE(req, &s->requests, next, next) {
        data->fn(req, data->fn_opaque);
    }

    /* Drop the reference taken by scsi_device_for_each_req_async() */
    object_unref(OBJECT(s));
}

/*
 * Schedule @fn() to be invoked for each enqueued request in device @s. @fn()
 * runs in the AioContext that is executing the request.
 */
static void scsi_device_for_each_req_async(SCSIDevice *s,
                                           void (*fn)(SCSIRequest *, void *),
                                           void *opaque)
{
    assert(qemu_in_main_thread());

    SCSIDeviceForEachReqAsyncData *data =
        g_new(SCSIDeviceForEachReqAsyncData, 1);

    data->s = s;
    data->fn = fn;
    data->fn_opaque = opaque;

    /*
     * Hold a reference to the SCSIDevice until
     * scsi_device_for_each_req_async_bh() finishes.
     */
    object_ref(OBJECT(s));

    aio_bh_schedule_oneshot(blk_get_aio_context(s->conf.blk),
                            scsi_device_for_each_req_async_bh,
                            data);
}

static void scsi_device_realize(SCSIDevice *s, Error **errp)
{
    SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s);
    if (sc->realize) {
        sc->realize(s, errp);
    }
}

static void scsi_device_unrealize(SCSIDevice *s)
{
    SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s);
    if (sc->unrealize) {
        sc->unrealize(s);
    }
}

int scsi_bus_parse_cdb(SCSIDevice *dev, SCSICommand *cmd, uint8_t *buf,
                       size_t buf_len, void *hba_private)
{
    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, dev->qdev.parent_bus);
    int rc;

    assert(cmd->len == 0);
    rc = scsi_req_parse_cdb(dev, cmd, buf, buf_len);
    if (bus->info->parse_cdb) {
        rc = bus->info->parse_cdb(dev, cmd, buf, buf_len, hba_private);
    }
    return rc;
}

static SCSIRequest *scsi_device_alloc_req(SCSIDevice *s, uint32_t tag, uint32_t lun,
                                          uint8_t *buf, void *hba_private)
{
    SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s);
    if (sc->alloc_req) {
        return sc->alloc_req(s, tag, lun, buf, hba_private);
    }

    return NULL;
}

void scsi_device_unit_attention_reported(SCSIDevice *s)
{
    SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s);
    if (sc->unit_attention_reported) {
        sc->unit_attention_reported(s);
    }
}

/* Create a scsi bus, and attach devices to it.  */
void scsi_bus_init_named(SCSIBus *bus, size_t bus_size, DeviceState *host,
                         const SCSIBusInfo *info, const char *bus_name)
{
    qbus_init(bus, bus_size, TYPE_SCSI_BUS, host, bus_name);
    bus->busnr = next_scsi_bus++;
    bus->info = info;
    qbus_set_bus_hotplug_handler(BUS(bus));
}
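
/*
 * Mark @req to be restarted when the VM resumes execution (see
 * scsi_dma_restart_req).
 */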
void scsi_req_retry(SCSIRequest *req)
{
    req->retry = true;
}

/* Called in the AioContext that is executing the request */
static void scsi_dma_restart_req(SCSIRequest *req, void *opaque)
{
    scsi_req_ref(req);
    if (req->retry) {
        req->retry = false;
        switch (req->cmd.mode) {
        case SCSI_XFER_FROM_DEV:
        case SCSI_XFER_TO_DEV:
            scsi_req_continue(req);
            break;
        case SCSI_XFER_NONE:
            scsi_req_dequeue(req);
            scsi_req_enqueue(req);
            break;
        }
    }
    scsi_req_unref(req);
}

static void scsi_dma_restart_cb(void *opaque, bool running, RunState state)
{
    SCSIDevice *s = opaque;

    assert(qemu_in_main_thread());

    if (!running) {
        return;
    }

    scsi_device_for_each_req_async(s, scsi_dma_restart_req, NULL);
}

static bool scsi_bus_is_address_free(SCSIBus *bus,
                                     int channel, int target, int lun,
                                     SCSIDevice **p_dev)
{
    SCSIDevice *d;

    RCU_READ_LOCK_GUARD();
    d = do_scsi_device_find(bus, channel, target, lun, true);
    if (d && d->lun == lun) {
        if (p_dev) {
            *p_dev = d;
        }
        return false;
    }
    if (p_dev) {
        *p_dev = NULL;
    }
    return true;
}

static bool scsi_bus_check_address(BusState *qbus, DeviceState *qdev, Error **errp)
{
    SCSIDevice *dev = SCSI_DEVICE(qdev);
    SCSIBus *bus = SCSI_BUS(qbus);

    if (dev->channel > bus->info->max_channel) {
        error_setg(errp, "bad scsi channel id: %d", dev->channel);
        return false;
    }
    if (dev->id != -1 && dev->id > bus->info->max_target) {
        error_setg(errp, "bad scsi device id: %d", dev->id);
        return false;
    }
    if (dev->lun != -1 && dev->lun > bus->info->max_lun) {
        error_setg(errp, "bad scsi device lun: %d", dev->lun);
        return false;
    }

    if (dev->id != -1 && dev->lun != -1) {
        SCSIDevice *d;
        if (!scsi_bus_is_address_free(bus, dev->channel, dev->id, dev->lun, &d)) {
            error_setg(errp, "lun already used by '%s'", d->qdev.id);
            return false;
        }
    }

    return true;
}

static void scsi_qdev_realize(DeviceState *qdev, Error **errp)
{
    SCSIDevice *dev = SCSI_DEVICE(qdev);
    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, dev->qdev.parent_bus);
    bool is_free;
    Error *local_err = NULL;

    if (dev->id == -1) {
        int id = -1;
        if (dev->lun == -1) {
            dev->lun = 0;
        }
        do {
            is_free = scsi_bus_is_address_free(bus, dev->channel, ++id, dev->lun, NULL);
        } while (!is_free && id < bus->info->max_target);
        if (!is_free) {
            error_setg(errp, "no free target");
            return;
        }
        dev->id = id;
    } else if (dev->lun == -1) {
        int lun = -1;
        do {
            is_free = scsi_bus_is_address_free(bus, dev->channel, dev->id, ++lun, NULL);
        } while (!is_free && lun < bus->info->max_lun);
        if (!is_free) {
            error_setg(errp, "no free lun");
            return;
        }
        dev->lun = lun;
    }

    QTAILQ_INIT(&dev->requests);
    scsi_device_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    dev->vmsentry = qdev_add_vm_change_state_handler(DEVICE(dev),
                                                     scsi_dma_restart_cb, dev);
}
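
/*
 * Common unrealize path for SCSI devices: unregister the VM state change
 * handler, cancel outstanding requests and let the device class clean up.
 */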
static void scsi_qdev_unrealize(DeviceState *qdev)
{
    SCSIDevice *dev = SCSI_DEVICE(qdev);

    if (dev->vmsentry) {
        qemu_del_vm_change_state_handler(dev->vmsentry);
    }

    scsi_device_purge_requests(dev, SENSE_CODE(NO_SENSE));

    scsi_device_unrealize(dev);

    blockdev_mark_auto_del(dev->conf.blk);
}

/* handle legacy '-drive if=scsi,...' cmd line args */
SCSIDevice *scsi_bus_legacy_add_drive(SCSIBus *bus, BlockBackend *blk,
                                      int unit, bool removable, int bootindex,
                                      bool share_rw,
                                      BlockdevOnError rerror,
                                      BlockdevOnError werror,
                                      const char *serial, Error **errp)
{
    const char *driver;
    char *name;
    DeviceState *dev;
    DriveInfo *dinfo;

    if (blk_is_sg(blk)) {
        driver = "scsi-generic";
    } else {
        dinfo = blk_legacy_dinfo(blk);
        if (dinfo && dinfo->media_cd) {
            driver = "scsi-cd";
        } else {
            driver = "scsi-hd";
        }
    }
    dev = qdev_new(driver);
    name = g_strdup_printf("legacy[%d]", unit);
    object_property_add_child(OBJECT(bus), name, OBJECT(dev));
    g_free(name);

    qdev_prop_set_uint32(dev, "scsi-id", unit);
    if (bootindex >= 0) {
        object_property_set_int(OBJECT(dev), "bootindex", bootindex,
                                &error_abort);
    }
    if (object_property_find(OBJECT(dev), "removable")) {
        qdev_prop_set_bit(dev, "removable", removable);
    }
    if (serial && object_property_find(OBJECT(dev), "serial")) {
        qdev_prop_set_string(dev, "serial", serial);
    }
    if (!qdev_prop_set_drive_err(dev, "drive", blk, errp)) {
        object_unparent(OBJECT(dev));
        return NULL;
    }
    if (!object_property_set_bool(OBJECT(dev), "share-rw", share_rw, errp)) {
        object_unparent(OBJECT(dev));
        return NULL;
    }

    qdev_prop_set_enum(dev, "rerror", rerror);
    qdev_prop_set_enum(dev, "werror", werror);

    if (!qdev_realize_and_unref(dev, &bus->qbus, errp)) {
        object_unparent(OBJECT(dev));
        return NULL;
    }
    return SCSI_DEVICE(dev);
}

void scsi_bus_legacy_handle_cmdline(SCSIBus *bus)
{
    Location loc;
    DriveInfo *dinfo;
    int unit;

    loc_push_none(&loc);
    for (unit = 0; unit <= bus->info->max_target; unit++) {
        dinfo = drive_get(IF_SCSI, bus->busnr, unit);
        if (dinfo == NULL) {
            continue;
        }
        qemu_opts_loc_restore(dinfo->opts);
        scsi_bus_legacy_add_drive(bus, blk_by_legacy_dinfo(dinfo),
                                  unit, false, -1, false,
                                  BLOCKDEV_ON_ERROR_AUTO,
                                  BLOCKDEV_ON_ERROR_AUTO,
                                  NULL, &error_fatal);
    }
    loc_pop(&loc);
}
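
/* SCSIReqOps implementation for commands with an invalid field in the CDB.  */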
static int32_t scsi_invalid_field(SCSIRequest *req, uint8_t *buf)
{
    scsi_req_build_sense(req, SENSE_CODE(INVALID_FIELD));
    scsi_req_complete(req, CHECK_CONDITION);
    return 0;
}

static const struct SCSIReqOps reqops_invalid_field = {
    .size = sizeof(SCSIRequest),
    .send_command = scsi_invalid_field
};

/* SCSIReqOps implementation for invalid commands.  */

static int32_t scsi_invalid_command(SCSIRequest *req, uint8_t *buf)
{
    scsi_req_build_sense(req, SENSE_CODE(INVALID_OPCODE));
    scsi_req_complete(req, CHECK_CONDITION);
    return 0;
}

static const struct SCSIReqOps reqops_invalid_opcode = {
    .size = sizeof(SCSIRequest),
    .send_command = scsi_invalid_command
};

/* SCSIReqOps implementation for unit attention conditions.  */

static void scsi_fetch_unit_attention_sense(SCSIRequest *req)
{
    SCSISense *ua = NULL;

    if (req->dev->unit_attention.key == UNIT_ATTENTION) {
        ua = &req->dev->unit_attention;
    } else if (req->bus->unit_attention.key == UNIT_ATTENTION) {
        ua = &req->bus->unit_attention;
    }

    /*
     * Fetch the unit attention sense immediately so that another
     * scsi_req_new does not use reqops_unit_attention.
     */
    if (ua) {
        scsi_req_build_sense(req, *ua);
        *ua = SENSE_CODE(NO_SENSE);
    }
}

static int32_t scsi_unit_attention(SCSIRequest *req, uint8_t *buf)
{
    scsi_req_complete(req, CHECK_CONDITION);
    return 0;
}

static const struct SCSIReqOps reqops_unit_attention = {
    .size = sizeof(SCSIRequest),
    .init_req = scsi_fetch_unit_attention_sense,
    .send_command = scsi_unit_attention
};

/* SCSIReqOps implementation for REPORT LUNS and for commands sent to
   an invalid LUN.  */

typedef struct SCSITargetReq SCSITargetReq;

struct SCSITargetReq {
    SCSIRequest req;
    int len;
    uint8_t *buf;
    int buf_len;
};

static void store_lun(uint8_t *outbuf, int lun)
{
    if (lun < 256) {
        /* Simple logical unit addressing method */
        outbuf[0] = 0;
        outbuf[1] = lun;
    } else {
        /* Flat space addressing method */
        outbuf[0] = 0x40 | (lun >> 8);
        outbuf[1] = (lun & 255);
    }
}

static bool scsi_target_emulate_report_luns(SCSITargetReq *r)
{
    BusChild *kid;
    int channel, id;
    uint8_t tmp[8] = {0};
    int len = 0;
    GByteArray *buf;

    if (r->req.cmd.xfer < 16) {
        return false;
    }
    if (r->req.cmd.buf[2] > 2) {
        return false;
    }

    /* reserve space for 63 LUNs */
    buf = g_byte_array_sized_new(512);

    channel = r->req.dev->channel;
    id = r->req.dev->id;

    /* add size (will be updated later to the correct value) */
    g_byte_array_append(buf, tmp, 8);
    len += 8;

    /* add LUN0 */
    g_byte_array_append(buf, tmp, 8);
    len += 8;

    WITH_RCU_READ_LOCK_GUARD() {
        QTAILQ_FOREACH_RCU(kid, &r->req.bus->qbus.children, sibling) {
            DeviceState *qdev = kid->child;
            SCSIDevice *dev = SCSI_DEVICE(qdev);

            if (dev->channel == channel && dev->id == id && dev->lun != 0 &&
                qdev_is_realized(&dev->qdev)) {
                store_lun(tmp, dev->lun);
                g_byte_array_append(buf, tmp, 8);
                len += 8;
            }
        }
    }

    r->buf_len = len;
    r->buf = g_byte_array_free(buf, FALSE);
    r->len = MIN(len, r->req.cmd.xfer & ~7);

    /* store the LUN list length */
    stl_be_p(&r->buf[0], len - 8);

    /*
     * If a REPORT LUNS command enters the enabled command state, [...]
     * the device server shall clear any pending unit attention condition
     * with an additional sense code of REPORTED LUNS DATA HAS CHANGED.
     */
    scsi_clear_reported_luns_changed(&r->req);

    return true;
}
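
/*
 * Minimal INQUIRY emulation for LUNs that have no device behind them: only
 * standard INQUIRY data and VPD page 0x00 (supported pages) are returned.
 */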
static bool scsi_target_emulate_inquiry(SCSITargetReq *r)
{
    assert(r->req.dev->lun != r->req.lun);

    scsi_target_alloc_buf(&r->req, SCSI_INQUIRY_LEN);

    if (r->req.cmd.buf[1] & 0x2) {
        /* Command support data - optional, not implemented */
        return false;
    }

    if (r->req.cmd.buf[1] & 0x1) {
        /* Vital product data */
        uint8_t page_code = r->req.cmd.buf[2];
        r->buf[r->len++] = page_code; /* this page */
        r->buf[r->len++] = 0x00;

        switch (page_code) {
        case 0x00: /* Supported page codes, mandatory */
        {
            int pages;
            pages = r->len++;
            r->buf[r->len++] = 0x00; /* list of supported pages (this page) */
            r->buf[pages] = r->len - pages - 1; /* number of pages */
            break;
        }
        default:
            return false;
        }
        /* done with EVPD */
        assert(r->len < r->buf_len);
        r->len = MIN(r->req.cmd.xfer, r->len);
        return true;
    }

    /* Standard INQUIRY data */
    if (r->req.cmd.buf[2] != 0) {
        return false;
    }

    /* PAGE CODE == 0 */
    r->len = MIN(r->req.cmd.xfer, SCSI_INQUIRY_LEN);
    memset(r->buf, 0, r->len);
    if (r->req.lun != 0) {
        r->buf[0] = TYPE_NO_LUN;
    } else {
        r->buf[0] = TYPE_NOT_PRESENT | TYPE_INACTIVE;
        r->buf[2] = 5; /* Version */
        r->buf[3] = 2 | 0x10; /* HiSup, response data format */
        r->buf[4] = r->len - 5; /* Additional Length = (Len - 1) - 4 */
        r->buf[7] = 0x10 | (r->req.bus->info->tcq ? 0x02 : 0); /* Sync, TCQ.  */
        memcpy(&r->buf[8], "QEMU    ", 8);
        memcpy(&r->buf[16], "QEMU TARGET     ", 16);
        pstrcpy((char *) &r->buf[32], 4, qemu_hw_version());
    }
    return true;
}

static size_t scsi_sense_len(SCSIRequest *req)
{
    if (req->dev->type == TYPE_SCANNER)
        return SCSI_SENSE_LEN_SCANNER;
    else
        return SCSI_SENSE_LEN;
}

static int32_t scsi_target_send_command(SCSIRequest *req, uint8_t *buf)
{
    SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);
    int fixed_sense = (req->cmd.buf[1] & 1) == 0;

    if (req->lun != 0 &&
        buf[0] != INQUIRY && buf[0] != REQUEST_SENSE) {
        scsi_req_build_sense(req, SENSE_CODE(LUN_NOT_SUPPORTED));
        scsi_req_complete(req, CHECK_CONDITION);
        return 0;
    }
    switch (buf[0]) {
    case REPORT_LUNS:
        if (!scsi_target_emulate_report_luns(r)) {
            goto illegal_request;
        }
        break;
    case INQUIRY:
        if (!scsi_target_emulate_inquiry(r)) {
            goto illegal_request;
        }
        break;
    case REQUEST_SENSE:
        scsi_target_alloc_buf(&r->req, scsi_sense_len(req));
        if (req->lun != 0) {
            const struct SCSISense sense = SENSE_CODE(LUN_NOT_SUPPORTED);

            r->len = scsi_build_sense_buf(r->buf, req->cmd.xfer,
                                          sense, fixed_sense);
        } else {
            r->len = scsi_device_get_sense(r->req.dev, r->buf,
                                           MIN(req->cmd.xfer, r->buf_len),
                                           fixed_sense);
        }
        if (r->req.dev->sense_is_ua) {
            scsi_device_unit_attention_reported(req->dev);
            r->req.dev->sense_len = 0;
            r->req.dev->sense_is_ua = false;
        }
        break;
    case TEST_UNIT_READY:
        break;
    default:
        scsi_req_build_sense(req, SENSE_CODE(INVALID_OPCODE));
        scsi_req_complete(req, CHECK_CONDITION);
        return 0;
    illegal_request:
        scsi_req_build_sense(req, SENSE_CODE(INVALID_FIELD));
        scsi_req_complete(req, CHECK_CONDITION);
        return 0;
    }

    if (!r->len) {
        scsi_req_complete(req, GOOD);
    }
    return r->len;
}
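
/*
 * Hand the prepared response buffer to the HBA in a single scsi_req_data()
 * call; the next read_data callback completes the request.
 */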
static void scsi_target_read_data(SCSIRequest *req)
{
    SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);
    uint32_t n;

    n = r->len;
    if (n > 0) {
        r->len = 0;
        scsi_req_data(&r->req, n);
    } else {
        scsi_req_complete(&r->req, GOOD);
    }
}

static uint8_t *scsi_target_get_buf(SCSIRequest *req)
{
    SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);

    return r->buf;
}

static uint8_t *scsi_target_alloc_buf(SCSIRequest *req, size_t len)
{
    SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);

    r->buf = g_malloc(len);
    r->buf_len = len;

    return r->buf;
}

static void scsi_target_free_buf(SCSIRequest *req)
{
    SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);

    g_free(r->buf);
}

static const struct SCSIReqOps reqops_target_command = {
    .size = sizeof(SCSITargetReq),
    .send_command = scsi_target_send_command,
    .read_data = scsi_target_read_data,
    .get_buf = scsi_target_get_buf,
    .free_req = scsi_target_free_buf,
};


SCSIRequest *scsi_req_alloc(const SCSIReqOps *reqops, SCSIDevice *d,
                            uint32_t tag, uint32_t lun, void *hba_private)
{
    SCSIRequest *req;
    SCSIBus *bus = scsi_bus_from_device(d);
    BusState *qbus = BUS(bus);
    const int memset_off = offsetof(SCSIRequest, sense)
                           + sizeof(req->sense);

    req = g_malloc(reqops->size);
    memset((uint8_t *)req + memset_off, 0, reqops->size - memset_off);
    req->refcount = 1;
    req->bus = bus;
    req->dev = d;
    req->tag = tag;
    req->lun = lun;
    req->hba_private = hba_private;
    req->status = -1;
    req->host_status = -1;
    req->ops = reqops;
    object_ref(OBJECT(d));
    object_ref(OBJECT(qbus->parent));
    notifier_list_init(&req->cancel_notifiers);

    if (reqops->init_req) {
        reqops->init_req(req);
    }

    trace_scsi_req_alloc(req->dev->id, req->lun, req->tag);
    return req;
}
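
/*
 * Create a request for device @d. The SCSIReqOps implementation is chosen
 * here: pending unit attention conditions, commands addressed to an invalid
 * LUN, REPORT LUNS and REQUEST SENSE with stored sense data are emulated by
 * the bus itself; everything else is handed to the device's alloc_req hook.
 */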
SCSIRequest *scsi_req_new(SCSIDevice *d, uint32_t tag, uint32_t lun,
                          uint8_t *buf, size_t buf_len, void *hba_private)
{
    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, d->qdev.parent_bus);
    const SCSIReqOps *ops;
    SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(d);
    SCSIRequest *req;
    SCSICommand cmd = { .len = 0 };
    int ret;

    if (buf_len == 0) {
        trace_scsi_req_parse_bad(d->id, lun, tag, 0);
        goto invalid_opcode;
    }

    if ((d->unit_attention.key == UNIT_ATTENTION ||
         bus->unit_attention.key == UNIT_ATTENTION) &&
        (buf[0] != INQUIRY &&
         buf[0] != REPORT_LUNS &&
         buf[0] != GET_CONFIGURATION &&
         buf[0] != GET_EVENT_STATUS_NOTIFICATION &&

         /*
          * If we already have a pending unit attention condition,
          * report this one before triggering another one.
          */
         !(buf[0] == REQUEST_SENSE && d->sense_is_ua))) {
        ops = &reqops_unit_attention;
    } else if (lun != d->lun ||
               buf[0] == REPORT_LUNS ||
               (buf[0] == REQUEST_SENSE && d->sense_len)) {
        ops = &reqops_target_command;
    } else {
        ops = NULL;
    }

    if (ops != NULL || !sc->parse_cdb) {
        ret = scsi_req_parse_cdb(d, &cmd, buf, buf_len);
    } else {
        ret = sc->parse_cdb(d, &cmd, buf, buf_len, hba_private);
    }

    if (ret != 0) {
        trace_scsi_req_parse_bad(d->id, lun, tag, buf[0]);
invalid_opcode:
        req = scsi_req_alloc(&reqops_invalid_opcode, d, tag, lun, hba_private);
    } else {
        assert(cmd.len != 0);
        trace_scsi_req_parsed(d->id, lun, tag, buf[0],
                              cmd.mode, cmd.xfer);
        if (cmd.lba != -1) {
            trace_scsi_req_parsed_lba(d->id, lun, tag, buf[0],
                                      cmd.lba);
        }

        if (cmd.xfer > INT32_MAX) {
            req = scsi_req_alloc(&reqops_invalid_field, d, tag, lun, hba_private);
        } else if (ops) {
            req = scsi_req_alloc(ops, d, tag, lun, hba_private);
        } else {
            req = scsi_device_alloc_req(d, tag, lun, buf, hba_private);
        }
    }

    req->cmd = cmd;
    req->residual = req->cmd.xfer;

    switch (buf[0]) {
    case INQUIRY:
        trace_scsi_inquiry(d->id, lun, tag, cmd.buf[1], cmd.buf[2]);
        break;
    case TEST_UNIT_READY:
        trace_scsi_test_unit_ready(d->id, lun, tag);
        break;
    case REPORT_LUNS:
        trace_scsi_report_luns(d->id, lun, tag);
        break;
    case REQUEST_SENSE:
        trace_scsi_request_sense(d->id, lun, tag);
        break;
    default:
        break;
    }

    return req;
}

uint8_t *scsi_req_get_buf(SCSIRequest *req)
{
    return req->ops->get_buf(req);
}

static void scsi_clear_reported_luns_changed(SCSIRequest *req)
{
    SCSISense *ua;

    if (req->dev->unit_attention.key == UNIT_ATTENTION) {
        ua = &req->dev->unit_attention;
    } else if (req->bus->unit_attention.key == UNIT_ATTENTION) {
        ua = &req->bus->unit_attention;
    } else {
        return;
    }

    if (ua->asc == SENSE_CODE(REPORTED_LUNS_CHANGED).asc &&
        ua->ascq == SENSE_CODE(REPORTED_LUNS_CHANGED).ascq) {
        *ua = SENSE_CODE(NO_SENSE);
    }
}
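
/*
 * Copy @req's autosense data into @buf in fixed format. As a side effect,
 * sense that represents a unit attention condition is considered reported
 * and is cleared from the device (see the FIXME in the body).
 */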
int scsi_req_get_sense(SCSIRequest *req, uint8_t *buf, int len)
{
    int ret;

    assert(len >= 14);
    if (!req->sense_len) {
        return 0;
    }

    ret = scsi_convert_sense(req->sense, req->sense_len, buf, len, true);

    /*
     * FIXME: clearing unit attention conditions upon autosense should be done
     * only if the UA_INTLCK_CTRL field in the Control mode page is set to 00b
     * (SAM-5, 5.14).
     *
     * We assume UA_INTLCK_CTRL to be 00b for HBAs that support autosense, and
     * 10b for HBAs that do not support it (do not call scsi_req_get_sense).
     * Here we handle unit attention clearing for UA_INTLCK_CTRL == 00b.
     */
    if (req->dev->sense_is_ua) {
        scsi_device_unit_attention_reported(req->dev);
        req->dev->sense_len = 0;
        req->dev->sense_is_ua = false;
    }
    return ret;
}

int scsi_device_get_sense(SCSIDevice *dev, uint8_t *buf, int len, bool fixed)
{
    return scsi_convert_sense(dev->sense, dev->sense_len, buf, len, fixed);
}

void scsi_req_build_sense(SCSIRequest *req, SCSISense sense)
{
    trace_scsi_req_build_sense(req->dev->id, req->lun, req->tag,
                               sense.key, sense.asc, sense.ascq);
    req->sense_len = scsi_build_sense(req->sense, sense);
}

static void scsi_req_enqueue_internal(SCSIRequest *req)
{
    assert(!req->enqueued);
    scsi_req_ref(req);
    if (req->bus->info->get_sg_list) {
        req->sg = req->bus->info->get_sg_list(req);
    } else {
        req->sg = NULL;
    }
    req->enqueued = true;
    QTAILQ_INSERT_TAIL(&req->dev->requests, req, next);
}

int32_t scsi_req_enqueue(SCSIRequest *req)
{
    int32_t rc;

    assert(!req->retry);
    scsi_req_enqueue_internal(req);
    scsi_req_ref(req);
    rc = req->ops->send_command(req, req->cmd.buf);
    scsi_req_unref(req);
    return rc;
}

static void scsi_req_dequeue(SCSIRequest *req)
{
    trace_scsi_req_dequeue(req->dev->id, req->lun, req->tag);
    req->retry = false;
    if (req->enqueued) {
        QTAILQ_REMOVE(&req->dev->requests, req, next);
        req->enqueued = false;
        scsi_req_unref(req);
    }
}

static int scsi_get_performance_length(int num_desc, int type, int data_type)
{
    /* MMC-6, paragraph 6.7.  */
    switch (type) {
    case 0:
        if ((data_type & 3) == 0) {
            /* Each descriptor is as in Table 295 - Nominal performance.  */
            return 16 * num_desc + 8;
        } else {
            /* Each descriptor is as in Table 296 - Exceptions.  */
            return 6 * num_desc + 8;
        }
    case 1:
    case 4:
    case 5:
        return 8 * num_desc + 8;
    case 2:
        return 2048 * num_desc + 8;
    case 3:
        return 16 * num_desc + 8;
    default:
        return 8;
    }
}
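
/*
 * Decode the BYTE_BLOCK and T_TYPE bits of an ATA PASS-THROUGH CDB to find
 * the unit of the T_LENGTH field: single bytes, 512-byte sectors, or the
 * device's logical block size.
 */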
static int ata_passthrough_xfer_unit(SCSIDevice *dev, uint8_t *buf)
{
    int byte_block = (buf[2] >> 2) & 0x1;
    int type = (buf[2] >> 4) & 0x1;
    int xfer_unit;

    if (byte_block) {
        if (type) {
            xfer_unit = dev->blocksize;
        } else {
            xfer_unit = 512;
        }
    } else {
        xfer_unit = 1;
    }

    return xfer_unit;
}

static int ata_passthrough_12_xfer(SCSIDevice *dev, uint8_t *buf)
{
    int length = buf[2] & 0x3;
    int xfer;
    int unit = ata_passthrough_xfer_unit(dev, buf);

    switch (length) {
    case 0:
    case 3: /* USB-specific.  */
    default:
        xfer = 0;
        break;
    case 1:
        xfer = buf[3];
        break;
    case 2:
        xfer = buf[4];
        break;
    }

    return xfer * unit;
}

static int ata_passthrough_16_xfer(SCSIDevice *dev, uint8_t *buf)
{
    int extend = buf[1] & 0x1;
    int length = buf[2] & 0x3;
    int xfer;
    int unit = ata_passthrough_xfer_unit(dev, buf);

    switch (length) {
    case 0:
    case 3: /* USB-specific.  */
    default:
        xfer = 0;
        break;
    case 1:
        xfer = buf[4];
        xfer |= (extend ? buf[3] << 8 : 0);
        break;
    case 2:
        xfer = buf[6];
        xfer |= (extend ? buf[5] << 8 : 0);
        break;
    }

    return xfer * unit;
}
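
/*
 * Compute the expected data transfer length for direct-access, CD/DVD and
 * other non-stream devices. The default comes from scsi_cdb_xfer(); commands
 * whose length is encoded differently are overridden below.
 */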
static int scsi_req_xfer(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
{
    cmd->xfer = scsi_cdb_xfer(buf);
    switch (buf[0]) {
    case TEST_UNIT_READY:
    case REWIND:
    case START_STOP:
    case SET_CAPACITY:
    case WRITE_FILEMARKS:
    case WRITE_FILEMARKS_16:
    case SPACE:
    case RESERVE:
    case RELEASE:
    case ERASE:
    case ALLOW_MEDIUM_REMOVAL:
    case SEEK_10:
    case SYNCHRONIZE_CACHE:
    case SYNCHRONIZE_CACHE_16:
    case LOCATE_16:
    case LOCK_UNLOCK_CACHE:
    case SET_CD_SPEED:
    case SET_LIMITS:
    case WRITE_LONG_10:
    case UPDATE_BLOCK:
    case RESERVE_TRACK:
    case SET_READ_AHEAD:
    case PRE_FETCH:
    case PRE_FETCH_16:
    case ALLOW_OVERWRITE:
        cmd->xfer = 0;
        break;
    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
        if ((buf[1] & 2) == 0) {
            cmd->xfer = 0;
        } else if ((buf[1] & 4) != 0) {
            cmd->xfer = 1;
        }
        cmd->xfer *= dev->blocksize;
        break;
    case MODE_SENSE:
        break;
    case WRITE_SAME_10:
    case WRITE_SAME_16:
        cmd->xfer = buf[1] & 1 ? 0 : dev->blocksize;
        break;
    case READ_CAPACITY_10:
        cmd->xfer = 8;
        break;
    case READ_BLOCK_LIMITS:
        cmd->xfer = 6;
        break;
    case SEND_VOLUME_TAG:
        /* GPCMD_SET_STREAMING from multimedia commands.  */
        if (dev->type == TYPE_ROM) {
            cmd->xfer = buf[10] | (buf[9] << 8);
        } else {
            cmd->xfer = buf[9] | (buf[8] << 8);
        }
        break;
    case WRITE_6:
        /* length 0 means 256 blocks */
        if (cmd->xfer == 0) {
            cmd->xfer = 256;
        }
        /* fall through */
    case WRITE_10:
    case WRITE_VERIFY_10:
    case WRITE_12:
    case WRITE_VERIFY_12:
    case WRITE_16:
    case WRITE_VERIFY_16:
        cmd->xfer *= dev->blocksize;
        break;
    case READ_6:
    case READ_REVERSE:
        /* length 0 means 256 blocks */
        if (cmd->xfer == 0) {
            cmd->xfer = 256;
        }
        /* fall through */
    case READ_10:
    case READ_12:
    case READ_16:
        cmd->xfer *= dev->blocksize;
        break;
    case FORMAT_UNIT:
        /* MMC mandates the parameter list to be 12-bytes long.  Parameters
         * for block devices are restricted to the header right now.  */
        if (dev->type == TYPE_ROM && (buf[1] & 16)) {
            cmd->xfer = 12;
        } else {
            cmd->xfer = (buf[1] & 16) == 0 ? 0 : (buf[1] & 32 ? 8 : 4);
        }
        break;
    case INQUIRY:
    case RECEIVE_DIAGNOSTIC:
    case SEND_DIAGNOSTIC:
        cmd->xfer = buf[4] | (buf[3] << 8);
        break;
    case READ_CD:
    case READ_BUFFER:
    case WRITE_BUFFER:
    case SEND_CUE_SHEET:
        cmd->xfer = buf[8] | (buf[7] << 8) | (buf[6] << 16);
        break;
    case PERSISTENT_RESERVE_OUT:
        cmd->xfer = ldl_be_p(&buf[5]) & 0xffffffffULL;
        break;
    case ERASE_12:
        if (dev->type == TYPE_ROM) {
            /* MMC command GET PERFORMANCE.  */
            cmd->xfer = scsi_get_performance_length(buf[9] | (buf[8] << 8),
                                                    buf[10], buf[1] & 0x1f);
        }
        break;
    case MECHANISM_STATUS:
    case READ_DVD_STRUCTURE:
    case SEND_DVD_STRUCTURE:
    case MAINTENANCE_OUT:
    case MAINTENANCE_IN:
        if (dev->type == TYPE_ROM) {
            /* GPCMD_REPORT_KEY and GPCMD_SEND_KEY from multi media commands */
            cmd->xfer = buf[9] | (buf[8] << 8);
        }
        break;
    case ATA_PASSTHROUGH_12:
        if (dev->type == TYPE_ROM) {
            /* BLANK command of MMC */
            cmd->xfer = 0;
        } else {
            cmd->xfer = ata_passthrough_12_xfer(dev, buf);
        }
        break;
    case ATA_PASSTHROUGH_16:
        cmd->xfer = ata_passthrough_16_xfer(dev, buf);
        break;
    }
    return 0;
}

static int scsi_req_stream_xfer(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
{
    switch (buf[0]) {
    /* stream commands */
    case ERASE_12:
    case ERASE_16:
        cmd->xfer = 0;
        break;
    case READ_6:
    case READ_REVERSE:
    case RECOVER_BUFFERED_DATA:
    case WRITE_6:
        cmd->xfer = buf[4] | (buf[3] << 8) | (buf[2] << 16);
        if (buf[1] & 0x01) { /* fixed */
            cmd->xfer *= dev->blocksize;
        }
        break;
    case READ_16:
    case READ_REVERSE_16:
    case VERIFY_16:
    case WRITE_16:
        cmd->xfer = buf[14] | (buf[13] << 8) | (buf[12] << 16);
        if (buf[1] & 0x01) { /* fixed */
            cmd->xfer *= dev->blocksize;
        }
        break;
    case REWIND:
    case LOAD_UNLOAD:
        cmd->xfer = 0;
        break;
    case SPACE_16:
        cmd->xfer = buf[13] | (buf[12] << 8);
        break;
    case READ_POSITION:
        switch (buf[1] & 0x1f) /* operation code */ {
        case SHORT_FORM_BLOCK_ID:
        case SHORT_FORM_VENDOR_SPECIFIC:
            cmd->xfer = 20;
            break;
        case LONG_FORM:
            cmd->xfer = 32;
            break;
        case EXTENDED_FORM:
            cmd->xfer = buf[8] | (buf[7] << 8);
            break;
        default:
            return -1;
        }

        break;
    case FORMAT_UNIT:
        cmd->xfer = buf[4] | (buf[3] << 8);
        break;
    /* generic commands */
    default:
        return scsi_req_xfer(cmd, dev, buf);
    }
    return 0;
}

static int scsi_req_medium_changer_xfer(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
{
    switch (buf[0]) {
    /* medium changer commands */
    case EXCHANGE_MEDIUM:
    case INITIALIZE_ELEMENT_STATUS:
    case INITIALIZE_ELEMENT_STATUS_WITH_RANGE:
    case MOVE_MEDIUM:
    case POSITION_TO_ELEMENT:
        cmd->xfer = 0;
        break;
    case READ_ELEMENT_STATUS:
        cmd->xfer = buf[9] | (buf[8] << 8) | (buf[7] << 16);
        break;

    /* generic commands */
    default:
        return scsi_req_xfer(cmd, dev, buf);
    }
    return 0;
}

static int scsi_req_scanner_length(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
{
    switch (buf[0]) {
    /* Scanner commands */
    case OBJECT_POSITION:
        cmd->xfer = 0;
        break;
    case SCAN:
        cmd->xfer = buf[4];
        break;
    case READ_10:
    case SEND:
    case GET_WINDOW:
    case SET_WINDOW:
        cmd->xfer = buf[8] | (buf[7] << 8) | (buf[6] << 16);
        break;
    default:
        /* GET_DATA_BUFFER_STATUS xfer handled by scsi_req_xfer */
        return scsi_req_xfer(cmd, dev, buf);
    }

    return 0;
}
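
/*
 * Classify the transfer direction of a parsed command: no data transfer,
 * data to the device (writes, mode/log select, ...) or data from the device.
 */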
static void scsi_cmd_xfer_mode(SCSICommand *cmd)
{
    if (!cmd->xfer) {
        cmd->mode = SCSI_XFER_NONE;
        return;
    }
    switch (cmd->buf[0]) {
    case WRITE_6:
    case WRITE_10:
    case WRITE_VERIFY_10:
    case WRITE_12:
    case WRITE_VERIFY_12:
    case WRITE_16:
    case WRITE_VERIFY_16:
    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
    case COPY:
    case COPY_VERIFY:
    case COMPARE:
    case CHANGE_DEFINITION:
    case LOG_SELECT:
    case MODE_SELECT:
    case MODE_SELECT_10:
    case SEND_DIAGNOSTIC:
    case WRITE_BUFFER:
    case FORMAT_UNIT:
    case REASSIGN_BLOCKS:
    case SEARCH_EQUAL:
    case SEARCH_HIGH:
    case SEARCH_LOW:
    case UPDATE_BLOCK:
    case WRITE_LONG_10:
    case WRITE_SAME_10:
    case WRITE_SAME_16:
    case UNMAP:
    case SEARCH_HIGH_12:
    case SEARCH_EQUAL_12:
    case SEARCH_LOW_12:
    case MEDIUM_SCAN:
    case SEND_VOLUME_TAG:
    case SEND_CUE_SHEET:
    case SEND_DVD_STRUCTURE:
    case PERSISTENT_RESERVE_OUT:
    case MAINTENANCE_OUT:
    case SET_WINDOW:
    case SCAN:
        /* SCAN conflicts with START_STOP.  START_STOP has cmd->xfer set to 0 for
         * non-scanner devices, so we only get here for SCAN and not for START_STOP.
         */
        cmd->mode = SCSI_XFER_TO_DEV;
        break;
    case ATA_PASSTHROUGH_12:
    case ATA_PASSTHROUGH_16:
        /* T_DIR */
        cmd->mode = (cmd->buf[2] & 0x8) ?
                    SCSI_XFER_FROM_DEV : SCSI_XFER_TO_DEV;
        break;
    default:
        cmd->mode = SCSI_XFER_FROM_DEV;
        break;
    }
}

int scsi_req_parse_cdb(SCSIDevice *dev, SCSICommand *cmd, uint8_t *buf,
                       size_t buf_len)
{
    int rc;
    int len;

    cmd->lba = -1;
    len = scsi_cdb_length(buf);
    if (len < 0 || len > buf_len) {
        return -1;
    }

    cmd->len = len;
    switch (dev->type) {
    case TYPE_TAPE:
        rc = scsi_req_stream_xfer(cmd, dev, buf);
        break;
    case TYPE_MEDIUM_CHANGER:
        rc = scsi_req_medium_changer_xfer(cmd, dev, buf);
        break;
    case TYPE_SCANNER:
        rc = scsi_req_scanner_length(cmd, dev, buf);
        break;
    default:
        rc = scsi_req_xfer(cmd, dev, buf);
        break;
    }

    if (rc != 0)
        return rc;

    memcpy(cmd->buf, buf, cmd->len);
    scsi_cmd_xfer_mode(cmd);
    cmd->lba = scsi_cmd_lba(cmd);
    return 0;
}
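
/*
 * Record a unit attention condition for @dev and give the HBA a chance to
 * report the change to the guest through the bus 'change' callback.
 */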
void scsi_device_report_change(SCSIDevice *dev, SCSISense sense)
{
    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, dev->qdev.parent_bus);

    scsi_device_set_ua(dev, sense);
    if (bus->info->change) {
        bus->info->change(bus, dev, sense);
    }
}

SCSIRequest *scsi_req_ref(SCSIRequest *req)
{
    assert(req->refcount > 0);
    req->refcount++;
    return req;
}

void scsi_req_unref(SCSIRequest *req)
{
    assert(req->refcount > 0);
    if (--req->refcount == 0) {
        BusState *qbus = req->dev->qdev.parent_bus;
        SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, qbus);

        if (bus->info->free_request && req->hba_private) {
            bus->info->free_request(bus, req->hba_private);
        }
        if (req->ops->free_req) {
            req->ops->free_req(req);
        }
        object_unref(OBJECT(req->dev));
        object_unref(OBJECT(qbus->parent));
        g_free(req);
    }
}

/* Tell the device that we finished processing this chunk of I/O.  It
   will start the next chunk or complete the command.  */
void scsi_req_continue(SCSIRequest *req)
{
    if (req->io_canceled) {
        trace_scsi_req_continue_canceled(req->dev->id, req->lun, req->tag);
        return;
    }
    trace_scsi_req_continue(req->dev->id, req->lun, req->tag);
    if (req->cmd.mode == SCSI_XFER_TO_DEV) {
        req->ops->write_data(req);
    } else {
        req->ops->read_data(req);
    }
}

/* Called by the devices when data is ready for the HBA.  The HBA should
   start a DMA operation to read or fill the device's data buffer.
   Once it completes, calling scsi_req_continue will restart I/O.  */
void scsi_req_data(SCSIRequest *req, int len)
{
    uint8_t *buf;
    if (req->io_canceled) {
        trace_scsi_req_data_canceled(req->dev->id, req->lun, req->tag, len);
        return;
    }
    trace_scsi_req_data(req->dev->id, req->lun, req->tag, len);
    assert(req->cmd.mode != SCSI_XFER_NONE);
    if (!req->sg) {
        req->residual -= len;
        req->bus->info->transfer_data(req, len);
        return;
    }

    /* If the device calls scsi_req_data and the HBA specified a
     * scatter/gather list, the transfer has to happen in a single
     * step.  */
    assert(!req->dma_started);
    req->dma_started = true;

    buf = scsi_req_get_buf(req);
    if (req->cmd.mode == SCSI_XFER_FROM_DEV) {
        dma_buf_read(buf, len, &req->residual, req->sg,
                     MEMTXATTRS_UNSPECIFIED);
    } else {
        dma_buf_write(buf, len, &req->residual, req->sg,
                      MEMTXATTRS_UNSPECIFIED);
    }
    scsi_req_continue(req);
}

void scsi_req_print(SCSIRequest *req)
{
    FILE *fp = stderr;
    int i;

    fprintf(fp, "[%s id=%d] %s",
            req->dev->qdev.parent_bus->name,
            req->dev->id,
            scsi_command_name(req->cmd.buf[0]));
    for (i = 1; i < req->cmd.len; i++) {
        fprintf(fp, " 0x%02x", req->cmd.buf[i]);
    }
    switch (req->cmd.mode) {
    case SCSI_XFER_NONE:
        fprintf(fp, " - none\n");
        break;
    case SCSI_XFER_FROM_DEV:
        fprintf(fp, " - from-dev len=%zd\n", req->cmd.xfer);
        break;
    case SCSI_XFER_TO_DEV:
        fprintf(fp, " - to-dev len=%zd\n", req->cmd.xfer);
        break;
    default:
        fprintf(fp, " - Oops\n");
        break;
    }
}

void scsi_req_complete_failed(SCSIRequest *req, int host_status)
{
    SCSISense sense;
    int status;

    assert(req->status == -1 && req->host_status == -1);
    assert(req->ops != &reqops_unit_attention);

    if (!req->bus->info->fail) {
        /* Use the passed-in host_status: req->host_status is still -1 here. */
        status = scsi_sense_from_host_status(host_status, &sense);
        if (status == CHECK_CONDITION) {
            scsi_req_build_sense(req, sense);
        }
        scsi_req_complete(req, status);
        return;
    }

    req->host_status = host_status;
    scsi_req_ref(req);
    scsi_req_dequeue(req);
    req->bus->info->fail(req);

    /* Cancelled requests might end up being completed instead of cancelled */
    notifier_list_notify(&req->cancel_notifiers, req);
    scsi_req_unref(req);
}
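
/*
 * Complete @req with SCSI @status. Sense data, if any, is latched into the
 * device so that a later REQUEST SENSE can retrieve it, then the request is
 * dequeued and the HBA's completion callback is invoked.
 */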
void scsi_req_complete(SCSIRequest *req, int status)
{
    assert(req->status == -1 && req->host_status == -1);
    req->status = status;
    req->host_status = SCSI_HOST_OK;

    assert(req->sense_len <= sizeof(req->sense));
    if (status == GOOD) {
        req->sense_len = 0;
    }

    if (req->sense_len) {
        memcpy(req->dev->sense, req->sense, req->sense_len);
        req->dev->sense_len = req->sense_len;
        req->dev->sense_is_ua = (req->ops == &reqops_unit_attention);
    } else {
        req->dev->sense_len = 0;
        req->dev->sense_is_ua = false;
    }

    scsi_req_ref(req);
    scsi_req_dequeue(req);
    req->bus->info->complete(req, req->residual);

    /* Cancelled requests might end up being completed instead of cancelled */
    notifier_list_notify(&req->cancel_notifiers, req);
    scsi_req_unref(req);
}

/* Called by the devices when the request is canceled. */
void scsi_req_cancel_complete(SCSIRequest *req)
{
    assert(req->io_canceled);
    if (req->bus->info->cancel) {
        req->bus->info->cancel(req);
    }
    notifier_list_notify(&req->cancel_notifiers, req);
    scsi_req_unref(req);
}

/* Cancel @req asynchronously. @notifier is added to @req's cancellation
 * notifier list; the bus will be notified once the request's cancellation
 * is completed.
 */
void scsi_req_cancel_async(SCSIRequest *req, Notifier *notifier)
{
    trace_scsi_req_cancel(req->dev->id, req->lun, req->tag);
    if (notifier) {
        notifier_list_add(&req->cancel_notifiers, notifier);
    }
    if (req->io_canceled) {
        /* A blk_aio_cancel_async is pending; when it finishes,
         * scsi_req_cancel_complete will be called and will
         * call the notifier we just added.  Just wait for that.
         */
        assert(req->aiocb);
        return;
    }
    /* Dropped in scsi_req_cancel_complete.  */
    scsi_req_ref(req);
    scsi_req_dequeue(req);
    req->io_canceled = true;
    if (req->aiocb) {
        blk_aio_cancel_async(req->aiocb);
    } else {
        scsi_req_cancel_complete(req);
    }
}

void scsi_req_cancel(SCSIRequest *req)
{
    trace_scsi_req_cancel(req->dev->id, req->lun, req->tag);
    if (!req->enqueued) {
        return;
    }
    assert(!req->io_canceled);
    /* Dropped in scsi_req_cancel_complete.  */
    scsi_req_ref(req);
    scsi_req_dequeue(req);
    req->io_canceled = true;
    if (req->aiocb) {
        blk_aio_cancel(req->aiocb);
    } else {
        scsi_req_cancel_complete(req);
    }
}

static int scsi_ua_precedence(SCSISense sense)
{
    if (sense.key != UNIT_ATTENTION) {
        return INT_MAX;
    }
    if (sense.asc == 0x29 && sense.ascq == 0x04) {
        /* DEVICE INTERNAL RESET goes with POWER ON OCCURRED */
        return 1;
    } else if (sense.asc == 0x3F && sense.ascq == 0x01) {
        /* MICROCODE HAS BEEN CHANGED goes with SCSI BUS RESET OCCURRED */
        return 2;
    } else if (sense.asc == 0x29 && (sense.ascq == 0x05 || sense.ascq == 0x06)) {
        /* These two go with "all others". */
        ;
    } else if (sense.asc == 0x29 && sense.ascq <= 0x07) {
        /* POWER ON, RESET OR BUS DEVICE RESET OCCURRED = 0
         * POWER ON OCCURRED = 1
         * SCSI BUS RESET OCCURRED = 2
         * BUS DEVICE RESET FUNCTION OCCURRED = 3
         * I_T NEXUS LOSS OCCURRED = 7
         */
        return sense.ascq;
    } else if (sense.asc == 0x2F && sense.ascq == 0x01) {
        /* COMMANDS CLEARED BY POWER LOSS NOTIFICATION  */
        return 8;
    }
    return (sense.asc << 8) | sense.ascq;
}
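
/*
 * Record a unit attention condition shared by all devices on @bus, unless a
 * more important condition is already pending.
 */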
void scsi_bus_set_ua(SCSIBus *bus, SCSISense sense)
{
    int prec1, prec2;
    if (sense.key != UNIT_ATTENTION) {
        return;
    }

    /*
     * Override a pre-existing unit attention condition, except for a more
     * important reset condition.
     */
    prec1 = scsi_ua_precedence(bus->unit_attention);
    prec2 = scsi_ua_precedence(sense);
    if (prec2 < prec1) {
        bus->unit_attention = sense;
    }
}

void scsi_device_set_ua(SCSIDevice *sdev, SCSISense sense)
{
    int prec1, prec2;
    if (sense.key != UNIT_ATTENTION) {
        return;
    }
    trace_scsi_device_set_ua(sdev->id, sdev->lun, sense.key,
                             sense.asc, sense.ascq);

    /*
     * Override a pre-existing unit attention condition, except for a more
     * important reset condition.
     */
    prec1 = scsi_ua_precedence(sdev->unit_attention);
    prec2 = scsi_ua_precedence(sense);
    if (prec2 < prec1) {
        sdev->unit_attention = sense;
    }
}

static void scsi_device_purge_one_req(SCSIRequest *req, void *opaque)
{
    scsi_req_cancel_async(req, NULL);
}

void scsi_device_purge_requests(SCSIDevice *sdev, SCSISense sense)
{
    scsi_device_for_each_req_async(sdev, scsi_device_purge_one_req, NULL);

    blk_drain(sdev->conf.blk);
    scsi_device_set_ua(sdev, sense);
}
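
/*
 * Drained-section notifications coming from a device's BlockBackend. The
 * per-bus counter below ensures the HBA callbacks fire only for the first
 * drained_begin and the last drained_end on the bus.
 */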
void scsi_device_drained_begin(SCSIDevice *sdev)
{
    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, sdev->qdev.parent_bus);
    if (!bus) {
        return;
    }

    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    assert(bus->drain_count < INT_MAX);

    /*
     * Multiple BlockBackends can be on a SCSIBus and each may begin/end
     * draining at any time. Keep a counter so HBAs only see begin/end once.
     */
    if (bus->drain_count++ == 0) {
        trace_scsi_bus_drained_begin(bus, sdev);
        if (bus->info->drained_begin) {
            bus->info->drained_begin(bus);
        }
    }
}

void scsi_device_drained_end(SCSIDevice *sdev)
{
    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, sdev->qdev.parent_bus);
    if (!bus) {
        return;
    }

    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    assert(bus->drain_count > 0);

    if (bus->drain_count-- == 1) {
        trace_scsi_bus_drained_end(bus, sdev);
        if (bus->info->drained_end) {
            bus->info->drained_end(bus);
        }
    }
}

static char *scsibus_get_dev_path(DeviceState *dev)
{
    SCSIDevice *d = SCSI_DEVICE(dev);
    DeviceState *hba = dev->parent_bus->parent;
    char *id;
    char *path;

    id = qdev_get_dev_path(hba);
    if (id) {
        path = g_strdup_printf("%s/%d:%d:%d", id, d->channel, d->id, d->lun);
    } else {
        path = g_strdup_printf("%d:%d:%d", d->channel, d->id, d->lun);
    }
    g_free(id);
    return path;
}

static char *scsibus_get_fw_dev_path(DeviceState *dev)
{
    SCSIDevice *d = SCSI_DEVICE(dev);
    return g_strdup_printf("channel@%x/%s@%x,%x", d->channel,
                           qdev_fw_name(dev), d->id, d->lun);
}

/* SCSI request list.  For simplicity, pv points to the whole device */

static void put_scsi_req(SCSIRequest *req, void *opaque)
{
    QEMUFile *f = opaque;

    assert(!req->io_canceled);
    assert(req->status == -1 && req->host_status == -1);
    assert(req->enqueued);

    qemu_put_sbyte(f, req->retry ? 1 : 2);
    qemu_put_buffer(f, req->cmd.buf, sizeof(req->cmd.buf));
    qemu_put_be32s(f, &req->tag);
    qemu_put_be32s(f, &req->lun);
    if (req->bus->info->save_request) {
        req->bus->info->save_request(f, req);
    }
    if (req->ops->save_request) {
        req->ops->save_request(f, req);
    }
}

static int put_scsi_requests(QEMUFile *f, void *pv, size_t size,
                             const VMStateField *field, JSONWriter *vmdesc)
{
    SCSIDevice *s = pv;

    scsi_device_for_each_req_sync(s, put_scsi_req, f);
    qemu_put_sbyte(f, 0);
    return 0;
}
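
/*
 * Counterpart of put_scsi_requests: re-create and re-queue every pending
 * request from the migration stream without restarting it yet.
 */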
static int get_scsi_requests(QEMUFile *f, void *pv, size_t size,
                             const VMStateField *field)
{
    SCSIDevice *s = pv;
    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, s->qdev.parent_bus);
    int8_t sbyte;

    while ((sbyte = qemu_get_sbyte(f)) > 0) {
        uint8_t buf[SCSI_CMD_BUF_SIZE];
        uint32_t tag;
        uint32_t lun;
        SCSIRequest *req;

        qemu_get_buffer(f, buf, sizeof(buf));
        qemu_get_be32s(f, &tag);
        qemu_get_be32s(f, &lun);
        /*
         * A too-short CDB would have been rejected by scsi_req_new, so just use
         * SCSI_CMD_BUF_SIZE as the CDB length.
         */
        req = scsi_req_new(s, tag, lun, buf, sizeof(buf), NULL);
        req->retry = (sbyte == 1);
        if (bus->info->load_request) {
            req->hba_private = bus->info->load_request(f, req);
        }
        if (req->ops->load_request) {
            req->ops->load_request(f, req);
        }

        /* Just restart it later.  */
        scsi_req_enqueue_internal(req);

        /* At this point, the request will be kept alive by the reference
         * added by scsi_req_enqueue_internal, so we can release our reference.
         * The HBA of course will add its own reference in the load_request
         * callback if it needs to hold on to the SCSIRequest.
         */
        scsi_req_unref(req);
    }

    return 0;
}

static const VMStateInfo vmstate_info_scsi_requests = {
    .name = "scsi-requests",
    .get = get_scsi_requests,
    .put = put_scsi_requests,
};

static bool scsi_sense_state_needed(void *opaque)
{
    SCSIDevice *s = opaque;

    return s->sense_len > SCSI_SENSE_BUF_SIZE_OLD;
}

static const VMStateDescription vmstate_scsi_sense_state = {
    .name = "SCSIDevice/sense",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = scsi_sense_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8_SUB_ARRAY(sense, SCSIDevice,
                                SCSI_SENSE_BUF_SIZE_OLD,
                                SCSI_SENSE_BUF_SIZE - SCSI_SENSE_BUF_SIZE_OLD),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_scsi_device = {
    .name = "SCSIDevice",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(unit_attention.key, SCSIDevice),
        VMSTATE_UINT8(unit_attention.asc, SCSIDevice),
        VMSTATE_UINT8(unit_attention.ascq, SCSIDevice),
        VMSTATE_BOOL(sense_is_ua, SCSIDevice),
        VMSTATE_UINT8_SUB_ARRAY(sense, SCSIDevice, 0, SCSI_SENSE_BUF_SIZE_OLD),
        VMSTATE_UINT32(sense_len, SCSIDevice),
        {
            .name = "requests",
            .version_id = 0,
            .field_exists = NULL,
            .size = 0, /* ouch */
            .info = &vmstate_info_scsi_requests,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_scsi_sense_state,
        NULL
    }
};

static Property scsi_props[] = {
    DEFINE_PROP_UINT32("channel", SCSIDevice, channel, 0),
    DEFINE_PROP_UINT32("scsi-id", SCSIDevice, id, -1),
    DEFINE_PROP_UINT32("lun", SCSIDevice, lun, -1),
    DEFINE_PROP_END_OF_LIST(),
};

static void scsi_device_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *k = DEVICE_CLASS(klass);
    set_bit(DEVICE_CATEGORY_STORAGE, k->categories);
    k->bus_type = TYPE_SCSI_BUS;
    k->realize = scsi_qdev_realize;
    k->unrealize = scsi_qdev_unrealize;
    device_class_set_props(k, scsi_props);
}

static void scsi_dev_instance_init(Object *obj)
{
    DeviceState *dev = DEVICE(obj);
    SCSIDevice *s = SCSI_DEVICE(dev);

    device_add_bootindex_property(obj, &s->conf.bootindex,
                                  "bootindex", NULL,
                                  &s->qdev);
}

static const TypeInfo scsi_device_type_info = {
    .name = TYPE_SCSI_DEVICE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(SCSIDevice),
    .abstract = true,
    .class_size = sizeof(SCSIDeviceClass),
    .class_init = scsi_device_class_init,
    .instance_init = scsi_dev_instance_init,
};

static void scsi_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *k = BUS_CLASS(klass);
    HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);

    k->get_dev_path = scsibus_get_dev_path;
    k->get_fw_dev_path = scsibus_get_fw_dev_path;
    k->check_address = scsi_bus_check_address;
    hc->unplug = qdev_simple_device_unplug_cb;
}

static const TypeInfo scsi_bus_info = {
    .name = TYPE_SCSI_BUS,
    .parent = TYPE_BUS,
    .instance_size = sizeof(SCSIBus),
    .class_init = scsi_bus_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_HOTPLUG_HANDLER },
        { }
    }
};

static void scsi_register_types(void)
{
    type_register_static(&scsi_bus_info);
    type_register_static(&scsi_device_type_info);
}

type_init(scsi_register_types)