#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "qemu/option.h"
#include "qemu/hw-version.h"
#include "hw/qdev-properties.h"
#include "hw/scsi/scsi.h"
#include "migration/qemu-file-types.h"
#include "migration/vmstate.h"
#include "scsi/constants.h"
#include "sysemu/block-backend.h"
#include "sysemu/blockdev.h"
#include "sysemu/sysemu.h"
#include "sysemu/runstate.h"
#include "trace.h"
#include "sysemu/dma.h"
#include "qemu/cutils.h"

static char *scsibus_get_dev_path(DeviceState *dev);
static char *scsibus_get_fw_dev_path(DeviceState *dev);
static void scsi_req_dequeue(SCSIRequest *req);
static uint8_t *scsi_target_alloc_buf(SCSIRequest *req, size_t len);
static void scsi_target_free_buf(SCSIRequest *req);

static int next_scsi_bus;

static SCSIDevice *do_scsi_device_find(SCSIBus *bus,
                                       int channel, int id, int lun,
                                       bool include_unrealized)
{
    BusChild *kid;
    SCSIDevice *retval = NULL;

    QTAILQ_FOREACH_RCU(kid, &bus->qbus.children, sibling) {
        DeviceState *qdev = kid->child;
        SCSIDevice *dev = SCSI_DEVICE(qdev);

        if (dev->channel == channel && dev->id == id) {
            if (dev->lun == lun) {
                retval = dev;
                break;
            }

            /*
             * If we don't find exact match (channel/bus/lun),
             * we will return the first device which matches channel/bus
             */
            if (!retval) {
                retval = dev;
            }
        }
    }

    /*
     * This function might run on the IO thread and we might race against
     * main thread hot-plugging the device.
     * We assume that as soon as .realized is set to true we can let
     * the user access the device.
     */
    if (retval && !include_unrealized &&
        !qatomic_load_acquire(&retval->qdev.realized)) {
        retval = NULL;
    }

    return retval;
}

SCSIDevice *scsi_device_find(SCSIBus *bus, int channel, int id, int lun)
{
    RCU_READ_LOCK_GUARD();
    return do_scsi_device_find(bus, channel, id, lun, false);
}

SCSIDevice *scsi_device_get(SCSIBus *bus, int channel, int id, int lun)
{
    SCSIDevice *d;

    RCU_READ_LOCK_GUARD();
    d = do_scsi_device_find(bus, channel, id, lun, false);
    if (d) {
        object_ref(d);
    }
    return d;
}

static void scsi_device_realize(SCSIDevice *s, Error **errp)
{
    SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s);
    if (sc->realize) {
        sc->realize(s, errp);
    }
}

static void scsi_device_unrealize(SCSIDevice *s)
{
    SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s);
    if (sc->unrealize) {
        sc->unrealize(s);
    }
}

int scsi_bus_parse_cdb(SCSIDevice *dev, SCSICommand *cmd, uint8_t *buf,
                       size_t buf_len, void *hba_private)
{
    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, dev->qdev.parent_bus);
    int rc;

    assert(cmd->len == 0);
    rc = scsi_req_parse_cdb(dev, cmd, buf, buf_len);
    if (bus->info->parse_cdb) {
        rc = bus->info->parse_cdb(dev, cmd, buf, buf_len, hba_private);
    }
    return rc;
}

static SCSIRequest *scsi_device_alloc_req(SCSIDevice *s, uint32_t tag, uint32_t lun,
                                          uint8_t *buf, void *hba_private)
{
    SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s);
    if (sc->alloc_req) {
        return sc->alloc_req(s, tag, lun, buf, hba_private);
    }

    return NULL;
}

void scsi_device_unit_attention_reported(SCSIDevice *s)
{
    SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s);
    if (sc->unit_attention_reported) {
        sc->unit_attention_reported(s);
    }
}

/* Create a scsi bus, and attach devices to it.  */
void scsi_bus_init_named(SCSIBus *bus, size_t bus_size, DeviceState *host,
                         const SCSIBusInfo *info, const char *bus_name)
{
    qbus_init(bus, bus_size, TYPE_SCSI_BUS, host, bus_name);
    bus->busnr = next_scsi_bus++;
    bus->info = info;
    qbus_set_bus_hotplug_handler(BUS(bus));
}

static void scsi_dma_restart_bh(void *opaque)
{
    SCSIDevice *s = opaque;
    SCSIRequest *req, *next;

    qemu_bh_delete(s->bh);
    s->bh = NULL;

    aio_context_acquire(blk_get_aio_context(s->conf.blk));
    QTAILQ_FOREACH_SAFE(req, &s->requests, next, next) {
        scsi_req_ref(req);
        if (req->retry) {
            req->retry = false;
            switch (req->cmd.mode) {
            case SCSI_XFER_FROM_DEV:
            case SCSI_XFER_TO_DEV:
                scsi_req_continue(req);
                break;
            case SCSI_XFER_NONE:
                scsi_req_dequeue(req);
                scsi_req_enqueue(req);
                break;
            }
        }
        scsi_req_unref(req);
    }
    aio_context_release(blk_get_aio_context(s->conf.blk));
    /* Drop the reference that was acquired in scsi_dma_restart_cb */
    object_unref(OBJECT(s));
}

void scsi_req_retry(SCSIRequest *req)
{
    /* No need to save a reference, because scsi_dma_restart_bh just
     * looks at the request list.  */
    req->retry = true;
}

static void scsi_dma_restart_cb(void *opaque, bool running, RunState state)
{
    SCSIDevice *s = opaque;

    if (!running) {
        return;
    }
    if (!s->bh) {
        AioContext *ctx = blk_get_aio_context(s->conf.blk);
        /* The reference is dropped in scsi_dma_restart_bh.  */
        object_ref(OBJECT(s));
        s->bh = aio_bh_new_guarded(ctx, scsi_dma_restart_bh, s,
                                   &DEVICE(s)->mem_reentrancy_guard);
        qemu_bh_schedule(s->bh);
    }
}

static bool scsi_bus_is_address_free(SCSIBus *bus,
                                     int channel, int target, int lun,
                                     SCSIDevice **p_dev)
{
    SCSIDevice *d;

    RCU_READ_LOCK_GUARD();
    d = do_scsi_device_find(bus, channel, target, lun, true);
    if (d && d->lun == lun) {
        if (p_dev) {
            *p_dev = d;
        }
        return false;
    }
    if (p_dev) {
        *p_dev = NULL;
    }
    return true;
}

static bool scsi_bus_check_address(BusState *qbus, DeviceState *qdev, Error **errp)
{
    SCSIDevice *dev = SCSI_DEVICE(qdev);
    SCSIBus *bus = SCSI_BUS(qbus);

    if (dev->channel > bus->info->max_channel) {
        error_setg(errp, "bad scsi channel id: %d", dev->channel);
        return false;
    }
    if (dev->id != -1 && dev->id > bus->info->max_target) {
        error_setg(errp, "bad scsi device id: %d", dev->id);
        return false;
    }
    if (dev->lun != -1 && dev->lun > bus->info->max_lun) {
        error_setg(errp, "bad scsi device lun: %d", dev->lun);
        return false;
    }

    if (dev->id != -1 && dev->lun != -1) {
        SCSIDevice *d;
        if (!scsi_bus_is_address_free(bus, dev->channel, dev->id, dev->lun, &d)) {
            error_setg(errp, "lun already used by '%s'", d->qdev.id);
            return false;
        }
    }

    return true;
}

static void scsi_qdev_realize(DeviceState *qdev, Error **errp)
{
    SCSIDevice *dev = SCSI_DEVICE(qdev);
    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, dev->qdev.parent_bus);
    bool is_free;
    Error *local_err = NULL;

    if (dev->id == -1) {
        int id = -1;
        if (dev->lun == -1) {
            dev->lun = 0;
        }
        do {
            is_free = scsi_bus_is_address_free(bus, dev->channel, ++id, dev->lun, NULL);
        } while (!is_free && id < bus->info->max_target);
        if (!is_free) {
target"); 268 return; 269 } 270 dev->id = id; 271 } else if (dev->lun == -1) { 272 int lun = -1; 273 do { 274 is_free = scsi_bus_is_address_free(bus, dev->channel, dev->id, ++lun, NULL); 275 } while (!is_free && lun < bus->info->max_lun); 276 if (!is_free) { 277 error_setg(errp, "no free lun"); 278 return; 279 } 280 dev->lun = lun; 281 } 282 283 QTAILQ_INIT(&dev->requests); 284 scsi_device_realize(dev, &local_err); 285 if (local_err) { 286 error_propagate(errp, local_err); 287 return; 288 } 289 dev->vmsentry = qdev_add_vm_change_state_handler(DEVICE(dev), 290 scsi_dma_restart_cb, dev); 291 } 292 293 static void scsi_qdev_unrealize(DeviceState *qdev) 294 { 295 SCSIDevice *dev = SCSI_DEVICE(qdev); 296 297 if (dev->vmsentry) { 298 qemu_del_vm_change_state_handler(dev->vmsentry); 299 } 300 301 scsi_device_purge_requests(dev, SENSE_CODE(NO_SENSE)); 302 303 scsi_device_unrealize(dev); 304 305 blockdev_mark_auto_del(dev->conf.blk); 306 } 307 308 /* handle legacy '-drive if=scsi,...' cmd line args */ 309 SCSIDevice *scsi_bus_legacy_add_drive(SCSIBus *bus, BlockBackend *blk, 310 int unit, bool removable, int bootindex, 311 bool share_rw, 312 BlockdevOnError rerror, 313 BlockdevOnError werror, 314 const char *serial, Error **errp) 315 { 316 const char *driver; 317 char *name; 318 DeviceState *dev; 319 DriveInfo *dinfo; 320 321 if (blk_is_sg(blk)) { 322 driver = "scsi-generic"; 323 } else { 324 dinfo = blk_legacy_dinfo(blk); 325 if (dinfo && dinfo->media_cd) { 326 driver = "scsi-cd"; 327 } else { 328 driver = "scsi-hd"; 329 } 330 } 331 dev = qdev_new(driver); 332 name = g_strdup_printf("legacy[%d]", unit); 333 object_property_add_child(OBJECT(bus), name, OBJECT(dev)); 334 g_free(name); 335 336 qdev_prop_set_uint32(dev, "scsi-id", unit); 337 if (bootindex >= 0) { 338 object_property_set_int(OBJECT(dev), "bootindex", bootindex, 339 &error_abort); 340 } 341 if (object_property_find(OBJECT(dev), "removable")) { 342 qdev_prop_set_bit(dev, "removable", removable); 343 } 344 if (serial && object_property_find(OBJECT(dev), "serial")) { 345 qdev_prop_set_string(dev, "serial", serial); 346 } 347 if (!qdev_prop_set_drive_err(dev, "drive", blk, errp)) { 348 object_unparent(OBJECT(dev)); 349 return NULL; 350 } 351 if (!object_property_set_bool(OBJECT(dev), "share-rw", share_rw, errp)) { 352 object_unparent(OBJECT(dev)); 353 return NULL; 354 } 355 356 qdev_prop_set_enum(dev, "rerror", rerror); 357 qdev_prop_set_enum(dev, "werror", werror); 358 359 if (!qdev_realize_and_unref(dev, &bus->qbus, errp)) { 360 object_unparent(OBJECT(dev)); 361 return NULL; 362 } 363 return SCSI_DEVICE(dev); 364 } 365 366 void scsi_bus_legacy_handle_cmdline(SCSIBus *bus) 367 { 368 Location loc; 369 DriveInfo *dinfo; 370 int unit; 371 372 loc_push_none(&loc); 373 for (unit = 0; unit <= bus->info->max_target; unit++) { 374 dinfo = drive_get(IF_SCSI, bus->busnr, unit); 375 if (dinfo == NULL) { 376 continue; 377 } 378 qemu_opts_loc_restore(dinfo->opts); 379 scsi_bus_legacy_add_drive(bus, blk_by_legacy_dinfo(dinfo), 380 unit, false, -1, false, 381 BLOCKDEV_ON_ERROR_AUTO, 382 BLOCKDEV_ON_ERROR_AUTO, 383 NULL, &error_fatal); 384 } 385 loc_pop(&loc); 386 } 387 388 static int32_t scsi_invalid_field(SCSIRequest *req, uint8_t *buf) 389 { 390 scsi_req_build_sense(req, SENSE_CODE(INVALID_FIELD)); 391 scsi_req_complete(req, CHECK_CONDITION); 392 return 0; 393 } 394 395 static const struct SCSIReqOps reqops_invalid_field = { 396 .size = sizeof(SCSIRequest), 397 .send_command = scsi_invalid_field 398 }; 399 400 /* SCSIReqOps implementation for invalid 
/* SCSIReqOps implementation for invalid commands.  */

static int32_t scsi_invalid_command(SCSIRequest *req, uint8_t *buf)
{
    scsi_req_build_sense(req, SENSE_CODE(INVALID_OPCODE));
    scsi_req_complete(req, CHECK_CONDITION);
    return 0;
}

static const struct SCSIReqOps reqops_invalid_opcode = {
    .size         = sizeof(SCSIRequest),
    .send_command = scsi_invalid_command
};

/* SCSIReqOps implementation for unit attention conditions.  */

static int32_t scsi_unit_attention(SCSIRequest *req, uint8_t *buf)
{
    if (req->dev->unit_attention.key == UNIT_ATTENTION) {
        scsi_req_build_sense(req, req->dev->unit_attention);
    } else if (req->bus->unit_attention.key == UNIT_ATTENTION) {
        scsi_req_build_sense(req, req->bus->unit_attention);
    }
    scsi_req_complete(req, CHECK_CONDITION);
    return 0;
}

static const struct SCSIReqOps reqops_unit_attention = {
    .size         = sizeof(SCSIRequest),
    .send_command = scsi_unit_attention
};

/* SCSIReqOps implementation for REPORT LUNS and for commands sent to
   an invalid LUN.  */

typedef struct SCSITargetReq SCSITargetReq;

struct SCSITargetReq {
    SCSIRequest req;
    int len;
    uint8_t *buf;
    int buf_len;
};

static void store_lun(uint8_t *outbuf, int lun)
{
    if (lun < 256) {
        /* Simple logical unit addressing method */
        outbuf[0] = 0;
        outbuf[1] = lun;
    } else {
        /* Flat space addressing method */
        outbuf[0] = 0x40 | (lun >> 8);
        outbuf[1] = (lun & 255);
    }
}

static bool scsi_target_emulate_report_luns(SCSITargetReq *r)
{
    BusChild *kid;
    int channel, id;
    uint8_t tmp[8] = {0};
    int len = 0;
    GByteArray *buf;

    if (r->req.cmd.xfer < 16) {
        return false;
    }
    if (r->req.cmd.buf[2] > 2) {
        return false;
    }

    /* reserve space for 63 LUNs */
    buf = g_byte_array_sized_new(512);

    channel = r->req.dev->channel;
    id = r->req.dev->id;

    /* add size (will be updated later to the correct value) */
    g_byte_array_append(buf, tmp, 8);
    len += 8;

    /* add LUN0 */
    g_byte_array_append(buf, tmp, 8);
    len += 8;

    WITH_RCU_READ_LOCK_GUARD() {
        QTAILQ_FOREACH_RCU(kid, &r->req.bus->qbus.children, sibling) {
            DeviceState *qdev = kid->child;
            SCSIDevice *dev = SCSI_DEVICE(qdev);

            if (dev->channel == channel && dev->id == id && dev->lun != 0) {
                store_lun(tmp, dev->lun);
                g_byte_array_append(buf, tmp, 8);
                len += 8;
            }
        }
    }

    r->buf_len = len;
    r->buf = g_byte_array_free(buf, FALSE);
    r->len = MIN(len, r->req.cmd.xfer & ~7);

    /* store the LUN list length */
    stl_be_p(&r->buf[0], len - 8);
    return true;
}

static bool scsi_target_emulate_inquiry(SCSITargetReq *r)
{
    assert(r->req.dev->lun != r->req.lun);

    scsi_target_alloc_buf(&r->req, SCSI_INQUIRY_LEN);

    if (r->req.cmd.buf[1] & 0x2) {
        /* Command support data - optional, not implemented */
        return false;
    }

    if (r->req.cmd.buf[1] & 0x1) {
        /* Vital product data */
        uint8_t page_code = r->req.cmd.buf[2];
        r->buf[r->len++] = page_code; /* this page */
        r->buf[r->len++] = 0x00;

        switch (page_code) {
        case 0x00: /* Supported page codes, mandatory */
        {
            int pages;
            pages = r->len++;
            r->buf[r->len++] = 0x00; /* list of supported pages (this page) */
            r->buf[pages] = r->len - pages - 1; /* number of pages */
            break;
        }
        default:
            return false;
        }
        /* done with EVPD */
        assert(r->len < r->buf_len);
        r->len = MIN(r->req.cmd.xfer, r->len);
        return true;
    }

    /* Standard INQUIRY data */
    if (r->req.cmd.buf[2] != 0) {
        return false;
    }

    /* PAGE CODE == 0 */
    r->len = MIN(r->req.cmd.xfer, SCSI_INQUIRY_LEN);
    memset(r->buf, 0, r->len);
    if (r->req.lun != 0) {
        r->buf[0] = TYPE_NO_LUN;
    } else {
        r->buf[0] = TYPE_NOT_PRESENT | TYPE_INACTIVE;
        r->buf[2] = 5; /* Version */
        r->buf[3] = 2 | 0x10; /* HiSup, response data format */
        r->buf[4] = r->len - 5; /* Additional Length = (Len - 1) - 4 */
        r->buf[7] = 0x10 | (r->req.bus->info->tcq ? 0x02 : 0); /* Sync, TCQ.  */
        memcpy(&r->buf[8], "QEMU    ", 8);
        memcpy(&r->buf[16], "QEMU TARGET     ", 16);
        pstrcpy((char *) &r->buf[32], 4, qemu_hw_version());
    }
    return true;
}

static size_t scsi_sense_len(SCSIRequest *req)
{
    if (req->dev->type == TYPE_SCANNER)
        return SCSI_SENSE_LEN_SCANNER;
    else
        return SCSI_SENSE_LEN;
}

static int32_t scsi_target_send_command(SCSIRequest *req, uint8_t *buf)
{
    SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);
    int fixed_sense = (req->cmd.buf[1] & 1) == 0;

    if (req->lun != 0 &&
        buf[0] != INQUIRY && buf[0] != REQUEST_SENSE) {
        scsi_req_build_sense(req, SENSE_CODE(LUN_NOT_SUPPORTED));
        scsi_req_complete(req, CHECK_CONDITION);
        return 0;
    }
    switch (buf[0]) {
    case REPORT_LUNS:
        if (!scsi_target_emulate_report_luns(r)) {
            goto illegal_request;
        }
        break;
    case INQUIRY:
        if (!scsi_target_emulate_inquiry(r)) {
            goto illegal_request;
        }
        break;
    case REQUEST_SENSE:
        scsi_target_alloc_buf(&r->req, scsi_sense_len(req));
        if (req->lun != 0) {
            const struct SCSISense sense = SENSE_CODE(LUN_NOT_SUPPORTED);

            r->len = scsi_build_sense_buf(r->buf, req->cmd.xfer,
                                          sense, fixed_sense);
        } else {
            r->len = scsi_device_get_sense(r->req.dev, r->buf,
                                           MIN(req->cmd.xfer, r->buf_len),
                                           fixed_sense);
        }
        if (r->req.dev->sense_is_ua) {
            scsi_device_unit_attention_reported(req->dev);
            r->req.dev->sense_len = 0;
            r->req.dev->sense_is_ua = false;
        }
        break;
    case TEST_UNIT_READY:
        break;
    default:
        scsi_req_build_sense(req, SENSE_CODE(INVALID_OPCODE));
        scsi_req_complete(req, CHECK_CONDITION);
        return 0;
    illegal_request:
        scsi_req_build_sense(req, SENSE_CODE(INVALID_FIELD));
        scsi_req_complete(req, CHECK_CONDITION);
        return 0;
    }

    if (!r->len) {
        scsi_req_complete(req, GOOD);
    }
    return r->len;
}

static void scsi_target_read_data(SCSIRequest *req)
{
    SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);
    uint32_t n;

    n = r->len;
    if (n > 0) {
        r->len = 0;
        scsi_req_data(&r->req, n);
    } else {
        scsi_req_complete(&r->req, GOOD);
    }
}

static uint8_t *scsi_target_get_buf(SCSIRequest *req)
{
    SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);

    return r->buf;
}

static uint8_t *scsi_target_alloc_buf(SCSIRequest *req, size_t len)
{
    SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);

    r->buf = g_malloc(len);
    r->buf_len = len;

    return r->buf;
}

static void scsi_target_free_buf(SCSIRequest *req)
{
    SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);

    g_free(r->buf);
}

static const struct SCSIReqOps reqops_target_command = {
    .size         = sizeof(SCSITargetReq),
    .send_command = scsi_target_send_command,
    .read_data    = scsi_target_read_data,
    .get_buf      = scsi_target_get_buf,
    .free_req     = scsi_target_free_buf,
};


SCSIRequest *scsi_req_alloc(const SCSIReqOps *reqops, SCSIDevice *d,
                            uint32_t tag, uint32_t lun, void *hba_private)
{
    SCSIRequest *req;
    SCSIBus *bus = scsi_bus_from_device(d);
    BusState *qbus = BUS(bus);
    const int memset_off = offsetof(SCSIRequest, sense)
                           + sizeof(req->sense);

    req = g_malloc(reqops->size);
    memset((uint8_t *)req + memset_off, 0, reqops->size - memset_off);
    req->refcount = 1;
    req->bus = bus;
    req->dev = d;
    req->tag = tag;
    req->lun = lun;
    req->hba_private = hba_private;
    req->status = -1;
    req->host_status = -1;
    req->ops = reqops;
    object_ref(OBJECT(d));
    object_ref(OBJECT(qbus->parent));
    notifier_list_init(&req->cancel_notifiers);
    trace_scsi_req_alloc(req->dev->id, req->lun, req->tag);
    return req;
}

SCSIRequest *scsi_req_new(SCSIDevice *d, uint32_t tag, uint32_t lun,
                          uint8_t *buf, size_t buf_len, void *hba_private)
{
    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, d->qdev.parent_bus);
    const SCSIReqOps *ops;
    SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(d);
    SCSIRequest *req;
    SCSICommand cmd = { .len = 0 };
    int ret;

    if (buf_len == 0) {
        trace_scsi_req_parse_bad(d->id, lun, tag, 0);
        goto invalid_opcode;
    }

    if ((d->unit_attention.key == UNIT_ATTENTION ||
         bus->unit_attention.key == UNIT_ATTENTION) &&
        (buf[0] != INQUIRY &&
         buf[0] != REPORT_LUNS &&
         buf[0] != GET_CONFIGURATION &&
         buf[0] != GET_EVENT_STATUS_NOTIFICATION &&

         /*
          * If we already have a pending unit attention condition,
          * report this one before triggering another one.
          */
         !(buf[0] == REQUEST_SENSE && d->sense_is_ua))) {
        ops = &reqops_unit_attention;
    } else if (lun != d->lun ||
               buf[0] == REPORT_LUNS ||
               (buf[0] == REQUEST_SENSE && d->sense_len)) {
        ops = &reqops_target_command;
    } else {
        ops = NULL;
    }

    if (ops != NULL || !sc->parse_cdb) {
        ret = scsi_req_parse_cdb(d, &cmd, buf, buf_len);
    } else {
        ret = sc->parse_cdb(d, &cmd, buf, buf_len, hba_private);
    }

    if (ret != 0) {
        trace_scsi_req_parse_bad(d->id, lun, tag, buf[0]);
invalid_opcode:
        req = scsi_req_alloc(&reqops_invalid_opcode, d, tag, lun, hba_private);
    } else {
        assert(cmd.len != 0);
        trace_scsi_req_parsed(d->id, lun, tag, buf[0],
                              cmd.mode, cmd.xfer);
        if (cmd.lba != -1) {
            trace_scsi_req_parsed_lba(d->id, lun, tag, buf[0],
                                      cmd.lba);
        }

        if (cmd.xfer > INT32_MAX) {
            req = scsi_req_alloc(&reqops_invalid_field, d, tag, lun, hba_private);
        } else if (ops) {
            req = scsi_req_alloc(ops, d, tag, lun, hba_private);
        } else {
            req = scsi_device_alloc_req(d, tag, lun, buf, hba_private);
        }
    }

    req->cmd = cmd;
    req->residual = req->cmd.xfer;

    switch (buf[0]) {
    case INQUIRY:
        trace_scsi_inquiry(d->id, lun, tag, cmd.buf[1], cmd.buf[2]);
        break;
    case TEST_UNIT_READY:
        trace_scsi_test_unit_ready(d->id, lun, tag);
        break;
    case REPORT_LUNS:
        trace_scsi_report_luns(d->id, lun, tag);
        break;
    case REQUEST_SENSE:
        trace_scsi_request_sense(d->id, lun, tag);
        break;
    default:
        break;
    }

    return req;
}

uint8_t *scsi_req_get_buf(SCSIRequest *req)
{
    return req->ops->get_buf(req);
}

static void scsi_clear_unit_attention(SCSIRequest *req)
{
    SCSISense *ua;
    if (req->dev->unit_attention.key != UNIT_ATTENTION &&
        req->bus->unit_attention.key != UNIT_ATTENTION) {
        return;
    }

    /*
     * If an INQUIRY command enters the enabled command state,
     * the device server shall [not] clear any unit attention condition;
     * See also MMC-6, paragraphs 6.5 and 6.6.2.
     */
    if (req->cmd.buf[0] == INQUIRY ||
        req->cmd.buf[0] == GET_CONFIGURATION ||
        req->cmd.buf[0] == GET_EVENT_STATUS_NOTIFICATION) {
        return;
    }

    if (req->dev->unit_attention.key == UNIT_ATTENTION) {
        ua = &req->dev->unit_attention;
    } else {
        ua = &req->bus->unit_attention;
    }

    /*
     * If a REPORT LUNS command enters the enabled command state, [...]
     * the device server shall clear any pending unit attention condition
     * with an additional sense code of REPORTED LUNS DATA HAS CHANGED.
     */
    if (req->cmd.buf[0] == REPORT_LUNS &&
        !(ua->asc == SENSE_CODE(REPORTED_LUNS_CHANGED).asc &&
          ua->ascq == SENSE_CODE(REPORTED_LUNS_CHANGED).ascq)) {
        return;
    }

    *ua = SENSE_CODE(NO_SENSE);
}

int scsi_req_get_sense(SCSIRequest *req, uint8_t *buf, int len)
{
    int ret;

    assert(len >= 14);
    if (!req->sense_len) {
        return 0;
    }

    ret = scsi_convert_sense(req->sense, req->sense_len, buf, len, true);

    /*
     * FIXME: clearing unit attention conditions upon autosense should be done
     * only if the UA_INTLCK_CTRL field in the Control mode page is set to 00b
     * (SAM-5, 5.14).
     *
     * We assume UA_INTLCK_CTRL to be 00b for HBAs that support autosense, and
     * 10b for HBAs that do not support it (do not call scsi_req_get_sense).
     * Here we handle unit attention clearing for UA_INTLCK_CTRL == 00b.
     */
    if (req->dev->sense_is_ua) {
        scsi_device_unit_attention_reported(req->dev);
        req->dev->sense_len = 0;
        req->dev->sense_is_ua = false;
    }
    return ret;
}

int scsi_device_get_sense(SCSIDevice *dev, uint8_t *buf, int len, bool fixed)
{
    return scsi_convert_sense(dev->sense, dev->sense_len, buf, len, fixed);
}

void scsi_req_build_sense(SCSIRequest *req, SCSISense sense)
{
    trace_scsi_req_build_sense(req->dev->id, req->lun, req->tag,
                               sense.key, sense.asc, sense.ascq);
    req->sense_len = scsi_build_sense(req->sense, sense);
}

static void scsi_req_enqueue_internal(SCSIRequest *req)
{
    assert(!req->enqueued);
    scsi_req_ref(req);
    if (req->bus->info->get_sg_list) {
        req->sg = req->bus->info->get_sg_list(req);
    } else {
        req->sg = NULL;
    }
    req->enqueued = true;
    QTAILQ_INSERT_TAIL(&req->dev->requests, req, next);
}

int32_t scsi_req_enqueue(SCSIRequest *req)
{
    int32_t rc;

    assert(!req->retry);
    scsi_req_enqueue_internal(req);
    scsi_req_ref(req);
    rc = req->ops->send_command(req, req->cmd.buf);
    scsi_req_unref(req);
    return rc;
}

static void scsi_req_dequeue(SCSIRequest *req)
{
    trace_scsi_req_dequeue(req->dev->id, req->lun, req->tag);
    req->retry = false;
    if (req->enqueued) {
        QTAILQ_REMOVE(&req->dev->requests, req, next);
        req->enqueued = false;
        scsi_req_unref(req);
    }
}

static int scsi_get_performance_length(int num_desc, int type, int data_type)
{
    /* MMC-6, paragraph 6.7.  */
    switch (type) {
    case 0:
        if ((data_type & 3) == 0) {
            /* Each descriptor is as in Table 295 - Nominal performance.  */
            return 16 * num_desc + 8;
        } else {
            /* Each descriptor is as in Table 296 - Exceptions.  */
            return 6 * num_desc + 8;
        }
    case 1:
    case 4:
    case 5:
        return 8 * num_desc + 8;
    case 2:
        return 2048 * num_desc + 8;
    case 3:
        return 16 * num_desc + 8;
    default:
        return 8;
    }
}

static int ata_passthrough_xfer_unit(SCSIDevice *dev, uint8_t *buf)
{
    int byte_block = (buf[2] >> 2) & 0x1;
    int type = (buf[2] >> 4) & 0x1;
    int xfer_unit;

    if (byte_block) {
        if (type) {
            xfer_unit = dev->blocksize;
        } else {
            xfer_unit = 512;
        }
    } else {
        xfer_unit = 1;
    }

    return xfer_unit;
}

static int ata_passthrough_12_xfer(SCSIDevice *dev, uint8_t *buf)
{
    int length = buf[2] & 0x3;
    int xfer;
    int unit = ata_passthrough_xfer_unit(dev, buf);

    switch (length) {
    case 0:
    case 3: /* USB-specific.  */
    default:
        xfer = 0;
        break;
    case 1:
        xfer = buf[3];
        break;
    case 2:
        xfer = buf[4];
        break;
    }

    return xfer * unit;
}

static int ata_passthrough_16_xfer(SCSIDevice *dev, uint8_t *buf)
{
    int extend = buf[1] & 0x1;
    int length = buf[2] & 0x3;
    int xfer;
    int unit = ata_passthrough_xfer_unit(dev, buf);

    switch (length) {
    case 0:
    case 3: /* USB-specific.  */
    default:
        xfer = 0;
        break;
    case 1:
        xfer = buf[4];
        xfer |= (extend ? buf[3] << 8 : 0);
        break;
    case 2:
        xfer = buf[6];
        xfer |= (extend ? buf[5] << 8 : 0);
        break;
    }

    return xfer * unit;
}

static int scsi_req_xfer(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
{
    cmd->xfer = scsi_cdb_xfer(buf);
    switch (buf[0]) {
    case TEST_UNIT_READY:
    case REWIND:
    case START_STOP:
    case SET_CAPACITY:
    case WRITE_FILEMARKS:
    case WRITE_FILEMARKS_16:
    case SPACE:
    case RESERVE:
    case RELEASE:
    case ERASE:
    case ALLOW_MEDIUM_REMOVAL:
    case SEEK_10:
    case SYNCHRONIZE_CACHE:
    case SYNCHRONIZE_CACHE_16:
    case LOCATE_16:
    case LOCK_UNLOCK_CACHE:
    case SET_CD_SPEED:
    case SET_LIMITS:
    case WRITE_LONG_10:
    case UPDATE_BLOCK:
    case RESERVE_TRACK:
    case SET_READ_AHEAD:
    case PRE_FETCH:
    case PRE_FETCH_16:
    case ALLOW_OVERWRITE:
        cmd->xfer = 0;
        break;
    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
        if ((buf[1] & 2) == 0) {
            cmd->xfer = 0;
        } else if ((buf[1] & 4) != 0) {
            cmd->xfer = 1;
        }
        cmd->xfer *= dev->blocksize;
        break;
    case MODE_SENSE:
        break;
    case WRITE_SAME_10:
    case WRITE_SAME_16:
        cmd->xfer = buf[1] & 1 ? 0 : dev->blocksize;
        break;
    case READ_CAPACITY_10:
        cmd->xfer = 8;
        break;
    case READ_BLOCK_LIMITS:
        cmd->xfer = 6;
        break;
    case SEND_VOLUME_TAG:
        /* GPCMD_SET_STREAMING from multimedia commands.  */
        if (dev->type == TYPE_ROM) {
            cmd->xfer = buf[10] | (buf[9] << 8);
        } else {
            cmd->xfer = buf[9] | (buf[8] << 8);
        }
        break;
    case WRITE_6:
        /* length 0 means 256 blocks */
        if (cmd->xfer == 0) {
            cmd->xfer = 256;
        }
        /* fall through */
    case WRITE_10:
    case WRITE_VERIFY_10:
    case WRITE_12:
    case WRITE_VERIFY_12:
    case WRITE_16:
    case WRITE_VERIFY_16:
        cmd->xfer *= dev->blocksize;
        break;
    case READ_6:
    case READ_REVERSE:
        /* length 0 means 256 blocks */
        if (cmd->xfer == 0) {
            cmd->xfer = 256;
        }
        /* fall through */
    case READ_10:
    case READ_12:
    case READ_16:
        cmd->xfer *= dev->blocksize;
        break;
    case FORMAT_UNIT:
        /* MMC mandates the parameter list to be 12-bytes long.  Parameters
         * for block devices are restricted to the header right now.  */
        if (dev->type == TYPE_ROM && (buf[1] & 16)) {
            cmd->xfer = 12;
        } else {
            cmd->xfer = (buf[1] & 16) == 0 ? 0 : (buf[1] & 32 ? 8 : 4);
        }
        break;
    case INQUIRY:
    case RECEIVE_DIAGNOSTIC:
    case SEND_DIAGNOSTIC:
        cmd->xfer = buf[4] | (buf[3] << 8);
        break;
    case READ_CD:
    case READ_BUFFER:
    case WRITE_BUFFER:
    case SEND_CUE_SHEET:
        cmd->xfer = buf[8] | (buf[7] << 8) | (buf[6] << 16);
        break;
    case PERSISTENT_RESERVE_OUT:
        cmd->xfer = ldl_be_p(&buf[5]) & 0xffffffffULL;
        break;
    case ERASE_12:
        if (dev->type == TYPE_ROM) {
            /* MMC command GET PERFORMANCE.  */
            cmd->xfer = scsi_get_performance_length(buf[9] | (buf[8] << 8),
                                                    buf[10], buf[1] & 0x1f);
        }
        break;
    case MECHANISM_STATUS:
    case READ_DVD_STRUCTURE:
    case SEND_DVD_STRUCTURE:
    case MAINTENANCE_OUT:
    case MAINTENANCE_IN:
        if (dev->type == TYPE_ROM) {
            /* GPCMD_REPORT_KEY and GPCMD_SEND_KEY from multi media commands */
            cmd->xfer = buf[9] | (buf[8] << 8);
        }
        break;
    case ATA_PASSTHROUGH_12:
        if (dev->type == TYPE_ROM) {
            /* BLANK command of MMC */
            cmd->xfer = 0;
        } else {
            cmd->xfer = ata_passthrough_12_xfer(dev, buf);
        }
        break;
    case ATA_PASSTHROUGH_16:
        cmd->xfer = ata_passthrough_16_xfer(dev, buf);
        break;
    }
    return 0;
}

static int scsi_req_stream_xfer(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
{
    switch (buf[0]) {
    /* stream commands */
    case ERASE_12:
    case ERASE_16:
        cmd->xfer = 0;
        break;
    case READ_6:
    case READ_REVERSE:
    case RECOVER_BUFFERED_DATA:
    case WRITE_6:
        cmd->xfer = buf[4] | (buf[3] << 8) | (buf[2] << 16);
        if (buf[1] & 0x01) { /* fixed */
            cmd->xfer *= dev->blocksize;
        }
        break;
    case READ_16:
    case READ_REVERSE_16:
    case VERIFY_16:
    case WRITE_16:
        cmd->xfer = buf[14] | (buf[13] << 8) | (buf[12] << 16);
        if (buf[1] & 0x01) { /* fixed */
            cmd->xfer *= dev->blocksize;
        }
        break;
    case REWIND:
    case LOAD_UNLOAD:
        cmd->xfer = 0;
        break;
    case SPACE_16:
        cmd->xfer = buf[13] | (buf[12] << 8);
        break;
    case READ_POSITION:
        switch (buf[1] & 0x1f) /* operation code */ {
        case SHORT_FORM_BLOCK_ID:
        case SHORT_FORM_VENDOR_SPECIFIC:
            cmd->xfer = 20;
            break;
        case LONG_FORM:
            cmd->xfer = 32;
            break;
        case EXTENDED_FORM:
            cmd->xfer = buf[8] | (buf[7] << 8);
            break;
        default:
            return -1;
        }

        break;
    case FORMAT_UNIT:
        cmd->xfer = buf[4] | (buf[3] << 8);
        break;
    /* generic commands */
    default:
        return scsi_req_xfer(cmd, dev, buf);
    }
    return 0;
}

static int scsi_req_medium_changer_xfer(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
{
    switch (buf[0]) {
    /* medium changer commands */
    case EXCHANGE_MEDIUM:
    case INITIALIZE_ELEMENT_STATUS:
    case INITIALIZE_ELEMENT_STATUS_WITH_RANGE:
    case MOVE_MEDIUM:
    case POSITION_TO_ELEMENT:
        cmd->xfer = 0;
        break;
    case READ_ELEMENT_STATUS:
        cmd->xfer = buf[9] | (buf[8] << 8) | (buf[7] << 16);
        break;

    /* generic commands */
    default:
        return scsi_req_xfer(cmd, dev, buf);
    }
    return 0;
}

static int scsi_req_scanner_length(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
{
    switch (buf[0]) {
    /* Scanner commands */
    case OBJECT_POSITION:
        cmd->xfer = 0;
        break;
    case SCAN:
        cmd->xfer = buf[4];
        break;
    case READ_10:
    case SEND:
    case GET_WINDOW:
    case SET_WINDOW:
        cmd->xfer = buf[8] | (buf[7] << 8) | (buf[6] << 16);
        break;
    default:
        /* GET_DATA_BUFFER_STATUS xfer handled by scsi_req_xfer */
        return scsi_req_xfer(cmd, dev, buf);
    }

    return 0;
}

static void scsi_cmd_xfer_mode(SCSICommand *cmd)
{
    if (!cmd->xfer) {
        cmd->mode = SCSI_XFER_NONE;
        return;
    }
    switch (cmd->buf[0]) {
    case WRITE_6:
    case WRITE_10:
    case WRITE_VERIFY_10:
    case WRITE_12:
    case WRITE_VERIFY_12:
    case WRITE_16:
    case WRITE_VERIFY_16:
    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
    case COPY:
    case COPY_VERIFY:
    case COMPARE:
    case CHANGE_DEFINITION:
    case LOG_SELECT:
    case MODE_SELECT:
    case MODE_SELECT_10:
    case SEND_DIAGNOSTIC:
    case WRITE_BUFFER:
    case FORMAT_UNIT:
    case REASSIGN_BLOCKS:
    case SEARCH_EQUAL:
    case SEARCH_HIGH:
    case SEARCH_LOW:
    case UPDATE_BLOCK:
    case WRITE_LONG_10:
    case WRITE_SAME_10:
    case WRITE_SAME_16:
    case UNMAP:
    case SEARCH_HIGH_12:
    case SEARCH_EQUAL_12:
    case SEARCH_LOW_12:
    case MEDIUM_SCAN:
    case SEND_VOLUME_TAG:
    case SEND_CUE_SHEET:
    case SEND_DVD_STRUCTURE:
    case PERSISTENT_RESERVE_OUT:
    case MAINTENANCE_OUT:
    case SET_WINDOW:
    case SCAN:
        /* SCAN conflicts with START_STOP.  START_STOP has cmd->xfer set to 0 for
         * non-scanner devices, so we only get here for SCAN and not for START_STOP.
         */
        cmd->mode = SCSI_XFER_TO_DEV;
        break;
    case ATA_PASSTHROUGH_12:
    case ATA_PASSTHROUGH_16:
        /* T_DIR */
        cmd->mode = (cmd->buf[2] & 0x8) ?
                    SCSI_XFER_FROM_DEV : SCSI_XFER_TO_DEV;
        break;
    default:
        cmd->mode = SCSI_XFER_FROM_DEV;
        break;
    }
}

int scsi_req_parse_cdb(SCSIDevice *dev, SCSICommand *cmd, uint8_t *buf,
                       size_t buf_len)
{
    int rc;
    int len;

    cmd->lba = -1;
    len = scsi_cdb_length(buf);
    if (len < 0 || len > buf_len) {
        return -1;
    }

    cmd->len = len;
    switch (dev->type) {
    case TYPE_TAPE:
        rc = scsi_req_stream_xfer(cmd, dev, buf);
        break;
    case TYPE_MEDIUM_CHANGER:
        rc = scsi_req_medium_changer_xfer(cmd, dev, buf);
        break;
    case TYPE_SCANNER:
        rc = scsi_req_scanner_length(cmd, dev, buf);
        break;
    default:
        rc = scsi_req_xfer(cmd, dev, buf);
        break;
    }

    if (rc != 0)
        return rc;

    memcpy(cmd->buf, buf, cmd->len);
    scsi_cmd_xfer_mode(cmd);
    cmd->lba = scsi_cmd_lba(cmd);
    return 0;
}

void scsi_device_report_change(SCSIDevice *dev, SCSISense sense)
{
    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, dev->qdev.parent_bus);

    scsi_device_set_ua(dev, sense);
    if (bus->info->change) {
        bus->info->change(bus, dev, sense);
    }
}

SCSIRequest *scsi_req_ref(SCSIRequest *req)
{
    assert(req->refcount > 0);
    req->refcount++;
    return req;
}

void scsi_req_unref(SCSIRequest *req)
{
    assert(req->refcount > 0);
    if (--req->refcount == 0) {
        BusState *qbus = req->dev->qdev.parent_bus;
        SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, qbus);

        if (bus->info->free_request && req->hba_private) {
            bus->info->free_request(bus, req->hba_private);
        }
        if (req->ops->free_req) {
            req->ops->free_req(req);
        }
        object_unref(OBJECT(req->dev));
        object_unref(OBJECT(qbus->parent));
        g_free(req);
    }
}

/* Tell the device that we finished processing this chunk of I/O.  It
   will start the next chunk or complete the command.  */
void scsi_req_continue(SCSIRequest *req)
{
    if (req->io_canceled) {
        trace_scsi_req_continue_canceled(req->dev->id, req->lun, req->tag);
        return;
    }
    trace_scsi_req_continue(req->dev->id, req->lun, req->tag);
    if (req->cmd.mode == SCSI_XFER_TO_DEV) {
        req->ops->write_data(req);
    } else {
        req->ops->read_data(req);
    }
}

/* Called by the devices when data is ready for the HBA.  The HBA should
   start a DMA operation to read or fill the device's data buffer.
   Once it completes, calling scsi_req_continue will restart I/O.  */
void scsi_req_data(SCSIRequest *req, int len)
{
    uint8_t *buf;
    if (req->io_canceled) {
        trace_scsi_req_data_canceled(req->dev->id, req->lun, req->tag, len);
        return;
    }
    trace_scsi_req_data(req->dev->id, req->lun, req->tag, len);
    assert(req->cmd.mode != SCSI_XFER_NONE);
    if (!req->sg) {
        req->residual -= len;
        req->bus->info->transfer_data(req, len);
        return;
    }

    /* If the device calls scsi_req_data and the HBA specified a
     * scatter/gather list, the transfer has to happen in a single
     * step.  */
    assert(!req->dma_started);
    req->dma_started = true;

    buf = scsi_req_get_buf(req);
    if (req->cmd.mode == SCSI_XFER_FROM_DEV) {
        dma_buf_read(buf, len, &req->residual, req->sg,
                     MEMTXATTRS_UNSPECIFIED);
    } else {
        dma_buf_write(buf, len, &req->residual, req->sg,
                      MEMTXATTRS_UNSPECIFIED);
    }
    scsi_req_continue(req);
}

void scsi_req_print(SCSIRequest *req)
{
    FILE *fp = stderr;
    int i;

    fprintf(fp, "[%s id=%d] %s",
            req->dev->qdev.parent_bus->name,
            req->dev->id,
            scsi_command_name(req->cmd.buf[0]));
    for (i = 1; i < req->cmd.len; i++) {
        fprintf(fp, " 0x%02x", req->cmd.buf[i]);
    }
    switch (req->cmd.mode) {
    case SCSI_XFER_NONE:
        fprintf(fp, " - none\n");
        break;
    case SCSI_XFER_FROM_DEV:
        fprintf(fp, " - from-dev len=%zd\n", req->cmd.xfer);
        break;
    case SCSI_XFER_TO_DEV:
        fprintf(fp, " - to-dev len=%zd\n", req->cmd.xfer);
        break;
    default:
        fprintf(fp, " - Oops\n");
        break;
    }
}

void scsi_req_complete_failed(SCSIRequest *req, int host_status)
{
    SCSISense sense;
    int status;

    assert(req->status == -1 && req->host_status == -1);
    assert(req->ops != &reqops_unit_attention);

    if (!req->bus->info->fail) {
        status = scsi_sense_from_host_status(host_status, &sense);
        if (status == CHECK_CONDITION) {
            scsi_req_build_sense(req, sense);
        }
        scsi_req_complete(req, status);
        return;
    }

    req->host_status = host_status;
    scsi_req_ref(req);
    scsi_req_dequeue(req);
    req->bus->info->fail(req);

    /* Cancelled requests might end up being completed instead of cancelled */
    notifier_list_notify(&req->cancel_notifiers, req);
    scsi_req_unref(req);
}

void scsi_req_complete(SCSIRequest *req, int status)
{
    assert(req->status == -1 && req->host_status == -1);
    req->status = status;
    req->host_status = SCSI_HOST_OK;

    assert(req->sense_len <= sizeof(req->sense));
    if (status == GOOD) {
        req->sense_len = 0;
    }

    if (req->sense_len) {
        memcpy(req->dev->sense, req->sense, req->sense_len);
        req->dev->sense_len = req->sense_len;
        req->dev->sense_is_ua = (req->ops == &reqops_unit_attention);
    } else {
        req->dev->sense_len = 0;
        req->dev->sense_is_ua = false;
    }

    /*
     * Unit attention state is now stored in the device's sense buffer
     * if the HBA didn't do autosense.  Clear the pending unit attention
     * flags.
     */
    scsi_clear_unit_attention(req);

    scsi_req_ref(req);
    scsi_req_dequeue(req);
    req->bus->info->complete(req, req->residual);

    /* Cancelled requests might end up being completed instead of cancelled */
    notifier_list_notify(&req->cancel_notifiers, req);
    scsi_req_unref(req);
}

/* Called by the devices when the request is canceled.  */
void scsi_req_cancel_complete(SCSIRequest *req)
{
    assert(req->io_canceled);
    if (req->bus->info->cancel) {
        req->bus->info->cancel(req);
    }
    notifier_list_notify(&req->cancel_notifiers, req);
    scsi_req_unref(req);
}

/* Cancel @req asynchronously. @notifier is added to @req's cancellation
 * notifier list; the bus will be notified when the request's cancellation
 * is completed.
 */
void scsi_req_cancel_async(SCSIRequest *req, Notifier *notifier)
{
    trace_scsi_req_cancel(req->dev->id, req->lun, req->tag);
    if (notifier) {
        notifier_list_add(&req->cancel_notifiers, notifier);
    }
    if (req->io_canceled) {
        /* A blk_aio_cancel_async is pending; when it finishes,
         * scsi_req_cancel_complete will be called and will
         * call the notifier we just added.  Just wait for that.
         */
        assert(req->aiocb);
        return;
    }
    /* Dropped in scsi_req_cancel_complete.  */
    scsi_req_ref(req);
    scsi_req_dequeue(req);
    req->io_canceled = true;
    if (req->aiocb) {
        blk_aio_cancel_async(req->aiocb);
    } else {
        scsi_req_cancel_complete(req);
    }
}

void scsi_req_cancel(SCSIRequest *req)
{
    trace_scsi_req_cancel(req->dev->id, req->lun, req->tag);
    if (!req->enqueued) {
        return;
    }
    assert(!req->io_canceled);
    /* Dropped in scsi_req_cancel_complete.  */
    scsi_req_ref(req);
    scsi_req_dequeue(req);
    req->io_canceled = true;
    if (req->aiocb) {
        blk_aio_cancel(req->aiocb);
    } else {
        scsi_req_cancel_complete(req);
    }
}

static int scsi_ua_precedence(SCSISense sense)
{
    if (sense.key != UNIT_ATTENTION) {
        return INT_MAX;
    }
    if (sense.asc == 0x29 && sense.ascq == 0x04) {
        /* DEVICE INTERNAL RESET goes with POWER ON OCCURRED */
        return 1;
    } else if (sense.asc == 0x3F && sense.ascq == 0x01) {
        /* MICROCODE HAS BEEN CHANGED goes with SCSI BUS RESET OCCURRED */
        return 2;
    } else if (sense.asc == 0x29 && (sense.ascq == 0x05 || sense.ascq == 0x06)) {
        /* These two go with "all others". */
        ;
    } else if (sense.asc == 0x29 && sense.ascq <= 0x07) {
        /* POWER ON, RESET OR BUS DEVICE RESET OCCURRED = 0
         * POWER ON OCCURRED = 1
         * SCSI BUS RESET OCCURRED = 2
         * BUS DEVICE RESET FUNCTION OCCURRED = 3
         * I_T NEXUS LOSS OCCURRED = 7
         */
        return sense.ascq;
    } else if (sense.asc == 0x2F && sense.ascq == 0x01) {
        /* COMMANDS CLEARED BY POWER LOSS NOTIFICATION  */
        return 8;
    }
    return (sense.asc << 8) | sense.ascq;
}

void scsi_bus_set_ua(SCSIBus *bus, SCSISense sense)
{
    int prec1, prec2;
    if (sense.key != UNIT_ATTENTION) {
        return;
    }

    /*
     * Override a pre-existing unit attention condition, except for a more
     * important reset condition.
     */
    prec1 = scsi_ua_precedence(bus->unit_attention);
    prec2 = scsi_ua_precedence(sense);
    if (prec2 < prec1) {
        bus->unit_attention = sense;
    }
}

void scsi_device_set_ua(SCSIDevice *sdev, SCSISense sense)
{
    int prec1, prec2;
    if (sense.key != UNIT_ATTENTION) {
        return;
    }
    trace_scsi_device_set_ua(sdev->id, sdev->lun, sense.key,
                             sense.asc, sense.ascq);

    /*
     * Override a pre-existing unit attention condition, except for a more
     * important reset condition.
     */
    prec1 = scsi_ua_precedence(sdev->unit_attention);
    prec2 = scsi_ua_precedence(sense);
    if (prec2 < prec1) {
        sdev->unit_attention = sense;
    }
}

void scsi_device_purge_requests(SCSIDevice *sdev, SCSISense sense)
{
    SCSIRequest *req;

    aio_context_acquire(blk_get_aio_context(sdev->conf.blk));
    while (!QTAILQ_EMPTY(&sdev->requests)) {
        req = QTAILQ_FIRST(&sdev->requests);
        scsi_req_cancel_async(req, NULL);
    }
    blk_drain(sdev->conf.blk);
    aio_context_release(blk_get_aio_context(sdev->conf.blk));
    scsi_device_set_ua(sdev, sense);
}

static char *scsibus_get_dev_path(DeviceState *dev)
{
    SCSIDevice *d = SCSI_DEVICE(dev);
    DeviceState *hba = dev->parent_bus->parent;
    char *id;
    char *path;

    id = qdev_get_dev_path(hba);
    if (id) {
        path = g_strdup_printf("%s/%d:%d:%d", id, d->channel, d->id, d->lun);
    } else {
        path = g_strdup_printf("%d:%d:%d", d->channel, d->id, d->lun);
    }
    g_free(id);
    return path;
}

static char *scsibus_get_fw_dev_path(DeviceState *dev)
{
    SCSIDevice *d = SCSI_DEVICE(dev);
    return g_strdup_printf("channel@%x/%s@%x,%x", d->channel,
                           qdev_fw_name(dev), d->id, d->lun);
}

/* SCSI request list.  For simplicity, pv points to the whole device */

static int put_scsi_requests(QEMUFile *f, void *pv, size_t size,
                             const VMStateField *field, JSONWriter *vmdesc)
{
    SCSIDevice *s = pv;
    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, s->qdev.parent_bus);
    SCSIRequest *req;

    QTAILQ_FOREACH(req, &s->requests, next) {
        assert(!req->io_canceled);
        assert(req->status == -1 && req->host_status == -1);
        assert(req->enqueued);

        qemu_put_sbyte(f, req->retry ? 1 : 2);
        qemu_put_buffer(f, req->cmd.buf, sizeof(req->cmd.buf));
        qemu_put_be32s(f, &req->tag);
        qemu_put_be32s(f, &req->lun);
        if (bus->info->save_request) {
            bus->info->save_request(f, req);
        }
        if (req->ops->save_request) {
            req->ops->save_request(f, req);
        }
    }
    qemu_put_sbyte(f, 0);

    return 0;
}

static int get_scsi_requests(QEMUFile *f, void *pv, size_t size,
                             const VMStateField *field)
{
    SCSIDevice *s = pv;
    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, s->qdev.parent_bus);
    int8_t sbyte;

    while ((sbyte = qemu_get_sbyte(f)) > 0) {
        uint8_t buf[SCSI_CMD_BUF_SIZE];
        uint32_t tag;
        uint32_t lun;
        SCSIRequest *req;

        qemu_get_buffer(f, buf, sizeof(buf));
        qemu_get_be32s(f, &tag);
        qemu_get_be32s(f, &lun);
        /*
         * A too-short CDB would have been rejected by scsi_req_new, so just use
         * SCSI_CMD_BUF_SIZE as the CDB length.
         */
        req = scsi_req_new(s, tag, lun, buf, sizeof(buf), NULL);
        req->retry = (sbyte == 1);
        if (bus->info->load_request) {
            req->hba_private = bus->info->load_request(f, req);
        }
        if (req->ops->load_request) {
            req->ops->load_request(f, req);
        }

        /* Just restart it later.  */
        scsi_req_enqueue_internal(req);

        /* At this point, the request will be kept alive by the reference
         * added by scsi_req_enqueue_internal, so we can release our reference.
         * The HBA of course will add its own reference in the load_request
         * callback if it needs to hold on the SCSIRequest.
         */
        scsi_req_unref(req);
    }

    return 0;
}

static const VMStateInfo vmstate_info_scsi_requests = {
    .name = "scsi-requests",
    .get  = get_scsi_requests,
    .put  = put_scsi_requests,
};

static bool scsi_sense_state_needed(void *opaque)
{
    SCSIDevice *s = opaque;

    return s->sense_len > SCSI_SENSE_BUF_SIZE_OLD;
}

static const VMStateDescription vmstate_scsi_sense_state = {
    .name = "SCSIDevice/sense",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = scsi_sense_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8_SUB_ARRAY(sense, SCSIDevice,
                                SCSI_SENSE_BUF_SIZE_OLD,
                                SCSI_SENSE_BUF_SIZE - SCSI_SENSE_BUF_SIZE_OLD),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_scsi_device = {
    .name = "SCSIDevice",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(unit_attention.key, SCSIDevice),
        VMSTATE_UINT8(unit_attention.asc, SCSIDevice),
        VMSTATE_UINT8(unit_attention.ascq, SCSIDevice),
        VMSTATE_BOOL(sense_is_ua, SCSIDevice),
        VMSTATE_UINT8_SUB_ARRAY(sense, SCSIDevice, 0, SCSI_SENSE_BUF_SIZE_OLD),
        VMSTATE_UINT32(sense_len, SCSIDevice),
        {
            .name         = "requests",
            .version_id   = 0,
            .field_exists = NULL,
            .size         = 0,   /* ouch */
            .info         = &vmstate_info_scsi_requests,
            .flags        = VMS_SINGLE,
            .offset       = 0,
        },
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_scsi_sense_state,
        NULL
    }
};

static Property scsi_props[] = {
    DEFINE_PROP_UINT32("channel", SCSIDevice, channel, 0),
    DEFINE_PROP_UINT32("scsi-id", SCSIDevice, id, -1),
    DEFINE_PROP_UINT32("lun", SCSIDevice, lun, -1),
    DEFINE_PROP_END_OF_LIST(),
};

static void scsi_device_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *k = DEVICE_CLASS(klass);
    set_bit(DEVICE_CATEGORY_STORAGE, k->categories);
    k->bus_type = TYPE_SCSI_BUS;
    k->realize = scsi_qdev_realize;
    k->unrealize = scsi_qdev_unrealize;
    device_class_set_props(k, scsi_props);
}

static void scsi_dev_instance_init(Object *obj)
{
    DeviceState *dev = DEVICE(obj);
    SCSIDevice *s = SCSI_DEVICE(dev);

    device_add_bootindex_property(obj, &s->conf.bootindex,
                                  "bootindex", NULL,
                                  &s->qdev);
}

static const TypeInfo scsi_device_type_info = {
    .name = TYPE_SCSI_DEVICE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(SCSIDevice),
    .abstract = true,
    .class_size = sizeof(SCSIDeviceClass),
    .class_init = scsi_device_class_init,
    .instance_init = scsi_dev_instance_init,
};

static void scsi_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *k = BUS_CLASS(klass);
    HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);

    k->get_dev_path = scsibus_get_dev_path;
    k->get_fw_dev_path = scsibus_get_fw_dev_path;
    k->check_address = scsi_bus_check_address;
    hc->unplug = qdev_simple_device_unplug_cb;
}

static const TypeInfo scsi_bus_info = {
    .name = TYPE_SCSI_BUS,
    .parent = TYPE_BUS,
    .instance_size = sizeof(SCSIBus),
    .class_init = scsi_bus_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_HOTPLUG_HANDLER },
        { }
    }
};

static void scsi_register_types(void)
{
    type_register_static(&scsi_bus_info);
    type_register_static(&scsi_device_type_info);
}

type_init(scsi_register_types)