#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "qemu/option.h"
#include "hw/qdev-properties.h"
#include "hw/scsi/scsi.h"
#include "migration/qemu-file-types.h"
#include "migration/vmstate.h"
#include "scsi/constants.h"
#include "sysemu/block-backend.h"
#include "sysemu/blockdev.h"
#include "sysemu/sysemu.h"
#include "sysemu/runstate.h"
#include "trace.h"
#include "sysemu/dma.h"
#include "qemu/cutils.h"

static char *scsibus_get_dev_path(DeviceState *dev);
static char *scsibus_get_fw_dev_path(DeviceState *dev);
static void scsi_req_dequeue(SCSIRequest *req);
static uint8_t *scsi_target_alloc_buf(SCSIRequest *req, size_t len);
static void scsi_target_free_buf(SCSIRequest *req);

static Property scsi_props[] = {
    DEFINE_PROP_UINT32("channel", SCSIDevice, channel, 0),
    DEFINE_PROP_UINT32("scsi-id", SCSIDevice, id, -1),
    DEFINE_PROP_UINT32("lun", SCSIDevice, lun, -1),
    DEFINE_PROP_END_OF_LIST(),
};

static void scsi_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *k = BUS_CLASS(klass);
    HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);

    k->get_dev_path = scsibus_get_dev_path;
    k->get_fw_dev_path = scsibus_get_fw_dev_path;
    hc->unplug = qdev_simple_device_unplug_cb;
}

static const TypeInfo scsi_bus_info = {
    .name = TYPE_SCSI_BUS,
    .parent = TYPE_BUS,
    .instance_size = sizeof(SCSIBus),
    .class_init = scsi_bus_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_HOTPLUG_HANDLER },
        { }
    }
};
static int next_scsi_bus;

static void scsi_device_realize(SCSIDevice *s, Error **errp)
{
    SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s);
    if (sc->realize) {
        sc->realize(s, errp);
    }
}

static void scsi_device_unrealize(SCSIDevice *s)
{
    SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s);
    if (sc->unrealize) {
        sc->unrealize(s);
    }
}

int scsi_bus_parse_cdb(SCSIDevice *dev, SCSICommand *cmd, uint8_t *buf,
                       void *hba_private)
{
    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, dev->qdev.parent_bus);
    int rc;

    assert(cmd->len == 0);
    rc = scsi_req_parse_cdb(dev, cmd, buf);
    if (bus->info->parse_cdb) {
        rc = bus->info->parse_cdb(dev, cmd, buf, hba_private);
    }
    return rc;
}

static SCSIRequest *scsi_device_alloc_req(SCSIDevice *s, uint32_t tag, uint32_t lun,
                                          uint8_t *buf, void *hba_private)
{
    SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s);
    if (sc->alloc_req) {
        return sc->alloc_req(s, tag, lun, buf, hba_private);
    }

    return NULL;
}

void scsi_device_unit_attention_reported(SCSIDevice *s)
{
    SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s);
    if (sc->unit_attention_reported) {
        sc->unit_attention_reported(s);
    }
}
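/*
 * Typical HBA-side setup (illustrative sketch only; the HBA state "s" and
 * the SCSIBusInfo "my_scsi_info" are hypothetical names, not part of this
 * file):
 *
 *     scsi_bus_new(&s->bus, sizeof(s->bus), DEVICE(s), &my_scsi_info, NULL);
 *     scsi_bus_legacy_handle_cmdline(&s->bus);
 *
 * scsi_bus_new() is defined below; scsi_bus_legacy_handle_cmdline() attaches
 * any '-drive if=scsi' drives that match this bus number.
 */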
/* Create a scsi bus, and attach devices to it.  */
void scsi_bus_new(SCSIBus *bus, size_t bus_size, DeviceState *host,
                  const SCSIBusInfo *info, const char *bus_name)
{
    qbus_create_inplace(bus, bus_size, TYPE_SCSI_BUS, host, bus_name);
    bus->busnr = next_scsi_bus++;
    bus->info = info;
    qbus_set_bus_hotplug_handler(BUS(bus));
}

static void scsi_dma_restart_bh(void *opaque)
{
    SCSIDevice *s = opaque;
    SCSIRequest *req, *next;

    qemu_bh_delete(s->bh);
    s->bh = NULL;

    aio_context_acquire(blk_get_aio_context(s->conf.blk));
    QTAILQ_FOREACH_SAFE(req, &s->requests, next, next) {
        scsi_req_ref(req);
        if (req->retry) {
            req->retry = false;
            switch (req->cmd.mode) {
            case SCSI_XFER_FROM_DEV:
            case SCSI_XFER_TO_DEV:
                scsi_req_continue(req);
                break;
            case SCSI_XFER_NONE:
                scsi_req_dequeue(req);
                scsi_req_enqueue(req);
                break;
            }
        }
        scsi_req_unref(req);
    }
    aio_context_release(blk_get_aio_context(s->conf.blk));
}

void scsi_req_retry(SCSIRequest *req)
{
    /* No need to save a reference, because scsi_dma_restart_bh just
     * looks at the request list.  */
    req->retry = true;
}

static void scsi_dma_restart_cb(void *opaque, int running, RunState state)
{
    SCSIDevice *s = opaque;

    if (!running) {
        return;
    }
    if (!s->bh) {
        AioContext *ctx = blk_get_aio_context(s->conf.blk);
        s->bh = aio_bh_new(ctx, scsi_dma_restart_bh, s);
        qemu_bh_schedule(s->bh);
    }
}
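/*
 * Realize a SCSI device: validate the channel/scsi-id/lun properties against
 * the limits advertised in SCSIBusInfo, and auto-assign a free target id or
 * LUN when the corresponding property was left at -1.
 */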
static void scsi_qdev_realize(DeviceState *qdev, Error **errp)
{
    SCSIDevice *dev = SCSI_DEVICE(qdev);
    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, dev->qdev.parent_bus);
    SCSIDevice *d;
    Error *local_err = NULL;

    if (dev->channel > bus->info->max_channel) {
        error_setg(errp, "bad scsi channel id: %d", dev->channel);
        return;
    }
    if (dev->id != -1 && dev->id > bus->info->max_target) {
        error_setg(errp, "bad scsi device id: %d", dev->id);
        return;
    }
    if (dev->lun != -1 && dev->lun > bus->info->max_lun) {
        error_setg(errp, "bad scsi device lun: %d", dev->lun);
        return;
    }

    if (dev->id == -1) {
        int id = -1;
        if (dev->lun == -1) {
            dev->lun = 0;
        }
        do {
            d = scsi_device_find(bus, dev->channel, ++id, dev->lun);
        } while (d && d->lun == dev->lun && id < bus->info->max_target);
        if (d && d->lun == dev->lun) {
            error_setg(errp, "no free target");
            return;
        }
        dev->id = id;
    } else if (dev->lun == -1) {
        int lun = -1;
        do {
            d = scsi_device_find(bus, dev->channel, dev->id, ++lun);
        } while (d && d->lun == lun && lun < bus->info->max_lun);
        if (d && d->lun == lun) {
            error_setg(errp, "no free lun");
            return;
        }
        dev->lun = lun;
    } else {
        d = scsi_device_find(bus, dev->channel, dev->id, dev->lun);
        assert(d);
        if (d->lun == dev->lun && dev != d) {
            error_setg(errp, "lun already used by '%s'", d->qdev.id);
            return;
        }
    }

    QTAILQ_INIT(&dev->requests);
    scsi_device_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    dev->vmsentry = qdev_add_vm_change_state_handler(DEVICE(dev),
                                                     scsi_dma_restart_cb, dev);
}

static void scsi_qdev_unrealize(DeviceState *qdev)
{
    SCSIDevice *dev = SCSI_DEVICE(qdev);

    if (dev->vmsentry) {
        qemu_del_vm_change_state_handler(dev->vmsentry);
    }

    scsi_device_purge_requests(dev, SENSE_CODE(NO_SENSE));

    scsi_device_unrealize(dev);

    blockdev_mark_auto_del(dev->conf.blk);
}

/* handle legacy '-drive if=scsi,...' cmd line args */
SCSIDevice *scsi_bus_legacy_add_drive(SCSIBus *bus, BlockBackend *blk,
                                      int unit, bool removable, int bootindex,
                                      bool share_rw,
                                      BlockdevOnError rerror,
                                      BlockdevOnError werror,
                                      const char *serial, Error **errp)
{
    const char *driver;
    char *name;
    DeviceState *dev;
    Error *err = NULL;
    DriveInfo *dinfo;

    if (blk_is_sg(blk)) {
        driver = "scsi-generic";
    } else {
        dinfo = blk_legacy_dinfo(blk);
        if (dinfo && dinfo->media_cd) {
            driver = "scsi-cd";
        } else {
            driver = "scsi-hd";
        }
    }
    dev = qdev_new(driver);
    name = g_strdup_printf("legacy[%d]", unit);
    object_property_add_child(OBJECT(bus), name, OBJECT(dev));
    g_free(name);

    qdev_prop_set_uint32(dev, "scsi-id", unit);
    if (bootindex >= 0) {
        object_property_set_int(OBJECT(dev), "bootindex", bootindex,
                                &error_abort);
    }
    if (object_property_find(OBJECT(dev), "removable", NULL)) {
        qdev_prop_set_bit(dev, "removable", removable);
    }
    if (serial && object_property_find(OBJECT(dev), "serial", NULL)) {
        qdev_prop_set_string(dev, "serial", serial);
    }
    qdev_prop_set_drive_err(dev, "drive", blk, &err);
    if (err) {
        error_propagate(errp, err);
        object_unparent(OBJECT(dev));
        return NULL;
    }
    if (!object_property_set_bool(OBJECT(dev), "share-rw", share_rw, &err)) {
        error_propagate(errp, err);
        object_unparent(OBJECT(dev));
        return NULL;
    }

    qdev_prop_set_enum(dev, "rerror", rerror);
    qdev_prop_set_enum(dev, "werror", werror);

    if (!qdev_realize_and_unref(dev, &bus->qbus, &err)) {
        error_propagate(errp, err);
        object_unparent(OBJECT(dev));
        return NULL;
    }
    return SCSI_DEVICE(dev);
}

void scsi_bus_legacy_handle_cmdline(SCSIBus *bus)
{
    Location loc;
    DriveInfo *dinfo;
    int unit;

    loc_push_none(&loc);
    for (unit = 0; unit <= bus->info->max_target; unit++) {
        dinfo = drive_get(IF_SCSI, bus->busnr, unit);
        if (dinfo == NULL) {
            continue;
        }
        qemu_opts_loc_restore(dinfo->opts);
        scsi_bus_legacy_add_drive(bus, blk_by_legacy_dinfo(dinfo),
                                  unit, false, -1, false,
                                  BLOCKDEV_ON_ERROR_AUTO,
                                  BLOCKDEV_ON_ERROR_AUTO,
                                  NULL, &error_fatal);
    }
    loc_pop(&loc);
}

static int32_t scsi_invalid_field(SCSIRequest *req, uint8_t *buf)
{
    scsi_req_build_sense(req, SENSE_CODE(INVALID_FIELD));
    scsi_req_complete(req, CHECK_CONDITION);
    return 0;
}

static const struct SCSIReqOps reqops_invalid_field = {
    .size         = sizeof(SCSIRequest),
    .send_command = scsi_invalid_field
};

/* SCSIReqOps implementation for invalid commands.  */

static int32_t scsi_invalid_command(SCSIRequest *req, uint8_t *buf)
{
    scsi_req_build_sense(req, SENSE_CODE(INVALID_OPCODE));
    scsi_req_complete(req, CHECK_CONDITION);
    return 0;
}

static const struct SCSIReqOps reqops_invalid_opcode = {
    .size         = sizeof(SCSIRequest),
    .send_command = scsi_invalid_command
};
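/*
 * reqops_unit_attention below is selected by scsi_req_new() whenever the
 * device or the bus has a pending unit attention condition, so the guest
 * receives CHECK CONDITION with that sense data before any other command
 * is processed.
 */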
/* SCSIReqOps implementation for unit attention conditions.  */

static int32_t scsi_unit_attention(SCSIRequest *req, uint8_t *buf)
{
    if (req->dev->unit_attention.key == UNIT_ATTENTION) {
        scsi_req_build_sense(req, req->dev->unit_attention);
    } else if (req->bus->unit_attention.key == UNIT_ATTENTION) {
        scsi_req_build_sense(req, req->bus->unit_attention);
    }
    scsi_req_complete(req, CHECK_CONDITION);
    return 0;
}

static const struct SCSIReqOps reqops_unit_attention = {
    .size         = sizeof(SCSIRequest),
    .send_command = scsi_unit_attention
};

/* SCSIReqOps implementation for REPORT LUNS and for commands sent to
   an invalid LUN.  */

typedef struct SCSITargetReq SCSITargetReq;

struct SCSITargetReq {
    SCSIRequest req;
    int len;
    uint8_t *buf;
    int buf_len;
};

static void store_lun(uint8_t *outbuf, int lun)
{
    if (lun < 256) {
        outbuf[1] = lun;
        return;
    }
    outbuf[1] = (lun & 255);
    outbuf[0] = (lun >> 8) | 0x40;
}
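/*
 * Emulate REPORT LUNS at the target level: count the LUNs that share this
 * device's channel and target id, always include LUN 0 even if no device
 * is attached there, and build the LUN list into the request buffer.
 */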
static bool scsi_target_emulate_report_luns(SCSITargetReq *r)
{
    BusChild *kid;
    int i, len, n;
    int channel, id;
    bool found_lun0;

    if (r->req.cmd.xfer < 16) {
        return false;
    }
    if (r->req.cmd.buf[2] > 2) {
        return false;
    }
    channel = r->req.dev->channel;
    id = r->req.dev->id;
    found_lun0 = false;
    n = 0;
    QTAILQ_FOREACH(kid, &r->req.bus->qbus.children, sibling) {
        DeviceState *qdev = kid->child;
        SCSIDevice *dev = SCSI_DEVICE(qdev);

        if (dev->channel == channel && dev->id == id) {
            if (dev->lun == 0) {
                found_lun0 = true;
            }
            n += 8;
        }
    }
    if (!found_lun0) {
        n += 8;
    }

    scsi_target_alloc_buf(&r->req, n + 8);

    len = MIN(n + 8, r->req.cmd.xfer & ~7);
    memset(r->buf, 0, len);
    stl_be_p(&r->buf[0], n);
    i = found_lun0 ? 8 : 16;
    QTAILQ_FOREACH(kid, &r->req.bus->qbus.children, sibling) {
        DeviceState *qdev = kid->child;
        SCSIDevice *dev = SCSI_DEVICE(qdev);

        if (dev->channel == channel && dev->id == id) {
            store_lun(&r->buf[i], dev->lun);
            i += 8;
        }
    }
    assert(i == n + 8);
    r->len = len;
    return true;
}
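/*
 * Emulate INQUIRY for a LUN that has no device attached: report the
 * peripheral qualifier/device type as "not present" (or "no LUN" for
 * non-zero LUNs) plus minimal standard INQUIRY data and VPD page 0x00.
 */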
static bool scsi_target_emulate_inquiry(SCSITargetReq *r)
{
    assert(r->req.dev->lun != r->req.lun);

    scsi_target_alloc_buf(&r->req, SCSI_INQUIRY_LEN);

    if (r->req.cmd.buf[1] & 0x2) {
        /* Command support data - optional, not implemented */
        return false;
    }

    if (r->req.cmd.buf[1] & 0x1) {
        /* Vital product data */
        uint8_t page_code = r->req.cmd.buf[2];
        r->buf[r->len++] = page_code; /* this page */
        r->buf[r->len++] = 0x00;

        switch (page_code) {
        case 0x00: /* Supported page codes, mandatory */
        {
            int pages;
            pages = r->len++;
            r->buf[r->len++] = 0x00; /* list of supported pages (this page) */
            r->buf[pages] = r->len - pages - 1; /* number of pages */
            break;
        }
        default:
            return false;
        }
        /* done with EVPD */
        assert(r->len < r->buf_len);
        r->len = MIN(r->req.cmd.xfer, r->len);
        return true;
    }

    /* Standard INQUIRY data */
    if (r->req.cmd.buf[2] != 0) {
        return false;
    }

    /* PAGE CODE == 0 */
    r->len = MIN(r->req.cmd.xfer, SCSI_INQUIRY_LEN);
    memset(r->buf, 0, r->len);
    if (r->req.lun != 0) {
        r->buf[0] = TYPE_NO_LUN;
    } else {
        r->buf[0] = TYPE_NOT_PRESENT | TYPE_INACTIVE;
        r->buf[2] = 5; /* Version */
        r->buf[3] = 2 | 0x10; /* HiSup, response data format */
        r->buf[4] = r->len - 5; /* Additional Length = (Len - 1) - 4 */
        r->buf[7] = 0x10 | (r->req.bus->info->tcq ? 0x02 : 0); /* Sync, TCQ.  */
        memcpy(&r->buf[8], "QEMU    ", 8);
        memcpy(&r->buf[16], "QEMU TARGET     ", 16);
        pstrcpy((char *) &r->buf[32], 4, qemu_hw_version());
    }
    return true;
}

static size_t scsi_sense_len(SCSIRequest *req)
{
    if (req->dev->type == TYPE_SCANNER)
        return SCSI_SENSE_LEN_SCANNER;
    else
        return SCSI_SENSE_LEN;
}

static int32_t scsi_target_send_command(SCSIRequest *req, uint8_t *buf)
{
    SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);
    int fixed_sense = (req->cmd.buf[1] & 1) == 0;

    if (req->lun != 0 &&
        buf[0] != INQUIRY && buf[0] != REQUEST_SENSE) {
        scsi_req_build_sense(req, SENSE_CODE(LUN_NOT_SUPPORTED));
        scsi_req_complete(req, CHECK_CONDITION);
        return 0;
    }
    switch (buf[0]) {
    case REPORT_LUNS:
        if (!scsi_target_emulate_report_luns(r)) {
            goto illegal_request;
        }
        break;
    case INQUIRY:
        if (!scsi_target_emulate_inquiry(r)) {
            goto illegal_request;
        }
        break;
    case REQUEST_SENSE:
        scsi_target_alloc_buf(&r->req, scsi_sense_len(req));
        if (req->lun != 0) {
            const struct SCSISense sense = SENSE_CODE(LUN_NOT_SUPPORTED);

            r->len = scsi_build_sense_buf(r->buf, req->cmd.xfer,
                                          sense, fixed_sense);
        } else {
            r->len = scsi_device_get_sense(r->req.dev, r->buf,
                                           MIN(req->cmd.xfer, r->buf_len),
                                           fixed_sense);
        }
        if (r->req.dev->sense_is_ua) {
            scsi_device_unit_attention_reported(req->dev);
            r->req.dev->sense_len = 0;
            r->req.dev->sense_is_ua = false;
        }
        break;
    case TEST_UNIT_READY:
        break;
    default:
        scsi_req_build_sense(req, SENSE_CODE(INVALID_OPCODE));
        scsi_req_complete(req, CHECK_CONDITION);
        return 0;
    illegal_request:
        scsi_req_build_sense(req, SENSE_CODE(INVALID_FIELD));
        scsi_req_complete(req, CHECK_CONDITION);
        return 0;
    }

    if (!r->len) {
        scsi_req_complete(req, GOOD);
    }
    return r->len;
}

static void scsi_target_read_data(SCSIRequest *req)
{
    SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);
    uint32_t n;

    n = r->len;
    if (n > 0) {
        r->len = 0;
        scsi_req_data(&r->req, n);
    } else {
        scsi_req_complete(&r->req, GOOD);
    }
}

static uint8_t *scsi_target_get_buf(SCSIRequest *req)
{
    SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);

    return r->buf;
}

static uint8_t *scsi_target_alloc_buf(SCSIRequest *req, size_t len)
{
    SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);

    r->buf = g_malloc(len);
    r->buf_len = len;

    return r->buf;
}

static void scsi_target_free_buf(SCSIRequest *req)
{
    SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);

    g_free(r->buf);
}

static const struct SCSIReqOps reqops_target_command = {
    .size         = sizeof(SCSITargetReq),
    .send_command = scsi_target_send_command,
    .read_data    = scsi_target_read_data,
    .get_buf      = scsi_target_get_buf,
    .free_req     = scsi_target_free_buf,
};
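/*
 * Request lifecycle, as implemented below: the HBA calls scsi_req_new() to
 * parse the CDB and pick the right SCSIReqOps, then scsi_req_enqueue() to
 * issue the command.  Data moves in chunks via the bus transfer_data()
 * callback and scsi_req_continue(), until scsi_req_complete() reports the
 * final status and the last scsi_req_unref() frees the request.
 */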
SCSIRequest *scsi_req_alloc(const SCSIReqOps *reqops, SCSIDevice *d,
                            uint32_t tag, uint32_t lun, void *hba_private)
{
    SCSIRequest *req;
    SCSIBus *bus = scsi_bus_from_device(d);
    BusState *qbus = BUS(bus);
    const int memset_off = offsetof(SCSIRequest, sense)
                           + sizeof(req->sense);

    req = g_malloc(reqops->size);
    memset((uint8_t *)req + memset_off, 0, reqops->size - memset_off);
    req->refcount = 1;
    req->bus = bus;
    req->dev = d;
    req->tag = tag;
    req->lun = lun;
    req->hba_private = hba_private;
    req->status = -1;
    req->ops = reqops;
    object_ref(OBJECT(d));
    object_ref(OBJECT(qbus->parent));
    notifier_list_init(&req->cancel_notifiers);
    trace_scsi_req_alloc(req->dev->id, req->lun, req->tag);
    return req;
}

SCSIRequest *scsi_req_new(SCSIDevice *d, uint32_t tag, uint32_t lun,
                          uint8_t *buf, void *hba_private)
{
    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, d->qdev.parent_bus);
    const SCSIReqOps *ops;
    SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(d);
    SCSIRequest *req;
    SCSICommand cmd = { .len = 0 };
    int ret;

    if ((d->unit_attention.key == UNIT_ATTENTION ||
         bus->unit_attention.key == UNIT_ATTENTION) &&
        (buf[0] != INQUIRY &&
         buf[0] != REPORT_LUNS &&
         buf[0] != GET_CONFIGURATION &&
         buf[0] != GET_EVENT_STATUS_NOTIFICATION &&

         /*
          * If we already have a pending unit attention condition,
          * report this one before triggering another one.
          */
         !(buf[0] == REQUEST_SENSE && d->sense_is_ua))) {
        ops = &reqops_unit_attention;
    } else if (lun != d->lun ||
               buf[0] == REPORT_LUNS ||
               (buf[0] == REQUEST_SENSE && d->sense_len)) {
        ops = &reqops_target_command;
    } else {
        ops = NULL;
    }

    if (ops != NULL || !sc->parse_cdb) {
        ret = scsi_req_parse_cdb(d, &cmd, buf);
    } else {
        ret = sc->parse_cdb(d, &cmd, buf, hba_private);
    }

    if (ret != 0) {
        trace_scsi_req_parse_bad(d->id, lun, tag, buf[0]);
        req = scsi_req_alloc(&reqops_invalid_opcode, d, tag, lun, hba_private);
    } else {
        assert(cmd.len != 0);
        trace_scsi_req_parsed(d->id, lun, tag, buf[0],
                              cmd.mode, cmd.xfer);
        if (cmd.lba != -1) {
            trace_scsi_req_parsed_lba(d->id, lun, tag, buf[0],
                                      cmd.lba);
        }

        if (cmd.xfer > INT32_MAX) {
            req = scsi_req_alloc(&reqops_invalid_field, d, tag, lun, hba_private);
        } else if (ops) {
            req = scsi_req_alloc(ops, d, tag, lun, hba_private);
        } else {
            req = scsi_device_alloc_req(d, tag, lun, buf, hba_private);
        }
    }

    req->cmd = cmd;
    req->resid = req->cmd.xfer;

    switch (buf[0]) {
    case INQUIRY:
        trace_scsi_inquiry(d->id, lun, tag, cmd.buf[1], cmd.buf[2]);
        break;
    case TEST_UNIT_READY:
        trace_scsi_test_unit_ready(d->id, lun, tag);
        break;
    case REPORT_LUNS:
        trace_scsi_report_luns(d->id, lun, tag);
        break;
    case REQUEST_SENSE:
        trace_scsi_request_sense(d->id, lun, tag);
        break;
    default:
        break;
    }

    return req;
}

uint8_t *scsi_req_get_buf(SCSIRequest *req)
{
    return req->ops->get_buf(req);
}
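/*
 * Clear a pending unit attention condition after the command that reported
 * it completes.  INQUIRY, GET CONFIGURATION and GET EVENT STATUS NOTIFICATION
 * never clear it, and REPORT LUNS only clears a REPORTED LUNS DATA HAS
 * CHANGED condition.
 */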
static void scsi_clear_unit_attention(SCSIRequest *req)
{
    SCSISense *ua;
    if (req->dev->unit_attention.key != UNIT_ATTENTION &&
        req->bus->unit_attention.key != UNIT_ATTENTION) {
        return;
    }

    /*
     * If an INQUIRY command enters the enabled command state,
     * the device server shall [not] clear any unit attention condition;
     * See also MMC-6, paragraphs 6.5 and 6.6.2.
     */
    if (req->cmd.buf[0] == INQUIRY ||
        req->cmd.buf[0] == GET_CONFIGURATION ||
        req->cmd.buf[0] == GET_EVENT_STATUS_NOTIFICATION) {
        return;
    }

    if (req->dev->unit_attention.key == UNIT_ATTENTION) {
        ua = &req->dev->unit_attention;
    } else {
        ua = &req->bus->unit_attention;
    }

    /*
     * If a REPORT LUNS command enters the enabled command state, [...]
     * the device server shall clear any pending unit attention condition
     * with an additional sense code of REPORTED LUNS DATA HAS CHANGED.
     */
    if (req->cmd.buf[0] == REPORT_LUNS &&
        !(ua->asc == SENSE_CODE(REPORTED_LUNS_CHANGED).asc &&
          ua->ascq == SENSE_CODE(REPORTED_LUNS_CHANGED).ascq)) {
        return;
    }

    *ua = SENSE_CODE(NO_SENSE);
}

int scsi_req_get_sense(SCSIRequest *req, uint8_t *buf, int len)
{
    int ret;

    assert(len >= 14);
    if (!req->sense_len) {
        return 0;
    }

    ret = scsi_convert_sense(req->sense, req->sense_len, buf, len, true);

    /*
     * FIXME: clearing unit attention conditions upon autosense should be done
     * only if the UA_INTLCK_CTRL field in the Control mode page is set to 00b
     * (SAM-5, 5.14).
     *
     * We assume UA_INTLCK_CTRL to be 00b for HBAs that support autosense, and
     * 10b for HBAs that do not support it (do not call scsi_req_get_sense).
     * Here we handle unit attention clearing for UA_INTLCK_CTRL == 00b.
     */
    if (req->dev->sense_is_ua) {
        scsi_device_unit_attention_reported(req->dev);
        req->dev->sense_len = 0;
        req->dev->sense_is_ua = false;
    }
    return ret;
}

int scsi_device_get_sense(SCSIDevice *dev, uint8_t *buf, int len, bool fixed)
{
    return scsi_convert_sense(dev->sense, dev->sense_len, buf, len, fixed);
}

void scsi_req_build_sense(SCSIRequest *req, SCSISense sense)
{
    trace_scsi_req_build_sense(req->dev->id, req->lun, req->tag,
                               sense.key, sense.asc, sense.ascq);
    req->sense_len = scsi_build_sense(req->sense, sense);
}

static void scsi_req_enqueue_internal(SCSIRequest *req)
{
    assert(!req->enqueued);
    scsi_req_ref(req);
    if (req->bus->info->get_sg_list) {
        req->sg = req->bus->info->get_sg_list(req);
    } else {
        req->sg = NULL;
    }
    req->enqueued = true;
    QTAILQ_INSERT_TAIL(&req->dev->requests, req, next);
}

int32_t scsi_req_enqueue(SCSIRequest *req)
{
    int32_t rc;

    assert(!req->retry);
    scsi_req_enqueue_internal(req);
    scsi_req_ref(req);
    rc = req->ops->send_command(req, req->cmd.buf);
    scsi_req_unref(req);
    return rc;
}

static void scsi_req_dequeue(SCSIRequest *req)
{
    trace_scsi_req_dequeue(req->dev->id, req->lun, req->tag);
    req->retry = false;
    if (req->enqueued) {
        QTAILQ_REMOVE(&req->dev->requests, req, next);
        req->enqueued = false;
        scsi_req_unref(req);
    }
}
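/*
 * Compute the returned data length of the MMC GET PERFORMANCE command from
 * the descriptor type, data type and number of descriptors requested.  It is
 * called from scsi_req_xfer() for the ERASE_12 opcode when the device is a
 * CD-ROM, where that opcode actually encodes GET PERFORMANCE.
 */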
static int scsi_get_performance_length(int num_desc, int type, int data_type)
{
    /* MMC-6, paragraph 6.7.  */
    switch (type) {
    case 0:
        if ((data_type & 3) == 0) {
            /* Each descriptor is as in Table 295 - Nominal performance.  */
            return 16 * num_desc + 8;
        } else {
            /* Each descriptor is as in Table 296 - Exceptions.  */
            return 6 * num_desc + 8;
        }
    case 1:
    case 4:
    case 5:
        return 8 * num_desc + 8;
    case 2:
        return 2048 * num_desc + 8;
    case 3:
        return 16 * num_desc + 8;
    default:
        return 8;
    }
}

static int ata_passthrough_xfer_unit(SCSIDevice *dev, uint8_t *buf)
{
    int byte_block = (buf[2] >> 2) & 0x1;
    int type = (buf[2] >> 4) & 0x1;
    int xfer_unit;

    if (byte_block) {
        if (type) {
            xfer_unit = dev->blocksize;
        } else {
            xfer_unit = 512;
        }
    } else {
        xfer_unit = 1;
    }

    return xfer_unit;
}

static int ata_passthrough_12_xfer(SCSIDevice *dev, uint8_t *buf)
{
    int length = buf[2] & 0x3;
    int xfer;
    int unit = ata_passthrough_xfer_unit(dev, buf);

    switch (length) {
    case 0:
    case 3: /* USB-specific.  */
    default:
        xfer = 0;
        break;
    case 1:
        xfer = buf[3];
        break;
    case 2:
        xfer = buf[4];
        break;
    }

    return xfer * unit;
}

static int ata_passthrough_16_xfer(SCSIDevice *dev, uint8_t *buf)
{
    int extend = buf[1] & 0x1;
    int length = buf[2] & 0x3;
    int xfer;
    int unit = ata_passthrough_xfer_unit(dev, buf);

    switch (length) {
    case 0:
    case 3: /* USB-specific.  */
    default:
        xfer = 0;
        break;
    case 1:
        xfer = buf[4];
        xfer |= (extend ? buf[3] << 8 : 0);
        break;
    case 2:
        xfer = buf[6];
        xfer |= (extend ? buf[5] << 8 : 0);
        break;
    }

    return xfer * unit;
}
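/*
 * Compute the expected data transfer length from the CDB for direct-access
 * and MMC devices.  The generic length decoded by scsi_cdb_xfer() is
 * overridden for commands whose length field is in units of blocks or lives
 * in a command-specific position.
 */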
static int scsi_req_xfer(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
{
    cmd->xfer = scsi_cdb_xfer(buf);
    switch (buf[0]) {
    case TEST_UNIT_READY:
    case REWIND:
    case START_STOP:
    case SET_CAPACITY:
    case WRITE_FILEMARKS:
    case WRITE_FILEMARKS_16:
    case SPACE:
    case RESERVE:
    case RELEASE:
    case ERASE:
    case ALLOW_MEDIUM_REMOVAL:
    case SEEK_10:
    case SYNCHRONIZE_CACHE:
    case SYNCHRONIZE_CACHE_16:
    case LOCATE_16:
    case LOCK_UNLOCK_CACHE:
    case SET_CD_SPEED:
    case SET_LIMITS:
    case WRITE_LONG_10:
    case UPDATE_BLOCK:
    case RESERVE_TRACK:
    case SET_READ_AHEAD:
    case PRE_FETCH:
    case PRE_FETCH_16:
    case ALLOW_OVERWRITE:
        cmd->xfer = 0;
        break;
    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
        if ((buf[1] & 2) == 0) {
            cmd->xfer = 0;
        } else if ((buf[1] & 4) != 0) {
            cmd->xfer = 1;
        }
        cmd->xfer *= dev->blocksize;
        break;
    case MODE_SENSE:
        break;
    case WRITE_SAME_10:
    case WRITE_SAME_16:
        cmd->xfer = buf[1] & 1 ? 0 : dev->blocksize;
        break;
    case READ_CAPACITY_10:
        cmd->xfer = 8;
        break;
    case READ_BLOCK_LIMITS:
        cmd->xfer = 6;
        break;
    case SEND_VOLUME_TAG:
        /* GPCMD_SET_STREAMING from multimedia commands.  */
        if (dev->type == TYPE_ROM) {
            cmd->xfer = buf[10] | (buf[9] << 8);
        } else {
            cmd->xfer = buf[9] | (buf[8] << 8);
        }
        break;
    case WRITE_6:
        /* length 0 means 256 blocks */
        if (cmd->xfer == 0) {
            cmd->xfer = 256;
        }
        /* fall through */
    case WRITE_10:
    case WRITE_VERIFY_10:
    case WRITE_12:
    case WRITE_VERIFY_12:
    case WRITE_16:
    case WRITE_VERIFY_16:
        cmd->xfer *= dev->blocksize;
        break;
    case READ_6:
    case READ_REVERSE:
        /* length 0 means 256 blocks */
        if (cmd->xfer == 0) {
            cmd->xfer = 256;
        }
        /* fall through */
    case READ_10:
    case READ_12:
    case READ_16:
        cmd->xfer *= dev->blocksize;
        break;
    case FORMAT_UNIT:
        /* MMC mandates the parameter list to be 12-bytes long.  Parameters
         * for block devices are restricted to the header right now.  */
        if (dev->type == TYPE_ROM && (buf[1] & 16)) {
            cmd->xfer = 12;
        } else {
            cmd->xfer = (buf[1] & 16) == 0 ? 0 : (buf[1] & 32 ? 8 : 4);
        }
        break;
    case INQUIRY:
    case RECEIVE_DIAGNOSTIC:
    case SEND_DIAGNOSTIC:
        cmd->xfer = buf[4] | (buf[3] << 8);
        break;
    case READ_CD:
    case READ_BUFFER:
    case WRITE_BUFFER:
    case SEND_CUE_SHEET:
        cmd->xfer = buf[8] | (buf[7] << 8) | (buf[6] << 16);
        break;
    case PERSISTENT_RESERVE_OUT:
        cmd->xfer = ldl_be_p(&buf[5]) & 0xffffffffULL;
        break;
    case ERASE_12:
        if (dev->type == TYPE_ROM) {
            /* MMC command GET PERFORMANCE.  */
            cmd->xfer = scsi_get_performance_length(buf[9] | (buf[8] << 8),
                                                    buf[10], buf[1] & 0x1f);
        }
        break;
    case MECHANISM_STATUS:
    case READ_DVD_STRUCTURE:
    case SEND_DVD_STRUCTURE:
    case MAINTENANCE_OUT:
    case MAINTENANCE_IN:
        if (dev->type == TYPE_ROM) {
            /* GPCMD_REPORT_KEY and GPCMD_SEND_KEY from multi media commands */
            cmd->xfer = buf[9] | (buf[8] << 8);
        }
        break;
    case ATA_PASSTHROUGH_12:
        if (dev->type == TYPE_ROM) {
            /* BLANK command of MMC */
            cmd->xfer = 0;
        } else {
            cmd->xfer = ata_passthrough_12_xfer(dev, buf);
        }
        break;
    case ATA_PASSTHROUGH_16:
        cmd->xfer = ata_passthrough_16_xfer(dev, buf);
        break;
    }
    return 0;
}
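/*
 * Transfer length decoding for sequential-access (tape) devices, where READ
 * and WRITE use a 3-byte length that may be counted in blocks (FIXED bit set)
 * or in bytes; other commands fall back to the generic scsi_req_xfer().
 */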
static int scsi_req_stream_xfer(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
{
    switch (buf[0]) {
    /* stream commands */
    case ERASE_12:
    case ERASE_16:
        cmd->xfer = 0;
        break;
    case READ_6:
    case READ_REVERSE:
    case RECOVER_BUFFERED_DATA:
    case WRITE_6:
        cmd->xfer = buf[4] | (buf[3] << 8) | (buf[2] << 16);
        if (buf[1] & 0x01) { /* fixed */
            cmd->xfer *= dev->blocksize;
        }
        break;
    case READ_16:
    case READ_REVERSE_16:
    case VERIFY_16:
    case WRITE_16:
        cmd->xfer = buf[14] | (buf[13] << 8) | (buf[12] << 16);
        if (buf[1] & 0x01) { /* fixed */
            cmd->xfer *= dev->blocksize;
        }
        break;
    case REWIND:
    case LOAD_UNLOAD:
        cmd->xfer = 0;
        break;
    case SPACE_16:
        cmd->xfer = buf[13] | (buf[12] << 8);
        break;
    case READ_POSITION:
        switch (buf[1] & 0x1f) /* operation code */ {
        case SHORT_FORM_BLOCK_ID:
        case SHORT_FORM_VENDOR_SPECIFIC:
            cmd->xfer = 20;
            break;
        case LONG_FORM:
            cmd->xfer = 32;
            break;
        case EXTENDED_FORM:
            cmd->xfer = buf[8] | (buf[7] << 8);
            break;
        default:
            return -1;
        }

        break;
    case FORMAT_UNIT:
        cmd->xfer = buf[4] | (buf[3] << 8);
        break;
    /* generic commands */
    default:
        return scsi_req_xfer(cmd, dev, buf);
    }
    return 0;
}

static int scsi_req_medium_changer_xfer(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
{
    switch (buf[0]) {
    /* medium changer commands */
    case EXCHANGE_MEDIUM:
    case INITIALIZE_ELEMENT_STATUS:
    case INITIALIZE_ELEMENT_STATUS_WITH_RANGE:
    case MOVE_MEDIUM:
    case POSITION_TO_ELEMENT:
        cmd->xfer = 0;
        break;
    case READ_ELEMENT_STATUS:
        cmd->xfer = buf[9] | (buf[8] << 8) | (buf[7] << 16);
        break;

    /* generic commands */
    default:
        return scsi_req_xfer(cmd, dev, buf);
    }
    return 0;
}

static int scsi_req_scanner_length(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
{
    switch (buf[0]) {
    /* Scanner commands */
    case OBJECT_POSITION:
        cmd->xfer = 0;
        break;
    case SCAN:
        cmd->xfer = buf[4];
        break;
    case READ_10:
    case SEND:
    case GET_WINDOW:
    case SET_WINDOW:
        cmd->xfer = buf[8] | (buf[7] << 8) | (buf[6] << 16);
        break;
    default:
        /* GET_DATA_BUFFER_STATUS xfer handled by scsi_req_xfer */
        return scsi_req_xfer(cmd, dev, buf);
    }

    return 0;
}
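/*
 * Classify the command as a write (to the device), a read (from the device)
 * or a command with no data transfer, based on the opcode and the already
 * computed transfer length.
 */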
static void scsi_cmd_xfer_mode(SCSICommand *cmd)
{
    if (!cmd->xfer) {
        cmd->mode = SCSI_XFER_NONE;
        return;
    }
    switch (cmd->buf[0]) {
    case WRITE_6:
    case WRITE_10:
    case WRITE_VERIFY_10:
    case WRITE_12:
    case WRITE_VERIFY_12:
    case WRITE_16:
    case WRITE_VERIFY_16:
    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
    case COPY:
    case COPY_VERIFY:
    case COMPARE:
    case CHANGE_DEFINITION:
    case LOG_SELECT:
    case MODE_SELECT:
    case MODE_SELECT_10:
    case SEND_DIAGNOSTIC:
    case WRITE_BUFFER:
    case FORMAT_UNIT:
    case REASSIGN_BLOCKS:
    case SEARCH_EQUAL:
    case SEARCH_HIGH:
    case SEARCH_LOW:
    case UPDATE_BLOCK:
    case WRITE_LONG_10:
    case WRITE_SAME_10:
    case WRITE_SAME_16:
    case UNMAP:
    case SEARCH_HIGH_12:
    case SEARCH_EQUAL_12:
    case SEARCH_LOW_12:
    case MEDIUM_SCAN:
    case SEND_VOLUME_TAG:
    case SEND_CUE_SHEET:
    case SEND_DVD_STRUCTURE:
    case PERSISTENT_RESERVE_OUT:
    case MAINTENANCE_OUT:
    case SET_WINDOW:
    case SCAN:
        /* SCAN conflicts with START_STOP.  START_STOP has cmd->xfer set to 0 for
         * non-scanner devices, so we only get here for SCAN and not for START_STOP.
         */
        cmd->mode = SCSI_XFER_TO_DEV;
        break;
    case ATA_PASSTHROUGH_12:
    case ATA_PASSTHROUGH_16:
        /* T_DIR */
        cmd->mode = (cmd->buf[2] & 0x8) ?
                    SCSI_XFER_FROM_DEV : SCSI_XFER_TO_DEV;
        break;
    default:
        cmd->mode = SCSI_XFER_FROM_DEV;
        break;
    }
}

int scsi_req_parse_cdb(SCSIDevice *dev, SCSICommand *cmd, uint8_t *buf)
{
    int rc;
    int len;

    cmd->lba = -1;
    len = scsi_cdb_length(buf);
    if (len < 0) {
        return -1;
    }

    cmd->len = len;
    switch (dev->type) {
    case TYPE_TAPE:
        rc = scsi_req_stream_xfer(cmd, dev, buf);
        break;
    case TYPE_MEDIUM_CHANGER:
        rc = scsi_req_medium_changer_xfer(cmd, dev, buf);
        break;
    case TYPE_SCANNER:
        rc = scsi_req_scanner_length(cmd, dev, buf);
        break;
    default:
        rc = scsi_req_xfer(cmd, dev, buf);
        break;
    }

    if (rc != 0)
        return rc;

    memcpy(cmd->buf, buf, cmd->len);
    scsi_cmd_xfer_mode(cmd);
    cmd->lba = scsi_cmd_lba(cmd);
    return 0;
}

void scsi_device_report_change(SCSIDevice *dev, SCSISense sense)
{
    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, dev->qdev.parent_bus);

    scsi_device_set_ua(dev, sense);
    if (bus->info->change) {
        bus->info->change(bus, dev, sense);
    }
}

SCSIRequest *scsi_req_ref(SCSIRequest *req)
{
    assert(req->refcount > 0);
    req->refcount++;
    return req;
}

void scsi_req_unref(SCSIRequest *req)
{
    assert(req->refcount > 0);
    if (--req->refcount == 0) {
        BusState *qbus = req->dev->qdev.parent_bus;
        SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, qbus);

        if (bus->info->free_request && req->hba_private) {
            bus->info->free_request(bus, req->hba_private);
        }
        if (req->ops->free_req) {
            req->ops->free_req(req);
        }
        object_unref(OBJECT(req->dev));
        object_unref(OBJECT(qbus->parent));
        g_free(req);
    }
}

/* Tell the device that we finished processing this chunk of I/O.  It
   will start the next chunk or complete the command.  */
void scsi_req_continue(SCSIRequest *req)
{
    if (req->io_canceled) {
        trace_scsi_req_continue_canceled(req->dev->id, req->lun, req->tag);
        return;
    }
    trace_scsi_req_continue(req->dev->id, req->lun, req->tag);
    if (req->cmd.mode == SCSI_XFER_TO_DEV) {
        req->ops->write_data(req);
    } else {
        req->ops->read_data(req);
    }
}
/* Called by the devices when data is ready for the HBA.  The HBA should
   start a DMA operation to read or fill the device's data buffer.
   Once it completes, calling scsi_req_continue will restart I/O.  */
void scsi_req_data(SCSIRequest *req, int len)
{
    uint8_t *buf;
    if (req->io_canceled) {
        trace_scsi_req_data_canceled(req->dev->id, req->lun, req->tag, len);
        return;
    }
    trace_scsi_req_data(req->dev->id, req->lun, req->tag, len);
    assert(req->cmd.mode != SCSI_XFER_NONE);
    if (!req->sg) {
        req->resid -= len;
        req->bus->info->transfer_data(req, len);
        return;
    }

    /* If the device calls scsi_req_data and the HBA specified a
     * scatter/gather list, the transfer has to happen in a single
     * step.  */
    assert(!req->dma_started);
    req->dma_started = true;

    buf = scsi_req_get_buf(req);
    if (req->cmd.mode == SCSI_XFER_FROM_DEV) {
        req->resid = dma_buf_read(buf, len, req->sg);
    } else {
        req->resid = dma_buf_write(buf, len, req->sg);
    }
    scsi_req_continue(req);
}

void scsi_req_print(SCSIRequest *req)
{
    FILE *fp = stderr;
    int i;

    fprintf(fp, "[%s id=%d] %s",
            req->dev->qdev.parent_bus->name,
            req->dev->id,
            scsi_command_name(req->cmd.buf[0]));
    for (i = 1; i < req->cmd.len; i++) {
        fprintf(fp, " 0x%02x", req->cmd.buf[i]);
    }
    switch (req->cmd.mode) {
    case SCSI_XFER_NONE:
        fprintf(fp, " - none\n");
        break;
    case SCSI_XFER_FROM_DEV:
        fprintf(fp, " - from-dev len=%zd\n", req->cmd.xfer);
        break;
    case SCSI_XFER_TO_DEV:
        fprintf(fp, " - to-dev len=%zd\n", req->cmd.xfer);
        break;
    default:
        fprintf(fp, " - Oops\n");
        break;
    }
}

void scsi_req_complete(SCSIRequest *req, int status)
{
    assert(req->status == -1);
    req->status = status;

    assert(req->sense_len <= sizeof(req->sense));
    if (status == GOOD) {
        req->sense_len = 0;
    }

    if (req->sense_len) {
        memcpy(req->dev->sense, req->sense, req->sense_len);
        req->dev->sense_len = req->sense_len;
        req->dev->sense_is_ua = (req->ops == &reqops_unit_attention);
    } else {
        req->dev->sense_len = 0;
        req->dev->sense_is_ua = false;
    }

    /*
     * Unit attention state is now stored in the device's sense buffer
     * if the HBA didn't do autosense.  Clear the pending unit attention
     * flags.
     */
    scsi_clear_unit_attention(req);

    scsi_req_ref(req);
    scsi_req_dequeue(req);
    req->bus->info->complete(req, req->status, req->resid);

    /* Cancelled requests might end up being completed instead of cancelled */
    notifier_list_notify(&req->cancel_notifiers, req);
    scsi_req_unref(req);
}

/* Called by the devices when the request is canceled. */
void scsi_req_cancel_complete(SCSIRequest *req)
{
    assert(req->io_canceled);
    if (req->bus->info->cancel) {
        req->bus->info->cancel(req);
    }
    notifier_list_notify(&req->cancel_notifiers, req);
    scsi_req_unref(req);
}

/* Cancel @req asynchronously. @notifier is added to @req's cancellation
 * notifier list, and the bus will be notified when the request's
 * cancellation is complete.
 */
void scsi_req_cancel_async(SCSIRequest *req, Notifier *notifier)
{
    trace_scsi_req_cancel(req->dev->id, req->lun, req->tag);
    if (notifier) {
        notifier_list_add(&req->cancel_notifiers, notifier);
    }
    if (req->io_canceled) {
        /* A blk_aio_cancel_async is pending; when it finishes,
         * scsi_req_cancel_complete will be called and will
         * call the notifier we just added.  Just wait for that.
         */
        assert(req->aiocb);
        return;
    }
    /* Dropped in scsi_req_cancel_complete.  */
    scsi_req_ref(req);
    scsi_req_dequeue(req);
    req->io_canceled = true;
    if (req->aiocb) {
        blk_aio_cancel_async(req->aiocb);
    } else {
        scsi_req_cancel_complete(req);
    }
}
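/*
 * Synchronous counterpart of scsi_req_cancel_async(): blk_aio_cancel() waits
 * for the in-flight AIO to finish, so the cancellation has completed by the
 * time this function returns.
 */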
void scsi_req_cancel(SCSIRequest *req)
{
    trace_scsi_req_cancel(req->dev->id, req->lun, req->tag);
    if (!req->enqueued) {
        return;
    }
    assert(!req->io_canceled);
    /* Dropped in scsi_req_cancel_complete.  */
    scsi_req_ref(req);
    scsi_req_dequeue(req);
    req->io_canceled = true;
    if (req->aiocb) {
        blk_aio_cancel(req->aiocb);
    } else {
        scsi_req_cancel_complete(req);
    }
}

static int scsi_ua_precedence(SCSISense sense)
{
    if (sense.key != UNIT_ATTENTION) {
        return INT_MAX;
    }
    if (sense.asc == 0x29 && sense.ascq == 0x04) {
        /* DEVICE INTERNAL RESET goes with POWER ON OCCURRED */
        return 1;
    } else if (sense.asc == 0x3F && sense.ascq == 0x01) {
        /* MICROCODE HAS BEEN CHANGED goes with SCSI BUS RESET OCCURRED */
        return 2;
    } else if (sense.asc == 0x29 && (sense.ascq == 0x05 || sense.ascq == 0x06)) {
        /* These two go with "all others". */
        ;
    } else if (sense.asc == 0x29 && sense.ascq <= 0x07) {
        /* POWER ON, RESET OR BUS DEVICE RESET OCCURRED = 0
         * POWER ON OCCURRED = 1
         * SCSI BUS RESET OCCURRED = 2
         * BUS DEVICE RESET FUNCTION OCCURRED = 3
         * I_T NEXUS LOSS OCCURRED = 7
         */
        return sense.ascq;
    } else if (sense.asc == 0x2F && sense.ascq == 0x01) {
        /* COMMANDS CLEARED BY POWER LOSS NOTIFICATION  */
        return 8;
    }
    return (sense.asc << 8) | sense.ascq;
}

void scsi_device_set_ua(SCSIDevice *sdev, SCSISense sense)
{
    int prec1, prec2;
    if (sense.key != UNIT_ATTENTION) {
        return;
    }
    trace_scsi_device_set_ua(sdev->id, sdev->lun, sense.key,
                             sense.asc, sense.ascq);

    /*
     * Override a pre-existing unit attention condition, except for a more
     * important reset condition.
     */
    prec1 = scsi_ua_precedence(sdev->unit_attention);
    prec2 = scsi_ua_precedence(sense);
    if (prec2 < prec1) {
        sdev->unit_attention = sense;
    }
}

void scsi_device_purge_requests(SCSIDevice *sdev, SCSISense sense)
{
    SCSIRequest *req;

    aio_context_acquire(blk_get_aio_context(sdev->conf.blk));
    while (!QTAILQ_EMPTY(&sdev->requests)) {
        req = QTAILQ_FIRST(&sdev->requests);
        scsi_req_cancel_async(req, NULL);
    }
    blk_drain(sdev->conf.blk);
    aio_context_release(blk_get_aio_context(sdev->conf.blk));
    scsi_device_set_ua(sdev, sense);
}

static char *scsibus_get_dev_path(DeviceState *dev)
{
    SCSIDevice *d = SCSI_DEVICE(dev);
    DeviceState *hba = dev->parent_bus->parent;
    char *id;
    char *path;

    id = qdev_get_dev_path(hba);
    if (id) {
        path = g_strdup_printf("%s/%d:%d:%d", id, d->channel, d->id, d->lun);
    } else {
        path = g_strdup_printf("%d:%d:%d", d->channel, d->id, d->lun);
    }
    g_free(id);
    return path;
}

static char *scsibus_get_fw_dev_path(DeviceState *dev)
{
    SCSIDevice *d = SCSI_DEVICE(dev);
    return g_strdup_printf("channel@%x/%s@%x,%x", d->channel,
                           qdev_fw_name(dev), d->id, d->lun);
}

SCSIDevice *scsi_device_find(SCSIBus *bus, int channel, int id, int lun)
{
    BusChild *kid;
    SCSIDevice *target_dev = NULL;

    QTAILQ_FOREACH_REVERSE(kid, &bus->qbus.children, sibling) {
        DeviceState *qdev = kid->child;
        SCSIDevice *dev = SCSI_DEVICE(qdev);

        if (dev->channel == channel && dev->id == id) {
            if (dev->lun == lun) {
                return dev;
            }
            target_dev = dev;
        }
    }
    return target_dev;
}
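/*
 * Migration of in-flight requests.  Each request is written as a one-byte
 * marker (1 = needs retry, 2 = in flight, 0 = end of list) followed by the
 * CDB buffer, tag and LUN, then any HBA-specific and request-specific state
 * saved through the save_request callbacks.
 */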
/* SCSI request list.  For simplicity, pv points to the whole device */

static int put_scsi_requests(QEMUFile *f, void *pv, size_t size,
                             const VMStateField *field, QJSON *vmdesc)
{
    SCSIDevice *s = pv;
    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, s->qdev.parent_bus);
    SCSIRequest *req;

    QTAILQ_FOREACH(req, &s->requests, next) {
        assert(!req->io_canceled);
        assert(req->status == -1);
        assert(req->enqueued);

        qemu_put_sbyte(f, req->retry ? 1 : 2);
        qemu_put_buffer(f, req->cmd.buf, sizeof(req->cmd.buf));
        qemu_put_be32s(f, &req->tag);
        qemu_put_be32s(f, &req->lun);
        if (bus->info->save_request) {
            bus->info->save_request(f, req);
        }
        if (req->ops->save_request) {
            req->ops->save_request(f, req);
        }
    }
    qemu_put_sbyte(f, 0);

    return 0;
}
static int get_scsi_requests(QEMUFile *f, void *pv, size_t size,
                             const VMStateField *field)
{
    SCSIDevice *s = pv;
    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, s->qdev.parent_bus);
    int8_t sbyte;

    while ((sbyte = qemu_get_sbyte(f)) > 0) {
        uint8_t buf[SCSI_CMD_BUF_SIZE];
        uint32_t tag;
        uint32_t lun;
        SCSIRequest *req;

        qemu_get_buffer(f, buf, sizeof(buf));
        qemu_get_be32s(f, &tag);
        qemu_get_be32s(f, &lun);
        req = scsi_req_new(s, tag, lun, buf, NULL);
        req->retry = (sbyte == 1);
        if (bus->info->load_request) {
            req->hba_private = bus->info->load_request(f, req);
        }
        if (req->ops->load_request) {
            req->ops->load_request(f, req);
        }

        /* Just restart it later.  */
        scsi_req_enqueue_internal(req);

        /* At this point, the request will be kept alive by the reference
         * added by scsi_req_enqueue_internal, so we can release our reference.
         * The HBA of course will add its own reference in the load_request
         * callback if it needs to hold on to the SCSIRequest.
         */
        scsi_req_unref(req);
    }

    return 0;
}

static const VMStateInfo vmstate_info_scsi_requests = {
    .name = "scsi-requests",
    .get  = get_scsi_requests,
    .put  = put_scsi_requests,
};

static bool scsi_sense_state_needed(void *opaque)
{
    SCSIDevice *s = opaque;

    return s->sense_len > SCSI_SENSE_BUF_SIZE_OLD;
}

static const VMStateDescription vmstate_scsi_sense_state = {
    .name = "SCSIDevice/sense",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = scsi_sense_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8_SUB_ARRAY(sense, SCSIDevice,
                                SCSI_SENSE_BUF_SIZE_OLD,
                                SCSI_SENSE_BUF_SIZE - SCSI_SENSE_BUF_SIZE_OLD),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_scsi_device = {
    .name = "SCSIDevice",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(unit_attention.key, SCSIDevice),
        VMSTATE_UINT8(unit_attention.asc, SCSIDevice),
        VMSTATE_UINT8(unit_attention.ascq, SCSIDevice),
        VMSTATE_BOOL(sense_is_ua, SCSIDevice),
        VMSTATE_UINT8_SUB_ARRAY(sense, SCSIDevice, 0, SCSI_SENSE_BUF_SIZE_OLD),
        VMSTATE_UINT32(sense_len, SCSIDevice),
        {
            .name         = "requests",
            .version_id   = 0,
            .field_exists = NULL,
            .size         = 0,   /* ouch */
            .info         = &vmstate_info_scsi_requests,
            .flags        = VMS_SINGLE,
            .offset       = 0,
        },
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_scsi_sense_state,
        NULL
    }
};

static void scsi_device_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *k = DEVICE_CLASS(klass);
    set_bit(DEVICE_CATEGORY_STORAGE, k->categories);
    k->bus_type  = TYPE_SCSI_BUS;
    k->realize   = scsi_qdev_realize;
    k->unrealize = scsi_qdev_unrealize;
    device_class_set_props(k, scsi_props);
}

static void scsi_dev_instance_init(Object *obj)
{
    DeviceState *dev = DEVICE(obj);
    SCSIDevice *s = SCSI_DEVICE(dev);

    device_add_bootindex_property(obj, &s->conf.bootindex,
                                  "bootindex", NULL,
                                  &s->qdev);
}

static const TypeInfo scsi_device_type_info = {
    .name = TYPE_SCSI_DEVICE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(SCSIDevice),
    .abstract = true,
    .class_size = sizeof(SCSIDeviceClass),
    .class_init = scsi_device_class_init,
    .instance_init = scsi_dev_instance_init,
};

static void scsi_register_types(void)
{
    type_register_static(&scsi_bus_info);
    type_register_static(&scsi_device_type_info);
}

type_init(scsi_register_types)