#include "qemu/osdep.h"
#include "hw/hw.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "qemu/option.h"
#include "hw/scsi/scsi.h"
#include "migration/qemu-file-types.h"
#include "migration/vmstate.h"
#include "scsi/constants.h"
#include "hw/qdev.h"
#include "sysemu/block-backend.h"
#include "sysemu/blockdev.h"
#include "trace.h"
#include "sysemu/dma.h"
#include "qemu/cutils.h"

static char *scsibus_get_dev_path(DeviceState *dev);
static char *scsibus_get_fw_dev_path(DeviceState *dev);
static void scsi_req_dequeue(SCSIRequest *req);
static uint8_t *scsi_target_alloc_buf(SCSIRequest *req, size_t len);
static void scsi_target_free_buf(SCSIRequest *req);

static Property scsi_props[] = {
    DEFINE_PROP_UINT32("channel", SCSIDevice, channel, 0),
    DEFINE_PROP_UINT32("scsi-id", SCSIDevice, id, -1),
    DEFINE_PROP_UINT32("lun", SCSIDevice, lun, -1),
    DEFINE_PROP_END_OF_LIST(),
};

static void scsi_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *k = BUS_CLASS(klass);
    HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);

    k->get_dev_path = scsibus_get_dev_path;
    k->get_fw_dev_path = scsibus_get_fw_dev_path;
    hc->unplug = qdev_simple_device_unplug_cb;
}

static const TypeInfo scsi_bus_info = {
    .name = TYPE_SCSI_BUS,
    .parent = TYPE_BUS,
    .instance_size = sizeof(SCSIBus),
    .class_init = scsi_bus_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_HOTPLUG_HANDLER },
        { }
    }
};
static int next_scsi_bus;

static void scsi_device_realize(SCSIDevice *s, Error **errp)
{
    SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s);
    if (sc->realize) {
        sc->realize(s, errp);
    }
}

int scsi_bus_parse_cdb(SCSIDevice *dev, SCSICommand *cmd, uint8_t *buf,
                       void *hba_private)
{
    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, dev->qdev.parent_bus);
    int rc;

    assert(cmd->len == 0);
    rc = scsi_req_parse_cdb(dev, cmd, buf);
    if (bus->info->parse_cdb) {
        rc = bus->info->parse_cdb(dev, cmd, buf, hba_private);
    }
    return rc;
}

static SCSIRequest *scsi_device_alloc_req(SCSIDevice *s, uint32_t tag, uint32_t lun,
                                          uint8_t *buf, void *hba_private)
{
    SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s);
    if (sc->alloc_req) {
        return sc->alloc_req(s, tag, lun, buf, hba_private);
    }

    return NULL;
}

void scsi_device_unit_attention_reported(SCSIDevice *s)
{
    SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s);
    if (sc->unit_attention_reported) {
        sc->unit_attention_reported(s);
    }
}

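/*
 * Typical use by an HBA (illustrative sketch only; "MyHBAState",
 * "my_scsi_info" and "my_hba_realize" are hypothetical names, not part of
 * this file):
 *
 *     static void my_hba_realize(DeviceState *dev, Error **errp)
 *     {
 *         MyHBAState *s = MY_HBA(dev);
 *
 *         scsi_bus_new(&s->bus, sizeof(s->bus), dev, &my_scsi_info, NULL);
 *         scsi_bus_legacy_handle_cmdline(&s->bus);
 *     }
 */
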
/* Create a scsi bus, and attach devices to it.  */
void scsi_bus_new(SCSIBus *bus, size_t bus_size, DeviceState *host,
                  const SCSIBusInfo *info, const char *bus_name)
{
    qbus_create_inplace(bus, bus_size, TYPE_SCSI_BUS, host, bus_name);
    bus->busnr = next_scsi_bus++;
    bus->info = info;
    qbus_set_bus_hotplug_handler(BUS(bus), &error_abort);
}

static void scsi_dma_restart_bh(void *opaque)
{
    SCSIDevice *s = opaque;
    SCSIRequest *req, *next;

    qemu_bh_delete(s->bh);
    s->bh = NULL;

    aio_context_acquire(blk_get_aio_context(s->conf.blk));
    QTAILQ_FOREACH_SAFE(req, &s->requests, next, next) {
        scsi_req_ref(req);
        if (req->retry) {
            req->retry = false;
            switch (req->cmd.mode) {
            case SCSI_XFER_FROM_DEV:
            case SCSI_XFER_TO_DEV:
                scsi_req_continue(req);
                break;
            case SCSI_XFER_NONE:
                scsi_req_dequeue(req);
                scsi_req_enqueue(req);
                break;
            }
        }
        scsi_req_unref(req);
    }
    aio_context_release(blk_get_aio_context(s->conf.blk));
}

void scsi_req_retry(SCSIRequest *req)
{
    /* No need to save a reference, because scsi_dma_restart_bh just
     * looks at the request list.  */
    req->retry = true;
}

static void scsi_dma_restart_cb(void *opaque, int running, RunState state)
{
    SCSIDevice *s = opaque;

    if (!running) {
        return;
    }
    if (!s->bh) {
        AioContext *ctx = blk_get_aio_context(s->conf.blk);
        s->bh = aio_bh_new(ctx, scsi_dma_restart_bh, s);
        qemu_bh_schedule(s->bh);
    }
}

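/*
 * Realize a SCSI device: validate the channel/id/lun properties against the
 * limits advertised by the bus, auto-assign a free target id or LUN when the
 * property was left at -1, and register a VM change state handler so that
 * requests marked with scsi_req_retry() are restarted when the VM resumes.
 */
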
static void scsi_qdev_realize(DeviceState *qdev, Error **errp)
{
    SCSIDevice *dev = SCSI_DEVICE(qdev);
    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, dev->qdev.parent_bus);
    SCSIDevice *d;
    Error *local_err = NULL;

    if (dev->channel > bus->info->max_channel) {
        error_setg(errp, "bad scsi channel id: %d", dev->channel);
        return;
    }
    if (dev->id != -1 && dev->id > bus->info->max_target) {
        error_setg(errp, "bad scsi device id: %d", dev->id);
        return;
    }
    if (dev->lun != -1 && dev->lun > bus->info->max_lun) {
        error_setg(errp, "bad scsi device lun: %d", dev->lun);
        return;
    }

    if (dev->id == -1) {
        int id = -1;
        if (dev->lun == -1) {
            dev->lun = 0;
        }
        do {
            d = scsi_device_find(bus, dev->channel, ++id, dev->lun);
        } while (d && d->lun == dev->lun && id < bus->info->max_target);
        if (d && d->lun == dev->lun) {
            error_setg(errp, "no free target");
            return;
        }
        dev->id = id;
    } else if (dev->lun == -1) {
        int lun = -1;
        do {
            d = scsi_device_find(bus, dev->channel, dev->id, ++lun);
        } while (d && d->lun == lun && lun < bus->info->max_lun);
        if (d && d->lun == lun) {
            error_setg(errp, "no free lun");
            return;
        }
        dev->lun = lun;
    } else {
        d = scsi_device_find(bus, dev->channel, dev->id, dev->lun);
        assert(d);
        if (d->lun == dev->lun && dev != d) {
            error_setg(errp, "lun already used by '%s'", d->qdev.id);
            return;
        }
    }

    QTAILQ_INIT(&dev->requests);
    scsi_device_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    dev->vmsentry = qdev_add_vm_change_state_handler(DEVICE(dev),
                                                     scsi_dma_restart_cb, dev);
}

static void scsi_qdev_unrealize(DeviceState *qdev, Error **errp)
{
    SCSIDevice *dev = SCSI_DEVICE(qdev);

    if (dev->vmsentry) {
        qemu_del_vm_change_state_handler(dev->vmsentry);
    }

    scsi_device_purge_requests(dev, SENSE_CODE(NO_SENSE));

    blockdev_mark_auto_del(dev->conf.blk);
}

/* handle legacy '-drive if=scsi,...' cmd line args */
SCSIDevice *scsi_bus_legacy_add_drive(SCSIBus *bus, BlockBackend *blk,
                                      int unit, bool removable, int bootindex,
                                      bool share_rw,
                                      BlockdevOnError rerror,
                                      BlockdevOnError werror,
                                      const char *serial, Error **errp)
{
    const char *driver;
    char *name;
    DeviceState *dev;
    Error *err = NULL;

    driver = blk_is_sg(blk) ? "scsi-generic" : "scsi-disk";
    dev = qdev_create(&bus->qbus, driver);
    name = g_strdup_printf("legacy[%d]", unit);
    object_property_add_child(OBJECT(bus), name, OBJECT(dev), NULL);
    g_free(name);

    qdev_prop_set_uint32(dev, "scsi-id", unit);
    if (bootindex >= 0) {
        object_property_set_int(OBJECT(dev), bootindex, "bootindex",
                                &error_abort);
    }
    if (object_property_find(OBJECT(dev), "removable", NULL)) {
        qdev_prop_set_bit(dev, "removable", removable);
    }
    if (serial && object_property_find(OBJECT(dev), "serial", NULL)) {
        qdev_prop_set_string(dev, "serial", serial);
    }
    qdev_prop_set_drive(dev, "drive", blk, &err);
    if (err) {
        error_propagate(errp, err);
        object_unparent(OBJECT(dev));
        return NULL;
    }
    object_property_set_bool(OBJECT(dev), share_rw, "share-rw", &err);
    if (err != NULL) {
        error_propagate(errp, err);
        object_unparent(OBJECT(dev));
        return NULL;
    }

    qdev_prop_set_enum(dev, "rerror", rerror);
    qdev_prop_set_enum(dev, "werror", werror);

    object_property_set_bool(OBJECT(dev), true, "realized", &err);
    if (err != NULL) {
        error_propagate(errp, err);
        object_unparent(OBJECT(dev));
        return NULL;
    }
    return SCSI_DEVICE(dev);
}

void scsi_bus_legacy_handle_cmdline(SCSIBus *bus)
{
    Location loc;
    DriveInfo *dinfo;
    int unit;

    loc_push_none(&loc);
    for (unit = 0; unit <= bus->info->max_target; unit++) {
        dinfo = drive_get(IF_SCSI, bus->busnr, unit);
        if (dinfo == NULL) {
            continue;
        }
        qemu_opts_loc_restore(dinfo->opts);
        scsi_bus_legacy_add_drive(bus, blk_by_legacy_dinfo(dinfo),
                                  unit, false, -1, false,
                                  BLOCKDEV_ON_ERROR_AUTO,
                                  BLOCKDEV_ON_ERROR_AUTO,
                                  NULL, &error_fatal);
    }
    loc_pop(&loc);
}

static int32_t scsi_invalid_field(SCSIRequest *req, uint8_t *buf)
{
    scsi_req_build_sense(req, SENSE_CODE(INVALID_FIELD));
    scsi_req_complete(req, CHECK_CONDITION);
    return 0;
}

static const struct SCSIReqOps reqops_invalid_field = {
    .size = sizeof(SCSIRequest),
    .send_command = scsi_invalid_field
};

/* SCSIReqOps implementation for invalid commands.  */

static int32_t scsi_invalid_command(SCSIRequest *req, uint8_t *buf)
{
    scsi_req_build_sense(req, SENSE_CODE(INVALID_OPCODE));
    scsi_req_complete(req, CHECK_CONDITION);
    return 0;
}

static const struct SCSIReqOps reqops_invalid_opcode = {
    .size = sizeof(SCSIRequest),
    .send_command = scsi_invalid_command
};

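/*
 * While a unit attention condition is pending on the device or on the bus,
 * scsi_req_new() routes most commands to these ops instead of the device:
 * the command is not executed, and the pending condition is reported as
 * CHECK CONDITION sense data.
 */
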
/* SCSIReqOps implementation for unit attention conditions.  */

static int32_t scsi_unit_attention(SCSIRequest *req, uint8_t *buf)
{
    if (req->dev->unit_attention.key == UNIT_ATTENTION) {
        scsi_req_build_sense(req, req->dev->unit_attention);
    } else if (req->bus->unit_attention.key == UNIT_ATTENTION) {
        scsi_req_build_sense(req, req->bus->unit_attention);
    }
    scsi_req_complete(req, CHECK_CONDITION);
    return 0;
}

static const struct SCSIReqOps reqops_unit_attention = {
    .size = sizeof(SCSIRequest),
    .send_command = scsi_unit_attention
};

/* SCSIReqOps implementation for REPORT LUNS and for commands sent to
   an invalid LUN.  */

typedef struct SCSITargetReq SCSITargetReq;

struct SCSITargetReq {
    SCSIRequest req;
    int len;
    uint8_t *buf;
    int buf_len;
};

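/*
 * Encode a LUN in the 8-byte format used by REPORT LUNS: LUNs below 256 use
 * peripheral device addressing, larger ones use flat space addressing (hence
 * the 0x40 in the first byte).  Only the first two bytes of each descriptor
 * are ever non-zero here.
 */
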
static void store_lun(uint8_t *outbuf, int lun)
{
    if (lun < 256) {
        outbuf[1] = lun;
        return;
    }
    outbuf[1] = (lun & 255);
    outbuf[0] = (lun >> 8) | 0x40;
}

static bool scsi_target_emulate_report_luns(SCSITargetReq *r)
{
    BusChild *kid;
    int i, len, n;
    int channel, id;
    bool found_lun0;

    if (r->req.cmd.xfer < 16) {
        return false;
    }
    if (r->req.cmd.buf[2] > 2) {
        return false;
    }
    channel = r->req.dev->channel;
    id = r->req.dev->id;
    found_lun0 = false;
    n = 0;
    QTAILQ_FOREACH(kid, &r->req.bus->qbus.children, sibling) {
        DeviceState *qdev = kid->child;
        SCSIDevice *dev = SCSI_DEVICE(qdev);

        if (dev->channel == channel && dev->id == id) {
            if (dev->lun == 0) {
                found_lun0 = true;
            }
            n += 8;
        }
    }
    if (!found_lun0) {
        n += 8;
    }

    scsi_target_alloc_buf(&r->req, n + 8);

    len = MIN(n + 8, r->req.cmd.xfer & ~7);
    memset(r->buf, 0, len);
    stl_be_p(&r->buf[0], n);
    i = found_lun0 ? 8 : 16;
    QTAILQ_FOREACH(kid, &r->req.bus->qbus.children, sibling) {
        DeviceState *qdev = kid->child;
        SCSIDevice *dev = SCSI_DEVICE(qdev);

        if (dev->channel == channel && dev->id == id) {
            store_lun(&r->buf[i], dev->lun);
            i += 8;
        }
    }
    assert(i == n + 8);
    r->len = len;
    return true;
}

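/*
 * Minimal INQUIRY emulation for LUNs that do not exist on this target.  The
 * response carries a "not present/not connected" peripheral qualifier (and
 * only the mandatory "supported VPD pages" page when EVPD is set), so that
 * initiators can probe the LUN space without triggering errors.
 */
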
static bool scsi_target_emulate_inquiry(SCSITargetReq *r)
{
    assert(r->req.dev->lun != r->req.lun);

    scsi_target_alloc_buf(&r->req, SCSI_INQUIRY_LEN);

    if (r->req.cmd.buf[1] & 0x2) {
        /* Command support data - optional, not implemented */
        return false;
    }

    if (r->req.cmd.buf[1] & 0x1) {
        /* Vital product data */
        uint8_t page_code = r->req.cmd.buf[2];
        r->buf[r->len++] = page_code; /* this page */
        r->buf[r->len++] = 0x00;

        switch (page_code) {
        case 0x00: /* Supported page codes, mandatory */
        {
            int pages;
            pages = r->len++;
            r->buf[r->len++] = 0x00; /* list of supported pages (this page) */
            r->buf[pages] = r->len - pages - 1; /* number of pages */
            break;
        }
        default:
            return false;
        }
        /* done with EVPD */
        assert(r->len < r->buf_len);
        r->len = MIN(r->req.cmd.xfer, r->len);
        return true;
    }

    /* Standard INQUIRY data */
    if (r->req.cmd.buf[2] != 0) {
        return false;
    }

    /* PAGE CODE == 0 */
    r->len = MIN(r->req.cmd.xfer, SCSI_INQUIRY_LEN);
    memset(r->buf, 0, r->len);
    if (r->req.lun != 0) {
        r->buf[0] = TYPE_NO_LUN;
    } else {
        r->buf[0] = TYPE_NOT_PRESENT | TYPE_INACTIVE;
        r->buf[2] = 5; /* Version */
        r->buf[3] = 2 | 0x10; /* HiSup, response data format */
        r->buf[4] = r->len - 5; /* Additional Length = (Len - 1) - 4 */
        r->buf[7] = 0x10 | (r->req.bus->info->tcq ? 0x02 : 0); /* Sync, TCQ.  */
        memcpy(&r->buf[8], "QEMU    ", 8);           /* 8-byte vendor id, space padded */
        memcpy(&r->buf[16], "QEMU TARGET     ", 16); /* 16-byte product id, space padded */
        pstrcpy((char *) &r->buf[32], 4, qemu_hw_version());
    }
    return true;
}

static size_t scsi_sense_len(SCSIRequest *req)
{
    if (req->dev->type == TYPE_SCANNER)
        return SCSI_SENSE_LEN_SCANNER;
    else
        return SCSI_SENSE_LEN;
}

static int32_t scsi_target_send_command(SCSIRequest *req, uint8_t *buf)
{
    SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);
    int fixed_sense = (req->cmd.buf[1] & 1) == 0;

    if (req->lun != 0 &&
        buf[0] != INQUIRY && buf[0] != REQUEST_SENSE) {
        scsi_req_build_sense(req, SENSE_CODE(LUN_NOT_SUPPORTED));
        scsi_req_complete(req, CHECK_CONDITION);
        return 0;
    }
    switch (buf[0]) {
    case REPORT_LUNS:
        if (!scsi_target_emulate_report_luns(r)) {
            goto illegal_request;
        }
        break;
    case INQUIRY:
        if (!scsi_target_emulate_inquiry(r)) {
            goto illegal_request;
        }
        break;
    case REQUEST_SENSE:
        scsi_target_alloc_buf(&r->req, scsi_sense_len(req));
        if (req->lun != 0) {
            const struct SCSISense sense = SENSE_CODE(LUN_NOT_SUPPORTED);

            r->len = scsi_build_sense_buf(r->buf, req->cmd.xfer,
                                          sense, fixed_sense);
        } else {
            r->len = scsi_device_get_sense(r->req.dev, r->buf,
                                           MIN(req->cmd.xfer, r->buf_len),
                                           fixed_sense);
        }
        if (r->req.dev->sense_is_ua) {
            scsi_device_unit_attention_reported(req->dev);
            r->req.dev->sense_len = 0;
            r->req.dev->sense_is_ua = false;
        }
        break;
    case TEST_UNIT_READY:
        break;
    default:
        scsi_req_build_sense(req, SENSE_CODE(INVALID_OPCODE));
        scsi_req_complete(req, CHECK_CONDITION);
        return 0;
    illegal_request:
        scsi_req_build_sense(req, SENSE_CODE(INVALID_FIELD));
        scsi_req_complete(req, CHECK_CONDITION);
        return 0;
    }

    if (!r->len) {
        scsi_req_complete(req, GOOD);
    }
    return r->len;
}

static void scsi_target_read_data(SCSIRequest *req)
{
    SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);
    uint32_t n;

    n = r->len;
    if (n > 0) {
        r->len = 0;
        scsi_req_data(&r->req, n);
    } else {
        scsi_req_complete(&r->req, GOOD);
    }
}

static uint8_t *scsi_target_get_buf(SCSIRequest *req)
{
    SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);

    return r->buf;
}

static uint8_t *scsi_target_alloc_buf(SCSIRequest *req, size_t len)
{
    SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);

    r->buf = g_malloc(len);
    r->buf_len = len;

    return r->buf;
}

static void scsi_target_free_buf(SCSIRequest *req)
{
    SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);

    g_free(r->buf);
}

static const struct SCSIReqOps reqops_target_command = {
    .size = sizeof(SCSITargetReq),
    .send_command = scsi_target_send_command,
    .read_data = scsi_target_read_data,
    .get_buf = scsi_target_get_buf,
    .free_req = scsi_target_free_buf,
};

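/*
 * Allocate a request.  Only the part of SCSIRequest past the sense buffer is
 * zero-initialized here; the earlier fields are filled in explicitly by this
 * function and by scsi_req_new().  The new request holds one reference and
 * also references the device and its HBA, keeping both alive while the
 * request is in flight.
 */
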
SCSIRequest *scsi_req_alloc(const SCSIReqOps *reqops, SCSIDevice *d,
                            uint32_t tag, uint32_t lun, void *hba_private)
{
    SCSIRequest *req;
    SCSIBus *bus = scsi_bus_from_device(d);
    BusState *qbus = BUS(bus);
    const int memset_off = offsetof(SCSIRequest, sense)
                           + sizeof(req->sense);

    req = g_malloc(reqops->size);
    memset((uint8_t *)req + memset_off, 0, reqops->size - memset_off);
    req->refcount = 1;
    req->bus = bus;
    req->dev = d;
    req->tag = tag;
    req->lun = lun;
    req->hba_private = hba_private;
    req->status = -1;
    req->ops = reqops;
    object_ref(OBJECT(d));
    object_ref(OBJECT(qbus->parent));
    notifier_list_init(&req->cancel_notifiers);
    trace_scsi_req_alloc(req->dev->id, req->lun, req->tag);
    return req;
}

SCSIRequest *scsi_req_new(SCSIDevice *d, uint32_t tag, uint32_t lun,
                          uint8_t *buf, void *hba_private)
{
    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, d->qdev.parent_bus);
    const SCSIReqOps *ops;
    SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(d);
    SCSIRequest *req;
    SCSICommand cmd = { .len = 0 };
    int ret;

    if ((d->unit_attention.key == UNIT_ATTENTION ||
         bus->unit_attention.key == UNIT_ATTENTION) &&
        (buf[0] != INQUIRY &&
         buf[0] != REPORT_LUNS &&
         buf[0] != GET_CONFIGURATION &&
         buf[0] != GET_EVENT_STATUS_NOTIFICATION &&

         /*
          * If we already have a pending unit attention condition,
          * report this one before triggering another one.
          */
         !(buf[0] == REQUEST_SENSE && d->sense_is_ua))) {
        ops = &reqops_unit_attention;
    } else if (lun != d->lun ||
               buf[0] == REPORT_LUNS ||
               (buf[0] == REQUEST_SENSE && d->sense_len)) {
        ops = &reqops_target_command;
    } else {
        ops = NULL;
    }

    if (ops != NULL || !sc->parse_cdb) {
        ret = scsi_req_parse_cdb(d, &cmd, buf);
    } else {
        ret = sc->parse_cdb(d, &cmd, buf, hba_private);
    }

    if (ret != 0) {
        trace_scsi_req_parse_bad(d->id, lun, tag, buf[0]);
        req = scsi_req_alloc(&reqops_invalid_opcode, d, tag, lun, hba_private);
    } else {
        assert(cmd.len != 0);
        trace_scsi_req_parsed(d->id, lun, tag, buf[0],
                              cmd.mode, cmd.xfer);
        if (cmd.lba != -1) {
            trace_scsi_req_parsed_lba(d->id, lun, tag, buf[0],
                                      cmd.lba);
        }

        if (cmd.xfer > INT32_MAX) {
            req = scsi_req_alloc(&reqops_invalid_field, d, tag, lun, hba_private);
        } else if (ops) {
            req = scsi_req_alloc(ops, d, tag, lun, hba_private);
        } else {
            req = scsi_device_alloc_req(d, tag, lun, buf, hba_private);
        }
    }

    req->cmd = cmd;
    req->resid = req->cmd.xfer;

    switch (buf[0]) {
    case INQUIRY:
        trace_scsi_inquiry(d->id, lun, tag, cmd.buf[1], cmd.buf[2]);
        break;
    case TEST_UNIT_READY:
        trace_scsi_test_unit_ready(d->id, lun, tag);
        break;
    case REPORT_LUNS:
        trace_scsi_report_luns(d->id, lun, tag);
        break;
    case REQUEST_SENSE:
        trace_scsi_request_sense(d->id, lun, tag);
        break;
    default:
        break;
    }

    return req;
}

uint8_t *scsi_req_get_buf(SCSIRequest *req)
{
    return req->ops->get_buf(req);
}

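/*
 * Called on command completion: clear the pending device or bus unit
 * attention condition once it has been reported.  INQUIRY, GET CONFIGURATION
 * and GET EVENT STATUS NOTIFICATION never clear it, and REPORT LUNS only
 * clears a REPORTED LUNS DATA HAS CHANGED condition; see the comments below.
 */
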
static void scsi_clear_unit_attention(SCSIRequest *req)
{
    SCSISense *ua;
    if (req->dev->unit_attention.key != UNIT_ATTENTION &&
        req->bus->unit_attention.key != UNIT_ATTENTION) {
        return;
    }

    /*
     * If an INQUIRY command enters the enabled command state,
     * the device server shall [not] clear any unit attention condition;
     * See also MMC-6, paragraphs 6.5 and 6.6.2.
     */
    if (req->cmd.buf[0] == INQUIRY ||
        req->cmd.buf[0] == GET_CONFIGURATION ||
        req->cmd.buf[0] == GET_EVENT_STATUS_NOTIFICATION) {
        return;
    }

    if (req->dev->unit_attention.key == UNIT_ATTENTION) {
        ua = &req->dev->unit_attention;
    } else {
        ua = &req->bus->unit_attention;
    }

    /*
     * If a REPORT LUNS command enters the enabled command state, [...]
     * the device server shall clear any pending unit attention condition
     * with an additional sense code of REPORTED LUNS DATA HAS CHANGED.
     */
    if (req->cmd.buf[0] == REPORT_LUNS &&
        !(ua->asc == SENSE_CODE(REPORTED_LUNS_CHANGED).asc &&
          ua->ascq == SENSE_CODE(REPORTED_LUNS_CHANGED).ascq)) {
        return;
    }

    *ua = SENSE_CODE(NO_SENSE);
}

int scsi_req_get_sense(SCSIRequest *req, uint8_t *buf, int len)
{
    int ret;

    assert(len >= 14);
    if (!req->sense_len) {
        return 0;
    }

    ret = scsi_convert_sense(req->sense, req->sense_len, buf, len, true);

    /*
     * FIXME: clearing unit attention conditions upon autosense should be done
     * only if the UA_INTLCK_CTRL field in the Control mode page is set to 00b
     * (SAM-5, 5.14).
     *
     * We assume UA_INTLCK_CTRL to be 00b for HBAs that support autosense, and
     * 10b for HBAs that do not support it (do not call scsi_req_get_sense).
     * Here we handle unit attention clearing for UA_INTLCK_CTRL == 00b.
     */
    if (req->dev->sense_is_ua) {
        scsi_device_unit_attention_reported(req->dev);
        req->dev->sense_len = 0;
        req->dev->sense_is_ua = false;
    }
    return ret;
}

int scsi_device_get_sense(SCSIDevice *dev, uint8_t *buf, int len, bool fixed)
{
    return scsi_convert_sense(dev->sense, dev->sense_len, buf, len, fixed);
}

void scsi_req_build_sense(SCSIRequest *req, SCSISense sense)
{
    trace_scsi_req_build_sense(req->dev->id, req->lun, req->tag,
                               sense.key, sense.asc, sense.ascq);
    req->sense_len = scsi_build_sense(req->sense, sense);
}

static void scsi_req_enqueue_internal(SCSIRequest *req)
{
    assert(!req->enqueued);
    scsi_req_ref(req);
    if (req->bus->info->get_sg_list) {
        req->sg = req->bus->info->get_sg_list(req);
    } else {
        req->sg = NULL;
    }
    req->enqueued = true;
    QTAILQ_INSERT_TAIL(&req->dev->requests, req, next);
}

int32_t scsi_req_enqueue(SCSIRequest *req)
{
    int32_t rc;

    assert(!req->retry);
    scsi_req_enqueue_internal(req);
    scsi_req_ref(req);
    rc = req->ops->send_command(req, req->cmd.buf);
    scsi_req_unref(req);
    return rc;
}

static void scsi_req_dequeue(SCSIRequest *req)
{
    trace_scsi_req_dequeue(req->dev->id, req->lun, req->tag);
    req->retry = false;
    if (req->enqueued) {
        QTAILQ_REMOVE(&req->dev->requests, req, next);
        req->enqueued = false;
        scsi_req_unref(req);
    }
}

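/*
 * The helpers below derive the expected data transfer length from a CDB.
 * scsi_req_xfer() covers the generic and block device opcodes; tape, medium
 * changer and scanner devices override selected opcodes in the stream,
 * medium changer and scanner variants further down.
 */
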
static int scsi_get_performance_length(int num_desc, int type, int data_type)
{
    /* MMC-6, paragraph 6.7.  */
    switch (type) {
    case 0:
        if ((data_type & 3) == 0) {
            /* Each descriptor is as in Table 295 - Nominal performance.  */
            return 16 * num_desc + 8;
        } else {
            /* Each descriptor is as in Table 296 - Exceptions.  */
            return 6 * num_desc + 8;
        }
    case 1:
    case 4:
    case 5:
        return 8 * num_desc + 8;
    case 2:
        return 2048 * num_desc + 8;
    case 3:
        return 16 * num_desc + 8;
    default:
        return 8;
    }
}

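/*
 * Decode the transfer length of a SAT ATA PASS-THROUGH command: BYT_BLOK and
 * T_TYPE in byte 2 of the CDB select the unit (bytes, 512-byte sectors, or
 * logical blocks), and T_LENGTH selects which CDB field holds the count.
 */
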
static int ata_passthrough_xfer_unit(SCSIDevice *dev, uint8_t *buf)
{
    int byte_block = (buf[2] >> 2) & 0x1;
    int type = (buf[2] >> 4) & 0x1;
    int xfer_unit;

    if (byte_block) {
        if (type) {
            xfer_unit = dev->blocksize;
        } else {
            xfer_unit = 512;
        }
    } else {
        xfer_unit = 1;
    }

    return xfer_unit;
}

static int ata_passthrough_12_xfer(SCSIDevice *dev, uint8_t *buf)
{
    int length = buf[2] & 0x3;
    int xfer;
    int unit = ata_passthrough_xfer_unit(dev, buf);

    switch (length) {
    case 0:
    case 3: /* USB-specific.  */
    default:
        xfer = 0;
        break;
    case 1:
        xfer = buf[3];
        break;
    case 2:
        xfer = buf[4];
        break;
    }

    return xfer * unit;
}

static int ata_passthrough_16_xfer(SCSIDevice *dev, uint8_t *buf)
{
    int extend = buf[1] & 0x1;
    int length = buf[2] & 0x3;
    int xfer;
    int unit = ata_passthrough_xfer_unit(dev, buf);

    switch (length) {
    case 0:
    case 3: /* USB-specific.  */
    default:
        xfer = 0;
        break;
    case 1:
        xfer = buf[4];
        xfer |= (extend ? buf[3] << 8 : 0);
        break;
    case 2:
        xfer = buf[6];
        xfer |= (extend ? buf[5] << 8 : 0);
        break;
    }

    return xfer * unit;
}

static int scsi_req_xfer(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
{
    cmd->xfer = scsi_cdb_xfer(buf);
    switch (buf[0]) {
    case TEST_UNIT_READY:
    case REWIND:
    case START_STOP:
    case SET_CAPACITY:
    case WRITE_FILEMARKS:
    case WRITE_FILEMARKS_16:
    case SPACE:
    case RESERVE:
    case RELEASE:
    case ERASE:
    case ALLOW_MEDIUM_REMOVAL:
    case SEEK_10:
    case SYNCHRONIZE_CACHE:
    case SYNCHRONIZE_CACHE_16:
    case LOCATE_16:
    case LOCK_UNLOCK_CACHE:
    case SET_CD_SPEED:
    case SET_LIMITS:
    case WRITE_LONG_10:
    case UPDATE_BLOCK:
    case RESERVE_TRACK:
    case SET_READ_AHEAD:
    case PRE_FETCH:
    case PRE_FETCH_16:
    case ALLOW_OVERWRITE:
        cmd->xfer = 0;
        break;
    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
        if ((buf[1] & 2) == 0) {
            cmd->xfer = 0;
        } else if ((buf[1] & 4) != 0) {
            cmd->xfer = 1;
        }
        cmd->xfer *= dev->blocksize;
        break;
    case MODE_SENSE:
        break;
    case WRITE_SAME_10:
    case WRITE_SAME_16:
        cmd->xfer = buf[1] & 1 ? 0 : dev->blocksize;
        break;
    case READ_CAPACITY_10:
        cmd->xfer = 8;
        break;
    case READ_BLOCK_LIMITS:
        cmd->xfer = 6;
        break;
    case SEND_VOLUME_TAG:
        /* GPCMD_SET_STREAMING from multimedia commands.  */
        if (dev->type == TYPE_ROM) {
            cmd->xfer = buf[10] | (buf[9] << 8);
        } else {
            cmd->xfer = buf[9] | (buf[8] << 8);
        }
        break;
    case WRITE_6:
        /* length 0 means 256 blocks */
        if (cmd->xfer == 0) {
            cmd->xfer = 256;
        }
        /* fall through */
    case WRITE_10:
    case WRITE_VERIFY_10:
    case WRITE_12:
    case WRITE_VERIFY_12:
    case WRITE_16:
    case WRITE_VERIFY_16:
        cmd->xfer *= dev->blocksize;
        break;
    case READ_6:
    case READ_REVERSE:
        /* length 0 means 256 blocks */
        if (cmd->xfer == 0) {
            cmd->xfer = 256;
        }
        /* fall through */
    case READ_10:
    case READ_12:
    case READ_16:
        cmd->xfer *= dev->blocksize;
        break;
    case FORMAT_UNIT:
        /* MMC mandates the parameter list to be 12-bytes long.  Parameters
         * for block devices are restricted to the header right now.  */
        if (dev->type == TYPE_ROM && (buf[1] & 16)) {
            cmd->xfer = 12;
        } else {
            cmd->xfer = (buf[1] & 16) == 0 ? 0 : (buf[1] & 32 ? 8 : 4);
        }
        break;
    case INQUIRY:
    case RECEIVE_DIAGNOSTIC:
    case SEND_DIAGNOSTIC:
        cmd->xfer = buf[4] | (buf[3] << 8);
        break;
    case READ_CD:
    case READ_BUFFER:
    case WRITE_BUFFER:
    case SEND_CUE_SHEET:
        cmd->xfer = buf[8] | (buf[7] << 8) | (buf[6] << 16);
        break;
    case PERSISTENT_RESERVE_OUT:
        cmd->xfer = ldl_be_p(&buf[5]) & 0xffffffffULL;
        break;
    case ERASE_12:
        if (dev->type == TYPE_ROM) {
            /* MMC command GET PERFORMANCE.  */
            cmd->xfer = scsi_get_performance_length(buf[9] | (buf[8] << 8),
                                                    buf[10], buf[1] & 0x1f);
        }
        break;
    case MECHANISM_STATUS:
    case READ_DVD_STRUCTURE:
    case SEND_DVD_STRUCTURE:
    case MAINTENANCE_OUT:
    case MAINTENANCE_IN:
        if (dev->type == TYPE_ROM) {
            /* GPCMD_REPORT_KEY and GPCMD_SEND_KEY from multi media commands */
            cmd->xfer = buf[9] | (buf[8] << 8);
        }
        break;
    case ATA_PASSTHROUGH_12:
        if (dev->type == TYPE_ROM) {
            /* BLANK command of MMC */
            cmd->xfer = 0;
        } else {
            cmd->xfer = ata_passthrough_12_xfer(dev, buf);
        }
        break;
    case ATA_PASSTHROUGH_16:
        cmd->xfer = ata_passthrough_16_xfer(dev, buf);
        break;
    }
    return 0;
}

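/*
 * Transfer length for sequential-access (tape) devices: the stream command
 * length fields count blocks when the FIXED bit is set, so the value is
 * multiplied by the device block size in that case.
 */
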
static int scsi_req_stream_xfer(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
{
    switch (buf[0]) {
    /* stream commands */
    case ERASE_12:
    case ERASE_16:
        cmd->xfer = 0;
        break;
    case READ_6:
    case READ_REVERSE:
    case RECOVER_BUFFERED_DATA:
    case WRITE_6:
        cmd->xfer = buf[4] | (buf[3] << 8) | (buf[2] << 16);
        if (buf[1] & 0x01) { /* fixed */
            cmd->xfer *= dev->blocksize;
        }
        break;
    case READ_16:
    case READ_REVERSE_16:
    case VERIFY_16:
    case WRITE_16:
        cmd->xfer = buf[14] | (buf[13] << 8) | (buf[12] << 16);
        if (buf[1] & 0x01) { /* fixed */
            cmd->xfer *= dev->blocksize;
        }
        break;
    case REWIND:
    case LOAD_UNLOAD:
        cmd->xfer = 0;
        break;
    case SPACE_16:
        cmd->xfer = buf[13] | (buf[12] << 8);
        break;
    case READ_POSITION:
        switch (buf[1] & 0x1f) /* operation code */ {
        case SHORT_FORM_BLOCK_ID:
        case SHORT_FORM_VENDOR_SPECIFIC:
            cmd->xfer = 20;
            break;
        case LONG_FORM:
            cmd->xfer = 32;
            break;
        case EXTENDED_FORM:
            cmd->xfer = buf[8] | (buf[7] << 8);
            break;
        default:
            return -1;
        }

        break;
    case FORMAT_UNIT:
        cmd->xfer = buf[4] | (buf[3] << 8);
        break;
    /* generic commands */
    default:
        return scsi_req_xfer(cmd, dev, buf);
    }
    return 0;
}

static int scsi_req_medium_changer_xfer(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
{
    switch (buf[0]) {
    /* medium changer commands */
    case EXCHANGE_MEDIUM:
    case INITIALIZE_ELEMENT_STATUS:
    case INITIALIZE_ELEMENT_STATUS_WITH_RANGE:
    case MOVE_MEDIUM:
    case POSITION_TO_ELEMENT:
        cmd->xfer = 0;
        break;
    case READ_ELEMENT_STATUS:
        cmd->xfer = buf[9] | (buf[8] << 8) | (buf[7] << 16);
        break;

    /* generic commands */
    default:
        return scsi_req_xfer(cmd, dev, buf);
    }
    return 0;
}

static int scsi_req_scanner_length(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
{
    switch (buf[0]) {
    /* Scanner commands */
    case OBJECT_POSITION:
        cmd->xfer = 0;
        break;
    case SCAN:
        cmd->xfer = buf[4];
        break;
    case READ_10:
    case SEND:
    case GET_WINDOW:
    case SET_WINDOW:
        cmd->xfer = buf[8] | (buf[7] << 8) | (buf[6] << 16);
        break;
    default:
        /* GET_DATA_BUFFER_STATUS xfer handled by scsi_req_xfer */
        return scsi_req_xfer(cmd, dev, buf);
    }

    return 0;
}

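/*
 * Classify the data transfer direction of a parsed command: opcodes known to
 * send data to the device become SCSI_XFER_TO_DEV, commands without a data
 * phase become SCSI_XFER_NONE, and everything else defaults to
 * SCSI_XFER_FROM_DEV.  ATA PASS-THROUGH uses the T_DIR bit from the CDB.
 */
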
static void scsi_cmd_xfer_mode(SCSICommand *cmd)
{
    if (!cmd->xfer) {
        cmd->mode = SCSI_XFER_NONE;
        return;
    }
    switch (cmd->buf[0]) {
    case WRITE_6:
    case WRITE_10:
    case WRITE_VERIFY_10:
    case WRITE_12:
    case WRITE_VERIFY_12:
    case WRITE_16:
    case WRITE_VERIFY_16:
    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
    case COPY:
    case COPY_VERIFY:
    case COMPARE:
    case CHANGE_DEFINITION:
    case LOG_SELECT:
    case MODE_SELECT:
    case MODE_SELECT_10:
    case SEND_DIAGNOSTIC:
    case WRITE_BUFFER:
    case FORMAT_UNIT:
    case REASSIGN_BLOCKS:
    case SEARCH_EQUAL:
    case SEARCH_HIGH:
    case SEARCH_LOW:
    case UPDATE_BLOCK:
    case WRITE_LONG_10:
    case WRITE_SAME_10:
    case WRITE_SAME_16:
    case UNMAP:
    case SEARCH_HIGH_12:
    case SEARCH_EQUAL_12:
    case SEARCH_LOW_12:
    case MEDIUM_SCAN:
    case SEND_VOLUME_TAG:
    case SEND_CUE_SHEET:
    case SEND_DVD_STRUCTURE:
    case PERSISTENT_RESERVE_OUT:
    case MAINTENANCE_OUT:
    case SET_WINDOW:
    case SCAN:
        /* SCAN conflicts with START_STOP.  START_STOP has cmd->xfer set to 0 for
         * non-scanner devices, so we only get here for SCAN and not for START_STOP.
         */
        cmd->mode = SCSI_XFER_TO_DEV;
        break;
    case ATA_PASSTHROUGH_12:
    case ATA_PASSTHROUGH_16:
        /* T_DIR */
        cmd->mode = (cmd->buf[2] & 0x8) ?
                    SCSI_XFER_FROM_DEV : SCSI_XFER_TO_DEV;
        break;
    default:
        cmd->mode = SCSI_XFER_FROM_DEV;
        break;
    }
}

int scsi_req_parse_cdb(SCSIDevice *dev, SCSICommand *cmd, uint8_t *buf)
{
    int rc;
    int len;

    cmd->lba = -1;
    len = scsi_cdb_length(buf);
    if (len < 0) {
        return -1;
    }

    cmd->len = len;
    switch (dev->type) {
    case TYPE_TAPE:
        rc = scsi_req_stream_xfer(cmd, dev, buf);
        break;
    case TYPE_MEDIUM_CHANGER:
        rc = scsi_req_medium_changer_xfer(cmd, dev, buf);
        break;
    case TYPE_SCANNER:
        rc = scsi_req_scanner_length(cmd, dev, buf);
        break;
    default:
        rc = scsi_req_xfer(cmd, dev, buf);
        break;
    }

    if (rc != 0)
        return rc;

    memcpy(cmd->buf, buf, cmd->len);
    scsi_cmd_xfer_mode(cmd);
    cmd->lba = scsi_cmd_lba(cmd);
    return 0;
}

void scsi_device_report_change(SCSIDevice *dev, SCSISense sense)
{
    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, dev->qdev.parent_bus);

    scsi_device_set_ua(dev, sense);
    if (bus->info->change) {
        bus->info->change(bus, dev, sense);
    }
}

SCSIRequest *scsi_req_ref(SCSIRequest *req)
{
    assert(req->refcount > 0);
    req->refcount++;
    return req;
}

void scsi_req_unref(SCSIRequest *req)
{
    assert(req->refcount > 0);
    if (--req->refcount == 0) {
        BusState *qbus = req->dev->qdev.parent_bus;
        SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, qbus);

        if (bus->info->free_request && req->hba_private) {
            bus->info->free_request(bus, req->hba_private);
        }
        if (req->ops->free_req) {
            req->ops->free_req(req);
        }
        object_unref(OBJECT(req->dev));
        object_unref(OBJECT(qbus->parent));
        g_free(req);
    }
}

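/*
 * Rough request lifecycle as seen from an HBA (illustrative only):
 *   1. scsi_req_new() parses the CDB and picks the matching SCSIReqOps.
 *   2. scsi_req_enqueue() issues the command to the device.
 *   3. The device calls scsi_req_data(), which either invokes the HBA's
 *      transfer_data callback or performs the DMA itself when the HBA
 *      provided a scatter/gather list; once the data has been moved,
 *      scsi_req_continue() starts the next chunk.
 *   4. scsi_req_complete() reports the final status through the HBA's
 *      complete callback, and the HBA drops its reference with
 *      scsi_req_unref().
 */
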
/* Tell the device that we finished processing this chunk of I/O.  It
   will start the next chunk or complete the command.  */
void scsi_req_continue(SCSIRequest *req)
{
    if (req->io_canceled) {
        trace_scsi_req_continue_canceled(req->dev->id, req->lun, req->tag);
        return;
    }
    trace_scsi_req_continue(req->dev->id, req->lun, req->tag);
    if (req->cmd.mode == SCSI_XFER_TO_DEV) {
        req->ops->write_data(req);
    } else {
        req->ops->read_data(req);
    }
}

/* Called by the devices when data is ready for the HBA.  The HBA should
   start a DMA operation to read or fill the device's data buffer.
   Once it completes, calling scsi_req_continue will restart I/O.  */
void scsi_req_data(SCSIRequest *req, int len)
{
    uint8_t *buf;
    if (req->io_canceled) {
        trace_scsi_req_data_canceled(req->dev->id, req->lun, req->tag, len);
        return;
    }
    trace_scsi_req_data(req->dev->id, req->lun, req->tag, len);
    assert(req->cmd.mode != SCSI_XFER_NONE);
    if (!req->sg) {
        req->resid -= len;
        req->bus->info->transfer_data(req, len);
        return;
    }

    /* If the device calls scsi_req_data and the HBA specified a
     * scatter/gather list, the transfer has to happen in a single
     * step.  */
    assert(!req->dma_started);
    req->dma_started = true;

    buf = scsi_req_get_buf(req);
    if (req->cmd.mode == SCSI_XFER_FROM_DEV) {
        req->resid = dma_buf_read(buf, len, req->sg);
    } else {
        req->resid = dma_buf_write(buf, len, req->sg);
    }
    scsi_req_continue(req);
}

void scsi_req_print(SCSIRequest *req)
{
    FILE *fp = stderr;
    int i;

    fprintf(fp, "[%s id=%d] %s",
            req->dev->qdev.parent_bus->name,
            req->dev->id,
            scsi_command_name(req->cmd.buf[0]));
    for (i = 1; i < req->cmd.len; i++) {
        fprintf(fp, " 0x%02x", req->cmd.buf[i]);
    }
    switch (req->cmd.mode) {
    case SCSI_XFER_NONE:
        fprintf(fp, " - none\n");
        break;
    case SCSI_XFER_FROM_DEV:
        fprintf(fp, " - from-dev len=%zd\n", req->cmd.xfer);
        break;
    case SCSI_XFER_TO_DEV:
        fprintf(fp, " - to-dev len=%zd\n", req->cmd.xfer);
        break;
    default:
        fprintf(fp, " - Oops\n");
        break;
    }
}

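/*
 * Complete a request: record the status, latch any sense data into the
 * device so a later REQUEST SENSE (or HBA autosense) can fetch it, clear a
 * reported unit attention condition, dequeue the request and hand the final
 * status and residual back to the HBA through its complete callback.
 */
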
void scsi_req_complete(SCSIRequest *req, int status)
{
    assert(req->status == -1);
    req->status = status;

    assert(req->sense_len <= sizeof(req->sense));
    if (status == GOOD) {
        req->sense_len = 0;
    }

    if (req->sense_len) {
        memcpy(req->dev->sense, req->sense, req->sense_len);
        req->dev->sense_len = req->sense_len;
        req->dev->sense_is_ua = (req->ops == &reqops_unit_attention);
    } else {
        req->dev->sense_len = 0;
        req->dev->sense_is_ua = false;
    }

    /*
     * Unit attention state is now stored in the device's sense buffer
     * if the HBA didn't do autosense.  Clear the pending unit attention
     * flags.
     */
    scsi_clear_unit_attention(req);

    scsi_req_ref(req);
    scsi_req_dequeue(req);
    req->bus->info->complete(req, req->status, req->resid);

    /* Cancelled requests might end up being completed instead of cancelled */
    notifier_list_notify(&req->cancel_notifiers, req);
    scsi_req_unref(req);
}

/* Called by the devices when the request is canceled. */
void scsi_req_cancel_complete(SCSIRequest *req)
{
    assert(req->io_canceled);
    if (req->bus->info->cancel) {
        req->bus->info->cancel(req);
    }
    notifier_list_notify(&req->cancel_notifiers, req);
    scsi_req_unref(req);
}

/* Cancel @req asynchronously.  @notifier is added to @req's cancellation
 * notifier list; the bus will be notified when the request's cancellation
 * is completed.
 */
void scsi_req_cancel_async(SCSIRequest *req, Notifier *notifier)
{
    trace_scsi_req_cancel(req->dev->id, req->lun, req->tag);
    if (notifier) {
        notifier_list_add(&req->cancel_notifiers, notifier);
    }
    if (req->io_canceled) {
        /* A blk_aio_cancel_async is pending; when it finishes,
         * scsi_req_cancel_complete will be called and will
         * call the notifier we just added.  Just wait for that.
         */
        assert(req->aiocb);
        return;
    }
    /* Dropped in scsi_req_cancel_complete.  */
    scsi_req_ref(req);
    scsi_req_dequeue(req);
    req->io_canceled = true;
    if (req->aiocb) {
        blk_aio_cancel_async(req->aiocb);
    } else {
        scsi_req_cancel_complete(req);
    }
}

void scsi_req_cancel(SCSIRequest *req)
{
    trace_scsi_req_cancel(req->dev->id, req->lun, req->tag);
    if (!req->enqueued) {
        return;
    }
    assert(!req->io_canceled);
    /* Dropped in scsi_req_cancel_complete.  */
    scsi_req_ref(req);
    scsi_req_dequeue(req);
    req->io_canceled = true;
    if (req->aiocb) {
        blk_aio_cancel(req->aiocb);
    } else {
        scsi_req_cancel_complete(req);
    }
}

static int scsi_ua_precedence(SCSISense sense)
{
    if (sense.key != UNIT_ATTENTION) {
        return INT_MAX;
    }
    if (sense.asc == 0x29 && sense.ascq == 0x04) {
        /* DEVICE INTERNAL RESET goes with POWER ON OCCURRED */
        return 1;
    } else if (sense.asc == 0x3F && sense.ascq == 0x01) {
        /* MICROCODE HAS BEEN CHANGED goes with SCSI BUS RESET OCCURRED */
        return 2;
    } else if (sense.asc == 0x29 && (sense.ascq == 0x05 || sense.ascq == 0x06)) {
        /* These two go with "all others". */
        ;
    } else if (sense.asc == 0x29 && sense.ascq <= 0x07) {
        /* POWER ON, RESET OR BUS DEVICE RESET OCCURRED = 0
         * POWER ON OCCURRED = 1
         * SCSI BUS RESET OCCURRED = 2
         * BUS DEVICE RESET FUNCTION OCCURRED = 3
         * I_T NEXUS LOSS OCCURRED = 7
         */
        return sense.ascq;
    } else if (sense.asc == 0x2F && sense.ascq == 0x01) {
        /* COMMANDS CLEARED BY POWER LOSS NOTIFICATION */
        return 8;
    }
    return (sense.asc << 8) | sense.ascq;
}

void scsi_device_set_ua(SCSIDevice *sdev, SCSISense sense)
{
    int prec1, prec2;
    if (sense.key != UNIT_ATTENTION) {
        return;
    }
    trace_scsi_device_set_ua(sdev->id, sdev->lun, sense.key,
                             sense.asc, sense.ascq);

    /*
     * Override a pre-existing unit attention condition, except for a more
     * important reset condition.
     */
    prec1 = scsi_ua_precedence(sdev->unit_attention);
    prec2 = scsi_ua_precedence(sense);
    if (prec2 < prec1) {
        sdev->unit_attention = sense;
    }
}

void scsi_device_purge_requests(SCSIDevice *sdev, SCSISense sense)
{
    SCSIRequest *req;

    aio_context_acquire(blk_get_aio_context(sdev->conf.blk));
    while (!QTAILQ_EMPTY(&sdev->requests)) {
        req = QTAILQ_FIRST(&sdev->requests);
        scsi_req_cancel_async(req, NULL);
    }
    blk_drain(sdev->conf.blk);
    aio_context_release(blk_get_aio_context(sdev->conf.blk));
    scsi_device_set_ua(sdev, sense);
}

static char *scsibus_get_dev_path(DeviceState *dev)
{
    SCSIDevice *d = SCSI_DEVICE(dev);
    DeviceState *hba = dev->parent_bus->parent;
    char *id;
    char *path;

    id = qdev_get_dev_path(hba);
    if (id) {
        path = g_strdup_printf("%s/%d:%d:%d", id, d->channel, d->id, d->lun);
    } else {
        path = g_strdup_printf("%d:%d:%d", d->channel, d->id, d->lun);
    }
    g_free(id);
    return path;
}

static char *scsibus_get_fw_dev_path(DeviceState *dev)
{
    SCSIDevice *d = SCSI_DEVICE(dev);
    return g_strdup_printf("channel@%x/%s@%x,%x", d->channel,
                           qdev_fw_name(dev), d->id, d->lun);
}

SCSIDevice *scsi_device_find(SCSIBus *bus, int channel, int id, int lun)
{
    BusChild *kid;
    SCSIDevice *target_dev = NULL;

    QTAILQ_FOREACH_REVERSE(kid, &bus->qbus.children, sibling) {
        DeviceState *qdev = kid->child;
        SCSIDevice *dev = SCSI_DEVICE(qdev);

        if (dev->channel == channel && dev->id == id) {
            if (dev->lun == lun) {
                return dev;
            }
            target_dev = dev;
        }
    }
    return target_dev;
}

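/*
 * Migration of in-flight requests: each request is written as a one-byte
 * marker (1 if it must be retried after migration, 2 otherwise), followed by
 * the CDB, tag and LUN, plus optional HBA- and request-specific state; a
 * zero byte terminates the list.  On the destination the requests are
 * re-created with scsi_req_new() and re-enqueued.
 */
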
/* SCSI request list.  For simplicity, pv points to the whole device */

static int put_scsi_requests(QEMUFile *f, void *pv, size_t size,
                             const VMStateField *field, QJSON *vmdesc)
{
    SCSIDevice *s = pv;
    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, s->qdev.parent_bus);
    SCSIRequest *req;

    QTAILQ_FOREACH(req, &s->requests, next) {
        assert(!req->io_canceled);
        assert(req->status == -1);
        assert(req->enqueued);

        qemu_put_sbyte(f, req->retry ? 1 : 2);
        qemu_put_buffer(f, req->cmd.buf, sizeof(req->cmd.buf));
        qemu_put_be32s(f, &req->tag);
        qemu_put_be32s(f, &req->lun);
        if (bus->info->save_request) {
            bus->info->save_request(f, req);
        }
        if (req->ops->save_request) {
            req->ops->save_request(f, req);
        }
    }
    qemu_put_sbyte(f, 0);

    return 0;
}

static int get_scsi_requests(QEMUFile *f, void *pv, size_t size,
                             const VMStateField *field)
{
    SCSIDevice *s = pv;
    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, s->qdev.parent_bus);
    int8_t sbyte;

    while ((sbyte = qemu_get_sbyte(f)) > 0) {
        uint8_t buf[SCSI_CMD_BUF_SIZE];
        uint32_t tag;
        uint32_t lun;
        SCSIRequest *req;

        qemu_get_buffer(f, buf, sizeof(buf));
        qemu_get_be32s(f, &tag);
        qemu_get_be32s(f, &lun);
        req = scsi_req_new(s, tag, lun, buf, NULL);
        req->retry = (sbyte == 1);
        if (bus->info->load_request) {
            req->hba_private = bus->info->load_request(f, req);
        }
        if (req->ops->load_request) {
            req->ops->load_request(f, req);
        }

        /* Just restart it later.  */
        scsi_req_enqueue_internal(req);

        /* At this point, the request will be kept alive by the reference
         * added by scsi_req_enqueue_internal, so we can release our reference.
         * The HBA of course will add its own reference in the load_request
         * callback if it needs to hold on to the SCSIRequest.
         */
        scsi_req_unref(req);
    }

    return 0;
}

static const VMStateInfo vmstate_info_scsi_requests = {
    .name = "scsi-requests",
    .get = get_scsi_requests,
    .put = put_scsi_requests,
};

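/*
 * Sense data beyond the first SCSI_SENSE_BUF_SIZE_OLD bytes only goes on the
 * wire when it is actually in use, via the subsection below, keeping the
 * migration stream compatible with versions that only knew the smaller
 * buffer.
 */
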
static bool scsi_sense_state_needed(void *opaque)
{
    SCSIDevice *s = opaque;

    return s->sense_len > SCSI_SENSE_BUF_SIZE_OLD;
}

static const VMStateDescription vmstate_scsi_sense_state = {
    .name = "SCSIDevice/sense",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = scsi_sense_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8_SUB_ARRAY(sense, SCSIDevice,
                                SCSI_SENSE_BUF_SIZE_OLD,
                                SCSI_SENSE_BUF_SIZE - SCSI_SENSE_BUF_SIZE_OLD),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_scsi_device = {
    .name = "SCSIDevice",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(unit_attention.key, SCSIDevice),
        VMSTATE_UINT8(unit_attention.asc, SCSIDevice),
        VMSTATE_UINT8(unit_attention.ascq, SCSIDevice),
        VMSTATE_BOOL(sense_is_ua, SCSIDevice),
        VMSTATE_UINT8_SUB_ARRAY(sense, SCSIDevice, 0, SCSI_SENSE_BUF_SIZE_OLD),
        VMSTATE_UINT32(sense_len, SCSIDevice),
        {
            .name = "requests",
            .version_id = 0,
            .field_exists = NULL,
            .size = 0, /* ouch */
            .info = &vmstate_info_scsi_requests,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_scsi_sense_state,
        NULL
    }
};

static void scsi_device_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *k = DEVICE_CLASS(klass);
    set_bit(DEVICE_CATEGORY_STORAGE, k->categories);
    k->bus_type = TYPE_SCSI_BUS;
    k->realize = scsi_qdev_realize;
    k->unrealize = scsi_qdev_unrealize;
    k->props = scsi_props;
}

static void scsi_dev_instance_init(Object *obj)
{
    DeviceState *dev = DEVICE(obj);
    SCSIDevice *s = SCSI_DEVICE(dev);

    device_add_bootindex_property(obj, &s->conf.bootindex,
                                  "bootindex", NULL,
                                  &s->qdev, NULL);
}

static const TypeInfo scsi_device_type_info = {
    .name = TYPE_SCSI_DEVICE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(SCSIDevice),
    .abstract = true,
    .class_size = sizeof(SCSIDeviceClass),
    .class_init = scsi_device_class_init,
    .instance_init = scsi_dev_instance_init,
};

static void scsi_register_types(void)
{
    type_register_static(&scsi_bus_info);
    type_register_static(&scsi_device_type_info);
}

type_init(scsi_register_types)