#include "qemu/osdep.h"
#include "hw/hw.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "qemu/option.h"
#include "hw/scsi/scsi.h"
#include "migration/qemu-file-types.h"
#include "scsi/constants.h"
#include "hw/qdev.h"
#include "sysemu/block-backend.h"
#include "sysemu/blockdev.h"
#include "trace.h"
#include "sysemu/dma.h"
#include "qemu/cutils.h"

static char *scsibus_get_dev_path(DeviceState *dev);
static char *scsibus_get_fw_dev_path(DeviceState *dev);
static void scsi_req_dequeue(SCSIRequest *req);
static uint8_t *scsi_target_alloc_buf(SCSIRequest *req, size_t len);
static void scsi_target_free_buf(SCSIRequest *req);

static Property scsi_props[] = {
    DEFINE_PROP_UINT32("channel", SCSIDevice, channel, 0),
    DEFINE_PROP_UINT32("scsi-id", SCSIDevice, id, -1),
    DEFINE_PROP_UINT32("lun", SCSIDevice, lun, -1),
    DEFINE_PROP_END_OF_LIST(),
};

static void scsi_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *k = BUS_CLASS(klass);
    HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);

    k->get_dev_path = scsibus_get_dev_path;
    k->get_fw_dev_path = scsibus_get_fw_dev_path;
    hc->unplug = qdev_simple_device_unplug_cb;
}

static const TypeInfo scsi_bus_info = {
    .name = TYPE_SCSI_BUS,
    .parent = TYPE_BUS,
    .instance_size = sizeof(SCSIBus),
    .class_init = scsi_bus_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_HOTPLUG_HANDLER },
        { }
    }
};
static int next_scsi_bus;

static void scsi_device_realize(SCSIDevice *s, Error **errp)
{
    SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s);
    if (sc->realize) {
        sc->realize(s, errp);
    }
}

int scsi_bus_parse_cdb(SCSIDevice *dev, SCSICommand *cmd, uint8_t *buf,
                       void *hba_private)
{
    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, dev->qdev.parent_bus);
    int rc;

    assert(cmd->len == 0);
    rc = scsi_req_parse_cdb(dev, cmd, buf);
    if (bus->info->parse_cdb) {
        rc = bus->info->parse_cdb(dev, cmd, buf, hba_private);
    }
    return rc;
}

static SCSIRequest *scsi_device_alloc_req(SCSIDevice *s, uint32_t tag, uint32_t lun,
                                          uint8_t *buf, void *hba_private)
{
    SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s);
    if (sc->alloc_req) {
        return sc->alloc_req(s, tag, lun, buf, hba_private);
    }

    return NULL;
}

void scsi_device_unit_attention_reported(SCSIDevice *s)
{
    SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s);
    if (sc->unit_attention_reported) {
        sc->unit_attention_reported(s);
    }
}

/* Create a scsi bus, and attach devices to it.  */
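/*
 * Rough usage sketch for the helper below (illustrative only; MyHBAState and
 * my_hba_scsi_info are hypothetical names): an HBA embeds a SCSIBus and sets
 * it up from its realize function, after which the bus acts as its own
 * hotplug handler.
 *
 *     static void my_hba_realize(DeviceState *dev, Error **errp)
 *     {
 *         MyHBAState *s = MY_HBA(dev);
 *
 *         scsi_bus_new(&s->bus, sizeof(s->bus), dev, &my_hba_scsi_info, NULL);
 *         scsi_bus_legacy_handle_cmdline(&s->bus);
 *     }
 */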
void scsi_bus_new(SCSIBus *bus, size_t bus_size, DeviceState *host,
                  const SCSIBusInfo *info, const char *bus_name)
{
    qbus_create_inplace(bus, bus_size, TYPE_SCSI_BUS, host, bus_name);
    bus->busnr = next_scsi_bus++;
    bus->info = info;
    qbus_set_bus_hotplug_handler(BUS(bus), &error_abort);
}

static void scsi_dma_restart_bh(void *opaque)
{
    SCSIDevice *s = opaque;
    SCSIRequest *req, *next;

    qemu_bh_delete(s->bh);
    s->bh = NULL;

    aio_context_acquire(blk_get_aio_context(s->conf.blk));
    QTAILQ_FOREACH_SAFE(req, &s->requests, next, next) {
        scsi_req_ref(req);
        if (req->retry) {
            req->retry = false;
            switch (req->cmd.mode) {
            case SCSI_XFER_FROM_DEV:
            case SCSI_XFER_TO_DEV:
                scsi_req_continue(req);
                break;
            case SCSI_XFER_NONE:
                scsi_req_dequeue(req);
                scsi_req_enqueue(req);
                break;
            }
        }
        scsi_req_unref(req);
    }
    aio_context_release(blk_get_aio_context(s->conf.blk));
}

void scsi_req_retry(SCSIRequest *req)
{
    /* No need to save a reference, because scsi_dma_restart_bh just
     * looks at the request list.  */
    req->retry = true;
}

static void scsi_dma_restart_cb(void *opaque, int running, RunState state)
{
    SCSIDevice *s = opaque;

    if (!running) {
        return;
    }
    if (!s->bh) {
        AioContext *ctx = blk_get_aio_context(s->conf.blk);
        s->bh = aio_bh_new(ctx, scsi_dma_restart_bh, s);
        qemu_bh_schedule(s->bh);
    }
}

static void scsi_qdev_realize(DeviceState *qdev, Error **errp)
{
    SCSIDevice *dev = SCSI_DEVICE(qdev);
    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, dev->qdev.parent_bus);
    SCSIDevice *d;
    Error *local_err = NULL;

    if (dev->channel > bus->info->max_channel) {
        error_setg(errp, "bad scsi channel id: %d", dev->channel);
        return;
    }
    if (dev->id != -1 && dev->id > bus->info->max_target) {
        error_setg(errp, "bad scsi device id: %d", dev->id);
        return;
    }
    if (dev->lun != -1 && dev->lun > bus->info->max_lun) {
        error_setg(errp, "bad scsi device lun: %d", dev->lun);
        return;
    }

    if (dev->id == -1) {
        int id = -1;
        if (dev->lun == -1) {
            dev->lun = 0;
        }
        do {
            d = scsi_device_find(bus, dev->channel, ++id, dev->lun);
        } while (d && d->lun == dev->lun && id < bus->info->max_target);
        if (d && d->lun == dev->lun) {
            error_setg(errp, "no free target");
            return;
        }
        dev->id = id;
    } else if (dev->lun == -1) {
        int lun = -1;
        do {
            d = scsi_device_find(bus, dev->channel, dev->id, ++lun);
        } while (d && d->lun == lun && lun < bus->info->max_lun);
        if (d && d->lun == lun) {
            error_setg(errp, "no free lun");
            return;
        }
        dev->lun = lun;
    } else {
        d = scsi_device_find(bus, dev->channel, dev->id, dev->lun);
        assert(d);
        if (d->lun == dev->lun && dev != d) {
            error_setg(errp, "lun already used by '%s'", d->qdev.id);
            return;
        }
    }

    QTAILQ_INIT(&dev->requests);
    scsi_device_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    dev->vmsentry = qdev_add_vm_change_state_handler(DEVICE(dev),
                                                     scsi_dma_restart_cb, dev);
}

static void scsi_qdev_unrealize(DeviceState *qdev, Error **errp)
{
    SCSIDevice *dev = SCSI_DEVICE(qdev);

    if (dev->vmsentry) {
        qemu_del_vm_change_state_handler(dev->vmsentry);
    }

    scsi_device_purge_requests(dev, SENSE_CODE(NO_SENSE));

    blockdev_mark_auto_del(dev->conf.blk);
}
/* handle legacy '-drive if=scsi,...' cmd line args */
SCSIDevice *scsi_bus_legacy_add_drive(SCSIBus *bus, BlockBackend *blk,
                                      int unit, bool removable, int bootindex,
                                      bool share_rw,
                                      BlockdevOnError rerror,
                                      BlockdevOnError werror,
                                      const char *serial, Error **errp)
{
    const char *driver;
    char *name;
    DeviceState *dev;
    Error *err = NULL;

    driver = blk_is_sg(blk) ? "scsi-generic" : "scsi-disk";
    dev = qdev_create(&bus->qbus, driver);
    name = g_strdup_printf("legacy[%d]", unit);
    object_property_add_child(OBJECT(bus), name, OBJECT(dev), NULL);
    g_free(name);

    qdev_prop_set_uint32(dev, "scsi-id", unit);
    if (bootindex >= 0) {
        object_property_set_int(OBJECT(dev), bootindex, "bootindex",
                                &error_abort);
    }
    if (object_property_find(OBJECT(dev), "removable", NULL)) {
        qdev_prop_set_bit(dev, "removable", removable);
    }
    if (serial && object_property_find(OBJECT(dev), "serial", NULL)) {
        qdev_prop_set_string(dev, "serial", serial);
    }
    qdev_prop_set_drive(dev, "drive", blk, &err);
    if (err) {
        error_propagate(errp, err);
        object_unparent(OBJECT(dev));
        return NULL;
    }
    object_property_set_bool(OBJECT(dev), share_rw, "share-rw", &err);
    if (err != NULL) {
        error_propagate(errp, err);
        object_unparent(OBJECT(dev));
        return NULL;
    }

    qdev_prop_set_enum(dev, "rerror", rerror);
    qdev_prop_set_enum(dev, "werror", werror);

    object_property_set_bool(OBJECT(dev), true, "realized", &err);
    if (err != NULL) {
        error_propagate(errp, err);
        object_unparent(OBJECT(dev));
        return NULL;
    }
    return SCSI_DEVICE(dev);
}

void scsi_bus_legacy_handle_cmdline(SCSIBus *bus)
{
    Location loc;
    DriveInfo *dinfo;
    int unit;

    loc_push_none(&loc);
    for (unit = 0; unit <= bus->info->max_target; unit++) {
        dinfo = drive_get(IF_SCSI, bus->busnr, unit);
        if (dinfo == NULL) {
            continue;
        }
        qemu_opts_loc_restore(dinfo->opts);
        scsi_bus_legacy_add_drive(bus, blk_by_legacy_dinfo(dinfo),
                                  unit, false, -1, false,
                                  BLOCKDEV_ON_ERROR_AUTO,
                                  BLOCKDEV_ON_ERROR_AUTO,
                                  NULL, &error_fatal);
    }
    loc_pop(&loc);
}

static int32_t scsi_invalid_field(SCSIRequest *req, uint8_t *buf)
{
    scsi_req_build_sense(req, SENSE_CODE(INVALID_FIELD));
    scsi_req_complete(req, CHECK_CONDITION);
    return 0;
}

static const struct SCSIReqOps reqops_invalid_field = {
    .size = sizeof(SCSIRequest),
    .send_command = scsi_invalid_field
};

/* SCSIReqOps implementation for invalid commands.  */

static int32_t scsi_invalid_command(SCSIRequest *req, uint8_t *buf)
{
    scsi_req_build_sense(req, SENSE_CODE(INVALID_OPCODE));
    scsi_req_complete(req, CHECK_CONDITION);
    return 0;
}

static const struct SCSIReqOps reqops_invalid_opcode = {
    .size = sizeof(SCSIRequest),
    .send_command = scsi_invalid_command
};

/* SCSIReqOps implementation for unit attention conditions.  */

static int32_t scsi_unit_attention(SCSIRequest *req, uint8_t *buf)
{
    if (req->dev->unit_attention.key == UNIT_ATTENTION) {
        scsi_req_build_sense(req, req->dev->unit_attention);
    } else if (req->bus->unit_attention.key == UNIT_ATTENTION) {
        scsi_req_build_sense(req, req->bus->unit_attention);
    }
    scsi_req_complete(req, CHECK_CONDITION);
    return 0;
}

static const struct SCSIReqOps reqops_unit_attention = {
    .size = sizeof(SCSIRequest),
    .send_command = scsi_unit_attention
};
/* SCSIReqOps implementation for REPORT LUNS and for commands sent to
   an invalid LUN.  */

typedef struct SCSITargetReq SCSITargetReq;

struct SCSITargetReq {
    SCSIRequest req;
    int len;
    uint8_t *buf;
    int buf_len;
};

static void store_lun(uint8_t *outbuf, int lun)
{
    if (lun < 256) {
        outbuf[1] = lun;
        return;
    }
    outbuf[1] = (lun & 255);
    outbuf[0] = (lun >> 8) | 0x40;
}

static bool scsi_target_emulate_report_luns(SCSITargetReq *r)
{
    BusChild *kid;
    int i, len, n;
    int channel, id;
    bool found_lun0;

    if (r->req.cmd.xfer < 16) {
        return false;
    }
    if (r->req.cmd.buf[2] > 2) {
        return false;
    }
    channel = r->req.dev->channel;
    id = r->req.dev->id;
    found_lun0 = false;
    n = 0;
    QTAILQ_FOREACH(kid, &r->req.bus->qbus.children, sibling) {
        DeviceState *qdev = kid->child;
        SCSIDevice *dev = SCSI_DEVICE(qdev);

        if (dev->channel == channel && dev->id == id) {
            if (dev->lun == 0) {
                found_lun0 = true;
            }
            n += 8;
        }
    }
    if (!found_lun0) {
        n += 8;
    }

    scsi_target_alloc_buf(&r->req, n + 8);

    len = MIN(n + 8, r->req.cmd.xfer & ~7);
    memset(r->buf, 0, len);
    stl_be_p(&r->buf[0], n);
    i = found_lun0 ? 8 : 16;
    QTAILQ_FOREACH(kid, &r->req.bus->qbus.children, sibling) {
        DeviceState *qdev = kid->child;
        SCSIDevice *dev = SCSI_DEVICE(qdev);

        if (dev->channel == channel && dev->id == id) {
            store_lun(&r->buf[i], dev->lun);
            i += 8;
        }
    }
    assert(i == n + 8);
    r->len = len;
    return true;
}
static bool scsi_target_emulate_inquiry(SCSITargetReq *r)
{
    assert(r->req.dev->lun != r->req.lun);

    scsi_target_alloc_buf(&r->req, SCSI_INQUIRY_LEN);

    if (r->req.cmd.buf[1] & 0x2) {
        /* Command support data - optional, not implemented */
        return false;
    }

    if (r->req.cmd.buf[1] & 0x1) {
        /* Vital product data */
        uint8_t page_code = r->req.cmd.buf[2];
        r->buf[r->len++] = page_code; /* this page */
        r->buf[r->len++] = 0x00;

        switch (page_code) {
        case 0x00: /* Supported page codes, mandatory */
        {
            int pages;
            pages = r->len++;
            r->buf[r->len++] = 0x00; /* list of supported pages (this page) */
            r->buf[pages] = r->len - pages - 1; /* number of pages */
            break;
        }
        default:
            return false;
        }
        /* done with EVPD */
        assert(r->len < r->buf_len);
        r->len = MIN(r->req.cmd.xfer, r->len);
        return true;
    }

    /* Standard INQUIRY data */
    if (r->req.cmd.buf[2] != 0) {
        return false;
    }

    /* PAGE CODE == 0 */
    r->len = MIN(r->req.cmd.xfer, SCSI_INQUIRY_LEN);
    memset(r->buf, 0, r->len);
    if (r->req.lun != 0) {
        r->buf[0] = TYPE_NO_LUN;
    } else {
        r->buf[0] = TYPE_NOT_PRESENT | TYPE_INACTIVE;
        r->buf[2] = 5; /* Version */
        r->buf[3] = 2 | 0x10; /* HiSup, response data format */
        r->buf[4] = r->len - 5; /* Additional Length = (Len - 1) - 4 */
        r->buf[7] = 0x10 | (r->req.bus->info->tcq ? 0x02 : 0); /* Sync, TCQ.  */
        /* Vendor and product id fields are space-padded to 8 and 16 bytes. */
        memcpy(&r->buf[8], "QEMU    ", 8);
        memcpy(&r->buf[16], "QEMU TARGET     ", 16);
        pstrcpy((char *) &r->buf[32], 4, qemu_hw_version());
    }
    return true;
}

static size_t scsi_sense_len(SCSIRequest *req)
{
    if (req->dev->type == TYPE_SCANNER)
        return SCSI_SENSE_LEN_SCANNER;
    else
        return SCSI_SENSE_LEN;
}
static int32_t scsi_target_send_command(SCSIRequest *req, uint8_t *buf)
{
    SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);
    int fixed_sense = (req->cmd.buf[1] & 1) == 0;

    if (req->lun != 0 &&
        buf[0] != INQUIRY && buf[0] != REQUEST_SENSE) {
        scsi_req_build_sense(req, SENSE_CODE(LUN_NOT_SUPPORTED));
        scsi_req_complete(req, CHECK_CONDITION);
        return 0;
    }
    switch (buf[0]) {
    case REPORT_LUNS:
        if (!scsi_target_emulate_report_luns(r)) {
            goto illegal_request;
        }
        break;
    case INQUIRY:
        if (!scsi_target_emulate_inquiry(r)) {
            goto illegal_request;
        }
        break;
    case REQUEST_SENSE:
        scsi_target_alloc_buf(&r->req, scsi_sense_len(req));
        if (req->lun != 0) {
            const struct SCSISense sense = SENSE_CODE(LUN_NOT_SUPPORTED);

            r->len = scsi_build_sense_buf(r->buf, req->cmd.xfer,
                                          sense, fixed_sense);
        } else {
            r->len = scsi_device_get_sense(r->req.dev, r->buf,
                                           MIN(req->cmd.xfer, r->buf_len),
                                           fixed_sense);
        }
        if (r->req.dev->sense_is_ua) {
            scsi_device_unit_attention_reported(req->dev);
            r->req.dev->sense_len = 0;
            r->req.dev->sense_is_ua = false;
        }
        break;
    case TEST_UNIT_READY:
        break;
    default:
        scsi_req_build_sense(req, SENSE_CODE(INVALID_OPCODE));
        scsi_req_complete(req, CHECK_CONDITION);
        return 0;
    illegal_request:
        scsi_req_build_sense(req, SENSE_CODE(INVALID_FIELD));
        scsi_req_complete(req, CHECK_CONDITION);
        return 0;
    }

    if (!r->len) {
        scsi_req_complete(req, GOOD);
    }
    return r->len;
}

static void scsi_target_read_data(SCSIRequest *req)
{
    SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);
    uint32_t n;

    n = r->len;
    if (n > 0) {
        r->len = 0;
        scsi_req_data(&r->req, n);
    } else {
        scsi_req_complete(&r->req, GOOD);
    }
}

static uint8_t *scsi_target_get_buf(SCSIRequest *req)
{
    SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);

    return r->buf;
}

static uint8_t *scsi_target_alloc_buf(SCSIRequest *req, size_t len)
{
    SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);

    r->buf = g_malloc(len);
    r->buf_len = len;

    return r->buf;
}

static void scsi_target_free_buf(SCSIRequest *req)
{
    SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);

    g_free(r->buf);
}

static const struct SCSIReqOps reqops_target_command = {
    .size = sizeof(SCSITargetReq),
    .send_command = scsi_target_send_command,
    .read_data = scsi_target_read_data,
    .get_buf = scsi_target_get_buf,
    .free_req = scsi_target_free_buf,
};


SCSIRequest *scsi_req_alloc(const SCSIReqOps *reqops, SCSIDevice *d,
                            uint32_t tag, uint32_t lun, void *hba_private)
{
    SCSIRequest *req;
    SCSIBus *bus = scsi_bus_from_device(d);
    BusState *qbus = BUS(bus);
    const int memset_off = offsetof(SCSIRequest, sense)
                           + sizeof(req->sense);

    req = g_malloc(reqops->size);
    memset((uint8_t *)req + memset_off, 0, reqops->size - memset_off);
    req->refcount = 1;
    req->bus = bus;
    req->dev = d;
    req->tag = tag;
    req->lun = lun;
    req->hba_private = hba_private;
    req->status = -1;
    req->ops = reqops;
    object_ref(OBJECT(d));
    object_ref(OBJECT(qbus->parent));
    notifier_list_init(&req->cancel_notifiers);
    trace_scsi_req_alloc(req->dev->id, req->lun, req->tag);
    return req;
}
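/*
 * Descriptive note (summarising the dispatch below): scsi_req_new() picks the
 * SCSIReqOps for a CDB roughly as follows.  A pending unit attention
 * condition on the device or the bus hijacks most commands with
 * reqops_unit_attention; commands addressed to a LUN the device does not
 * implement, REPORT LUNS, and REQUEST SENSE with stored sense data are
 * emulated at the target level with reqops_target_command; everything else
 * goes to the device class, or to reqops_invalid_opcode/reqops_invalid_field
 * when the CDB cannot be parsed or the transfer length is out of range.
 */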
SCSIRequest *scsi_req_new(SCSIDevice *d, uint32_t tag, uint32_t lun,
                          uint8_t *buf, void *hba_private)
{
    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, d->qdev.parent_bus);
    const SCSIReqOps *ops;
    SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(d);
    SCSIRequest *req;
    SCSICommand cmd = { .len = 0 };
    int ret;

    if ((d->unit_attention.key == UNIT_ATTENTION ||
         bus->unit_attention.key == UNIT_ATTENTION) &&
        (buf[0] != INQUIRY &&
         buf[0] != REPORT_LUNS &&
         buf[0] != GET_CONFIGURATION &&
         buf[0] != GET_EVENT_STATUS_NOTIFICATION &&

         /*
          * If we already have a pending unit attention condition,
          * report this one before triggering another one.
          */
         !(buf[0] == REQUEST_SENSE && d->sense_is_ua))) {
        ops = &reqops_unit_attention;
    } else if (lun != d->lun ||
               buf[0] == REPORT_LUNS ||
               (buf[0] == REQUEST_SENSE && d->sense_len)) {
        ops = &reqops_target_command;
    } else {
        ops = NULL;
    }

    if (ops != NULL || !sc->parse_cdb) {
        ret = scsi_req_parse_cdb(d, &cmd, buf);
    } else {
        ret = sc->parse_cdb(d, &cmd, buf, hba_private);
    }

    if (ret != 0) {
        trace_scsi_req_parse_bad(d->id, lun, tag, buf[0]);
        req = scsi_req_alloc(&reqops_invalid_opcode, d, tag, lun, hba_private);
    } else {
        assert(cmd.len != 0);
        trace_scsi_req_parsed(d->id, lun, tag, buf[0],
                              cmd.mode, cmd.xfer);
        if (cmd.lba != -1) {
            trace_scsi_req_parsed_lba(d->id, lun, tag, buf[0],
                                      cmd.lba);
        }

        if (cmd.xfer > INT32_MAX) {
            req = scsi_req_alloc(&reqops_invalid_field, d, tag, lun, hba_private);
        } else if (ops) {
            req = scsi_req_alloc(ops, d, tag, lun, hba_private);
        } else {
            req = scsi_device_alloc_req(d, tag, lun, buf, hba_private);
        }
    }

    req->cmd = cmd;
    req->resid = req->cmd.xfer;

    switch (buf[0]) {
    case INQUIRY:
        trace_scsi_inquiry(d->id, lun, tag, cmd.buf[1], cmd.buf[2]);
        break;
    case TEST_UNIT_READY:
        trace_scsi_test_unit_ready(d->id, lun, tag);
        break;
    case REPORT_LUNS:
        trace_scsi_report_luns(d->id, lun, tag);
        break;
    case REQUEST_SENSE:
        trace_scsi_request_sense(d->id, lun, tag);
        break;
    default:
        break;
    }

    return req;
}

uint8_t *scsi_req_get_buf(SCSIRequest *req)
{
    return req->ops->get_buf(req);
}

static void scsi_clear_unit_attention(SCSIRequest *req)
{
    SCSISense *ua;
    if (req->dev->unit_attention.key != UNIT_ATTENTION &&
        req->bus->unit_attention.key != UNIT_ATTENTION) {
        return;
    }

    /*
     * If an INQUIRY command enters the enabled command state,
     * the device server shall [not] clear any unit attention condition;
     * See also MMC-6, paragraphs 6.5 and 6.6.2.
     */
    if (req->cmd.buf[0] == INQUIRY ||
        req->cmd.buf[0] == GET_CONFIGURATION ||
        req->cmd.buf[0] == GET_EVENT_STATUS_NOTIFICATION) {
        return;
    }

    if (req->dev->unit_attention.key == UNIT_ATTENTION) {
        ua = &req->dev->unit_attention;
    } else {
        ua = &req->bus->unit_attention;
    }

    /*
     * If a REPORT LUNS command enters the enabled command state, [...]
     * the device server shall clear any pending unit attention condition
     * with an additional sense code of REPORTED LUNS DATA HAS CHANGED.
     */
    if (req->cmd.buf[0] == REPORT_LUNS &&
        !(ua->asc == SENSE_CODE(REPORTED_LUNS_CHANGED).asc &&
          ua->ascq == SENSE_CODE(REPORTED_LUNS_CHANGED).ascq)) {
        return;
    }

    *ua = SENSE_CODE(NO_SENSE);
}
int scsi_req_get_sense(SCSIRequest *req, uint8_t *buf, int len)
{
    int ret;

    assert(len >= 14);
    if (!req->sense_len) {
        return 0;
    }

    ret = scsi_convert_sense(req->sense, req->sense_len, buf, len, true);

    /*
     * FIXME: clearing unit attention conditions upon autosense should be done
     * only if the UA_INTLCK_CTRL field in the Control mode page is set to 00b
     * (SAM-5, 5.14).
     *
     * We assume UA_INTLCK_CTRL to be 00b for HBAs that support autosense, and
     * 10b for HBAs that do not support it (do not call scsi_req_get_sense).
     * Here we handle unit attention clearing for UA_INTLCK_CTRL == 00b.
     */
    if (req->dev->sense_is_ua) {
        scsi_device_unit_attention_reported(req->dev);
        req->dev->sense_len = 0;
        req->dev->sense_is_ua = false;
    }
    return ret;
}

int scsi_device_get_sense(SCSIDevice *dev, uint8_t *buf, int len, bool fixed)
{
    return scsi_convert_sense(dev->sense, dev->sense_len, buf, len, fixed);
}

void scsi_req_build_sense(SCSIRequest *req, SCSISense sense)
{
    trace_scsi_req_build_sense(req->dev->id, req->lun, req->tag,
                               sense.key, sense.asc, sense.ascq);
    req->sense_len = scsi_build_sense(req->sense, sense);
}

static void scsi_req_enqueue_internal(SCSIRequest *req)
{
    assert(!req->enqueued);
    scsi_req_ref(req);
    if (req->bus->info->get_sg_list) {
        req->sg = req->bus->info->get_sg_list(req);
    } else {
        req->sg = NULL;
    }
    req->enqueued = true;
    QTAILQ_INSERT_TAIL(&req->dev->requests, req, next);
}

int32_t scsi_req_enqueue(SCSIRequest *req)
{
    int32_t rc;

    assert(!req->retry);
    scsi_req_enqueue_internal(req);
    scsi_req_ref(req);
    rc = req->ops->send_command(req, req->cmd.buf);
    scsi_req_unref(req);
    return rc;
}

static void scsi_req_dequeue(SCSIRequest *req)
{
    trace_scsi_req_dequeue(req->dev->id, req->lun, req->tag);
    req->retry = false;
    if (req->enqueued) {
        QTAILQ_REMOVE(&req->dev->requests, req, next);
        req->enqueued = false;
        scsi_req_unref(req);
    }
}

static int scsi_get_performance_length(int num_desc, int type, int data_type)
{
    /* MMC-6, paragraph 6.7.  */
    switch (type) {
    case 0:
        if ((data_type & 3) == 0) {
            /* Each descriptor is as in Table 295 - Nominal performance.  */
            return 16 * num_desc + 8;
        } else {
            /* Each descriptor is as in Table 296 - Exceptions.  */
            return 6 * num_desc + 8;
        }
    case 1:
    case 4:
    case 5:
        return 8 * num_desc + 8;
    case 2:
        return 2048 * num_desc + 8;
    case 3:
        return 16 * num_desc + 8;
    default:
        return 8;
    }
}
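/*
 * ATA PASS-THROUGH (12/16) transfer lengths.  The helpers below decode the
 * CDB fields that control the count (field names per SAT; the bit positions
 * match what the code reads): T_LENGTH in byte 2 bits 1:0, BYTE_BLOCK in
 * bit 2, T_TYPE in bit 4.  The resulting unit is bytes, 512-byte sectors, or
 * the device's logical block size, depending on BYTE_BLOCK and T_TYPE.
 */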
static int ata_passthrough_xfer_unit(SCSIDevice *dev, uint8_t *buf)
{
    int byte_block = (buf[2] >> 2) & 0x1;
    int type = (buf[2] >> 4) & 0x1;
    int xfer_unit;

    if (byte_block) {
        if (type) {
            xfer_unit = dev->blocksize;
        } else {
            xfer_unit = 512;
        }
    } else {
        xfer_unit = 1;
    }

    return xfer_unit;
}

static int ata_passthrough_12_xfer(SCSIDevice *dev, uint8_t *buf)
{
    int length = buf[2] & 0x3;
    int xfer;
    int unit = ata_passthrough_xfer_unit(dev, buf);

    switch (length) {
    case 0:
    case 3: /* USB-specific.  */
    default:
        xfer = 0;
        break;
    case 1:
        xfer = buf[3];
        break;
    case 2:
        xfer = buf[4];
        break;
    }

    return xfer * unit;
}

static int ata_passthrough_16_xfer(SCSIDevice *dev, uint8_t *buf)
{
    int extend = buf[1] & 0x1;
    int length = buf[2] & 0x3;
    int xfer;
    int unit = ata_passthrough_xfer_unit(dev, buf);

    switch (length) {
    case 0:
    case 3: /* USB-specific.  */
    default:
        xfer = 0;
        break;
    case 1:
        xfer = buf[4];
        xfer |= (extend ? buf[3] << 8 : 0);
        break;
    case 2:
        xfer = buf[6];
        xfer |= (extend ? buf[5] << 8 : 0);
        break;
    }

    return xfer * unit;
}
static int scsi_req_xfer(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
{
    cmd->xfer = scsi_cdb_xfer(buf);
    switch (buf[0]) {
    case TEST_UNIT_READY:
    case REWIND:
    case START_STOP:
    case SET_CAPACITY:
    case WRITE_FILEMARKS:
    case WRITE_FILEMARKS_16:
    case SPACE:
    case RESERVE:
    case RELEASE:
    case ERASE:
    case ALLOW_MEDIUM_REMOVAL:
    case SEEK_10:
    case SYNCHRONIZE_CACHE:
    case SYNCHRONIZE_CACHE_16:
    case LOCATE_16:
    case LOCK_UNLOCK_CACHE:
    case SET_CD_SPEED:
    case SET_LIMITS:
    case WRITE_LONG_10:
    case UPDATE_BLOCK:
    case RESERVE_TRACK:
    case SET_READ_AHEAD:
    case PRE_FETCH:
    case PRE_FETCH_16:
    case ALLOW_OVERWRITE:
        cmd->xfer = 0;
        break;
    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
        if ((buf[1] & 2) == 0) {
            cmd->xfer = 0;
        } else if ((buf[1] & 4) != 0) {
            cmd->xfer = 1;
        }
        cmd->xfer *= dev->blocksize;
        break;
    case MODE_SENSE:
        break;
    case WRITE_SAME_10:
    case WRITE_SAME_16:
        cmd->xfer = buf[1] & 1 ? 0 : dev->blocksize;
        break;
    case READ_CAPACITY_10:
        cmd->xfer = 8;
        break;
    case READ_BLOCK_LIMITS:
        cmd->xfer = 6;
        break;
    case SEND_VOLUME_TAG:
        /* GPCMD_SET_STREAMING from multimedia commands.  */
        if (dev->type == TYPE_ROM) {
            cmd->xfer = buf[10] | (buf[9] << 8);
        } else {
            cmd->xfer = buf[9] | (buf[8] << 8);
        }
        break;
    case WRITE_6:
        /* length 0 means 256 blocks */
        if (cmd->xfer == 0) {
            cmd->xfer = 256;
        }
        /* fall through */
    case WRITE_10:
    case WRITE_VERIFY_10:
    case WRITE_12:
    case WRITE_VERIFY_12:
    case WRITE_16:
    case WRITE_VERIFY_16:
        cmd->xfer *= dev->blocksize;
        break;
    case READ_6:
    case READ_REVERSE:
        /* length 0 means 256 blocks */
        if (cmd->xfer == 0) {
            cmd->xfer = 256;
        }
        /* fall through */
    case READ_10:
    case READ_12:
    case READ_16:
        cmd->xfer *= dev->blocksize;
        break;
    case FORMAT_UNIT:
        /* MMC mandates the parameter list to be 12 bytes long.  Parameters
         * for block devices are restricted to the header right now.  */
        if (dev->type == TYPE_ROM && (buf[1] & 16)) {
            cmd->xfer = 12;
        } else {
            cmd->xfer = (buf[1] & 16) == 0 ? 0 : (buf[1] & 32 ? 8 : 4);
        }
        break;
    case INQUIRY:
    case RECEIVE_DIAGNOSTIC:
    case SEND_DIAGNOSTIC:
        cmd->xfer = buf[4] | (buf[3] << 8);
        break;
    case READ_CD:
    case READ_BUFFER:
    case WRITE_BUFFER:
    case SEND_CUE_SHEET:
        cmd->xfer = buf[8] | (buf[7] << 8) | (buf[6] << 16);
        break;
    case PERSISTENT_RESERVE_OUT:
        cmd->xfer = ldl_be_p(&buf[5]) & 0xffffffffULL;
        break;
    case ERASE_12:
        if (dev->type == TYPE_ROM) {
            /* MMC command GET PERFORMANCE.  */
            cmd->xfer = scsi_get_performance_length(buf[9] | (buf[8] << 8),
                                                    buf[10], buf[1] & 0x1f);
        }
        break;
    case MECHANISM_STATUS:
    case READ_DVD_STRUCTURE:
    case SEND_DVD_STRUCTURE:
    case MAINTENANCE_OUT:
    case MAINTENANCE_IN:
        if (dev->type == TYPE_ROM) {
            /* GPCMD_REPORT_KEY and GPCMD_SEND_KEY from multi media commands */
            cmd->xfer = buf[9] | (buf[8] << 8);
        }
        break;
    case ATA_PASSTHROUGH_12:
        if (dev->type == TYPE_ROM) {
            /* BLANK command of MMC */
            cmd->xfer = 0;
        } else {
            cmd->xfer = ata_passthrough_12_xfer(dev, buf);
        }
        break;
    case ATA_PASSTHROUGH_16:
        cmd->xfer = ata_passthrough_16_xfer(dev, buf);
        break;
    }
    return 0;
}
static int scsi_req_stream_xfer(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
{
    switch (buf[0]) {
    /* stream commands */
    case ERASE_12:
    case ERASE_16:
        cmd->xfer = 0;
        break;
    case READ_6:
    case READ_REVERSE:
    case RECOVER_BUFFERED_DATA:
    case WRITE_6:
        cmd->xfer = buf[4] | (buf[3] << 8) | (buf[2] << 16);
        if (buf[1] & 0x01) { /* fixed */
            cmd->xfer *= dev->blocksize;
        }
        break;
    case READ_16:
    case READ_REVERSE_16:
    case VERIFY_16:
    case WRITE_16:
        cmd->xfer = buf[14] | (buf[13] << 8) | (buf[12] << 16);
        if (buf[1] & 0x01) { /* fixed */
            cmd->xfer *= dev->blocksize;
        }
        break;
    case REWIND:
    case LOAD_UNLOAD:
        cmd->xfer = 0;
        break;
    case SPACE_16:
        cmd->xfer = buf[13] | (buf[12] << 8);
        break;
    case READ_POSITION:
        switch (buf[1] & 0x1f) /* operation code */ {
        case SHORT_FORM_BLOCK_ID:
        case SHORT_FORM_VENDOR_SPECIFIC:
            cmd->xfer = 20;
            break;
        case LONG_FORM:
            cmd->xfer = 32;
            break;
        case EXTENDED_FORM:
            cmd->xfer = buf[8] | (buf[7] << 8);
            break;
        default:
            return -1;
        }

        break;
    case FORMAT_UNIT:
        cmd->xfer = buf[4] | (buf[3] << 8);
        break;
    /* generic commands */
    default:
        return scsi_req_xfer(cmd, dev, buf);
    }
    return 0;
}

static int scsi_req_medium_changer_xfer(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
{
    switch (buf[0]) {
    /* medium changer commands */
    case EXCHANGE_MEDIUM:
    case INITIALIZE_ELEMENT_STATUS:
    case INITIALIZE_ELEMENT_STATUS_WITH_RANGE:
    case MOVE_MEDIUM:
    case POSITION_TO_ELEMENT:
        cmd->xfer = 0;
        break;
    case READ_ELEMENT_STATUS:
        cmd->xfer = buf[9] | (buf[8] << 8) | (buf[7] << 16);
        break;

    /* generic commands */
    default:
        return scsi_req_xfer(cmd, dev, buf);
    }
    return 0;
}
static int scsi_req_scanner_length(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
{
    switch (buf[0]) {
    /* Scanner commands */
    case OBJECT_POSITION:
        cmd->xfer = 0;
        break;
    case SCAN:
        cmd->xfer = buf[4];
        break;
    case READ_10:
    case SEND:
    case GET_WINDOW:
    case SET_WINDOW:
        cmd->xfer = buf[8] | (buf[7] << 8) | (buf[6] << 16);
        break;
    default:
        /* GET_DATA_BUFFER_STATUS xfer handled by scsi_req_xfer */
        return scsi_req_xfer(cmd, dev, buf);
    }

    return 0;
}

static void scsi_cmd_xfer_mode(SCSICommand *cmd)
{
    if (!cmd->xfer) {
        cmd->mode = SCSI_XFER_NONE;
        return;
    }
    switch (cmd->buf[0]) {
    case WRITE_6:
    case WRITE_10:
    case WRITE_VERIFY_10:
    case WRITE_12:
    case WRITE_VERIFY_12:
    case WRITE_16:
    case WRITE_VERIFY_16:
    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
    case COPY:
    case COPY_VERIFY:
    case COMPARE:
    case CHANGE_DEFINITION:
    case LOG_SELECT:
    case MODE_SELECT:
    case MODE_SELECT_10:
    case SEND_DIAGNOSTIC:
    case WRITE_BUFFER:
    case FORMAT_UNIT:
    case REASSIGN_BLOCKS:
    case SEARCH_EQUAL:
    case SEARCH_HIGH:
    case SEARCH_LOW:
    case UPDATE_BLOCK:
    case WRITE_LONG_10:
    case WRITE_SAME_10:
    case WRITE_SAME_16:
    case UNMAP:
    case SEARCH_HIGH_12:
    case SEARCH_EQUAL_12:
    case SEARCH_LOW_12:
    case MEDIUM_SCAN:
    case SEND_VOLUME_TAG:
    case SEND_CUE_SHEET:
    case SEND_DVD_STRUCTURE:
    case PERSISTENT_RESERVE_OUT:
    case MAINTENANCE_OUT:
    case SET_WINDOW:
    case SCAN:
        /* SCAN conflicts with START_STOP.  START_STOP has cmd->xfer set to 0 for
         * non-scanner devices, so we only get here for SCAN and not for START_STOP.
         */
        cmd->mode = SCSI_XFER_TO_DEV;
        break;
    case ATA_PASSTHROUGH_12:
    case ATA_PASSTHROUGH_16:
        /* T_DIR */
        cmd->mode = (cmd->buf[2] & 0x8) ?
                    SCSI_XFER_FROM_DEV : SCSI_XFER_TO_DEV;
        break;
    default:
        cmd->mode = SCSI_XFER_FROM_DEV;
        break;
    }
}

int scsi_req_parse_cdb(SCSIDevice *dev, SCSICommand *cmd, uint8_t *buf)
{
    int rc;
    int len;

    cmd->lba = -1;
    len = scsi_cdb_length(buf);
    if (len < 0) {
        return -1;
    }

    cmd->len = len;
    switch (dev->type) {
    case TYPE_TAPE:
        rc = scsi_req_stream_xfer(cmd, dev, buf);
        break;
    case TYPE_MEDIUM_CHANGER:
        rc = scsi_req_medium_changer_xfer(cmd, dev, buf);
        break;
    case TYPE_SCANNER:
        rc = scsi_req_scanner_length(cmd, dev, buf);
        break;
    default:
        rc = scsi_req_xfer(cmd, dev, buf);
        break;
    }

    if (rc != 0)
        return rc;

    memcpy(cmd->buf, buf, cmd->len);
    scsi_cmd_xfer_mode(cmd);
    cmd->lba = scsi_cmd_lba(cmd);
    return 0;
}
void scsi_device_report_change(SCSIDevice *dev, SCSISense sense)
{
    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, dev->qdev.parent_bus);

    scsi_device_set_ua(dev, sense);
    if (bus->info->change) {
        bus->info->change(bus, dev, sense);
    }
}

SCSIRequest *scsi_req_ref(SCSIRequest *req)
{
    assert(req->refcount > 0);
    req->refcount++;
    return req;
}

void scsi_req_unref(SCSIRequest *req)
{
    assert(req->refcount > 0);
    if (--req->refcount == 0) {
        BusState *qbus = req->dev->qdev.parent_bus;
        SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, qbus);

        if (bus->info->free_request && req->hba_private) {
            bus->info->free_request(bus, req->hba_private);
        }
        if (req->ops->free_req) {
            req->ops->free_req(req);
        }
        object_unref(OBJECT(req->dev));
        object_unref(OBJECT(qbus->parent));
        g_free(req);
    }
}

/* Tell the device that we finished processing this chunk of I/O.  It
   will start the next chunk or complete the command.  */
void scsi_req_continue(SCSIRequest *req)
{
    if (req->io_canceled) {
        trace_scsi_req_continue_canceled(req->dev->id, req->lun, req->tag);
        return;
    }
    trace_scsi_req_continue(req->dev->id, req->lun, req->tag);
    if (req->cmd.mode == SCSI_XFER_TO_DEV) {
        req->ops->write_data(req);
    } else {
        req->ops->read_data(req);
    }
}

/* Called by the devices when data is ready for the HBA.  The HBA should
   start a DMA operation to read or fill the device's data buffer.
   Once it completes, calling scsi_req_continue will restart I/O.  */
void scsi_req_data(SCSIRequest *req, int len)
{
    uint8_t *buf;
    if (req->io_canceled) {
        trace_scsi_req_data_canceled(req->dev->id, req->lun, req->tag, len);
        return;
    }
    trace_scsi_req_data(req->dev->id, req->lun, req->tag, len);
    assert(req->cmd.mode != SCSI_XFER_NONE);
    if (!req->sg) {
        req->resid -= len;
        req->bus->info->transfer_data(req, len);
        return;
    }

    /* If the device calls scsi_req_data and the HBA specified a
     * scatter/gather list, the transfer has to happen in a single
     * step.  */
    assert(!req->dma_started);
    req->dma_started = true;

    buf = scsi_req_get_buf(req);
    if (req->cmd.mode == SCSI_XFER_FROM_DEV) {
        req->resid = dma_buf_read(buf, len, req->sg);
    } else {
        req->resid = dma_buf_write(buf, len, req->sg);
    }
    scsi_req_continue(req);
}

void scsi_req_print(SCSIRequest *req)
{
    FILE *fp = stderr;
    int i;

    fprintf(fp, "[%s id=%d] %s",
            req->dev->qdev.parent_bus->name,
            req->dev->id,
            scsi_command_name(req->cmd.buf[0]));
    for (i = 1; i < req->cmd.len; i++) {
        fprintf(fp, " 0x%02x", req->cmd.buf[i]);
    }
    switch (req->cmd.mode) {
    case SCSI_XFER_NONE:
        fprintf(fp, " - none\n");
        break;
    case SCSI_XFER_FROM_DEV:
        fprintf(fp, " - from-dev len=%zd\n", req->cmd.xfer);
        break;
    case SCSI_XFER_TO_DEV:
        fprintf(fp, " - to-dev len=%zd\n", req->cmd.xfer);
        break;
    default:
        fprintf(fp, " - Oops\n");
        break;
    }
}

void scsi_req_complete(SCSIRequest *req, int status)
{
    assert(req->status == -1);
    req->status = status;

    assert(req->sense_len <= sizeof(req->sense));
    if (status == GOOD) {
        req->sense_len = 0;
    }

    if (req->sense_len) {
        memcpy(req->dev->sense, req->sense, req->sense_len);
        req->dev->sense_len = req->sense_len;
        req->dev->sense_is_ua = (req->ops == &reqops_unit_attention);
    } else {
        req->dev->sense_len = 0;
        req->dev->sense_is_ua = false;
    }

    /*
     * Unit attention state is now stored in the device's sense buffer
     * if the HBA didn't do autosense.  Clear the pending unit attention
     * flags.
     */
    scsi_clear_unit_attention(req);

    scsi_req_ref(req);
    scsi_req_dequeue(req);
    req->bus->info->complete(req, req->status, req->resid);

    /* Cancelled requests might end up being completed instead of cancelled */
    notifier_list_notify(&req->cancel_notifiers, req);
    scsi_req_unref(req);
}

/* Called by the devices when the request is canceled. */
void scsi_req_cancel_complete(SCSIRequest *req)
{
    assert(req->io_canceled);
    if (req->bus->info->cancel) {
        req->bus->info->cancel(req);
    }
    notifier_list_notify(&req->cancel_notifiers, req);
    scsi_req_unref(req);
}

/* Cancel @req asynchronously.  @notifier is added to @req's cancellation
 * notifier list; the bus will be notified once the request's cancellation
 * is complete.
 */
void scsi_req_cancel_async(SCSIRequest *req, Notifier *notifier)
{
    trace_scsi_req_cancel(req->dev->id, req->lun, req->tag);
    if (notifier) {
        notifier_list_add(&req->cancel_notifiers, notifier);
    }
    if (req->io_canceled) {
        /* A blk_aio_cancel_async is pending; when it finishes,
         * scsi_req_cancel_complete will be called and will
         * call the notifier we just added.  Just wait for that.
         */
        assert(req->aiocb);
        return;
    }
    /* Dropped in scsi_req_cancel_complete.  */
    scsi_req_ref(req);
    scsi_req_dequeue(req);
    req->io_canceled = true;
    if (req->aiocb) {
        blk_aio_cancel_async(req->aiocb);
    } else {
        scsi_req_cancel_complete(req);
    }
}
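/*
 * Synchronous counterpart of scsi_req_cancel_async(): blk_aio_cancel(), unlike
 * blk_aio_cancel_async(), waits for the AIOCB to finish, so the cancellation
 * has run to completion by the time this returns (an assumption based on the
 * block layer's behaviour; callers outside the BlockBackend's AioContext
 * should double-check it).
 */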
void scsi_req_cancel(SCSIRequest *req)
{
    trace_scsi_req_cancel(req->dev->id, req->lun, req->tag);
    if (!req->enqueued) {
        return;
    }
    assert(!req->io_canceled);
    /* Dropped in scsi_req_cancel_complete.  */
    scsi_req_ref(req);
    scsi_req_dequeue(req);
    req->io_canceled = true;
    if (req->aiocb) {
        blk_aio_cancel(req->aiocb);
    } else {
        scsi_req_cancel_complete(req);
    }
}

static int scsi_ua_precedence(SCSISense sense)
{
    if (sense.key != UNIT_ATTENTION) {
        return INT_MAX;
    }
    if (sense.asc == 0x29 && sense.ascq == 0x04) {
        /* DEVICE INTERNAL RESET goes with POWER ON OCCURRED */
        return 1;
    } else if (sense.asc == 0x3F && sense.ascq == 0x01) {
        /* MICROCODE HAS BEEN CHANGED goes with SCSI BUS RESET OCCURRED */
        return 2;
    } else if (sense.asc == 0x29 && (sense.ascq == 0x05 || sense.ascq == 0x06)) {
        /* These two go with "all others". */
        ;
    } else if (sense.asc == 0x29 && sense.ascq <= 0x07) {
        /* POWER ON, RESET OR BUS DEVICE RESET OCCURRED = 0
         * POWER ON OCCURRED = 1
         * SCSI BUS RESET OCCURRED = 2
         * BUS DEVICE RESET FUNCTION OCCURRED = 3
         * I_T NEXUS LOSS OCCURRED = 7
         */
        return sense.ascq;
    } else if (sense.asc == 0x2F && sense.ascq == 0x01) {
        /* COMMANDS CLEARED BY POWER LOSS NOTIFICATION */
        return 8;
    }
    return (sense.asc << 8) | sense.ascq;
}

void scsi_device_set_ua(SCSIDevice *sdev, SCSISense sense)
{
    int prec1, prec2;
    if (sense.key != UNIT_ATTENTION) {
        return;
    }
    trace_scsi_device_set_ua(sdev->id, sdev->lun, sense.key,
                             sense.asc, sense.ascq);

    /*
     * Override a pre-existing unit attention condition, except for a more
     * important reset condition.
     */
    prec1 = scsi_ua_precedence(sdev->unit_attention);
    prec2 = scsi_ua_precedence(sense);
    if (prec2 < prec1) {
        sdev->unit_attention = sense;
    }
}

void scsi_device_purge_requests(SCSIDevice *sdev, SCSISense sense)
{
    SCSIRequest *req;

    aio_context_acquire(blk_get_aio_context(sdev->conf.blk));
    while (!QTAILQ_EMPTY(&sdev->requests)) {
        req = QTAILQ_FIRST(&sdev->requests);
        scsi_req_cancel_async(req, NULL);
    }
    blk_drain(sdev->conf.blk);
    aio_context_release(blk_get_aio_context(sdev->conf.blk));
    scsi_device_set_ua(sdev, sense);
}

static char *scsibus_get_dev_path(DeviceState *dev)
{
    SCSIDevice *d = SCSI_DEVICE(dev);
    DeviceState *hba = dev->parent_bus->parent;
    char *id;
    char *path;

    id = qdev_get_dev_path(hba);
    if (id) {
        path = g_strdup_printf("%s/%d:%d:%d", id, d->channel, d->id, d->lun);
    } else {
        path = g_strdup_printf("%d:%d:%d", d->channel, d->id, d->lun);
    }
    g_free(id);
    return path;
}

static char *scsibus_get_fw_dev_path(DeviceState *dev)
{
    SCSIDevice *d = SCSI_DEVICE(dev);
    return g_strdup_printf("channel@%x/%s@%x,%x", d->channel,
                           qdev_fw_name(dev), d->id, d->lun);
}

SCSIDevice *scsi_device_find(SCSIBus *bus, int channel, int id, int lun)
{
    BusChild *kid;
    SCSIDevice *target_dev = NULL;

    QTAILQ_FOREACH_REVERSE(kid, &bus->qbus.children, sibling) {
        DeviceState *qdev = kid->child;
        SCSIDevice *dev = SCSI_DEVICE(qdev);

        if (dev->channel == channel && dev->id == id) {
            if (dev->lun == lun) {
                return dev;
            }
            target_dev = dev;
        }
    }
    return target_dev;
}

/* SCSI request list.  For simplicity, pv points to the whole device */
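/*
 * Migration stream layout for the request list, as implemented by the
 * put/get handlers below: each in-flight request is emitted as a marker byte
 * (1 if the request must be retried, 2 otherwise), the fixed-size CDB buffer,
 * the big-endian tag and LUN, then optional HBA (bus->info->save_request) and
 * reqops payloads; a 0 marker byte terminates the list.
 */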
static int put_scsi_requests(QEMUFile *f, void *pv, size_t size,
                             const VMStateField *field, QJSON *vmdesc)
{
    SCSIDevice *s = pv;
    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, s->qdev.parent_bus);
    SCSIRequest *req;

    QTAILQ_FOREACH(req, &s->requests, next) {
        assert(!req->io_canceled);
        assert(req->status == -1);
        assert(req->enqueued);

        qemu_put_sbyte(f, req->retry ? 1 : 2);
        qemu_put_buffer(f, req->cmd.buf, sizeof(req->cmd.buf));
        qemu_put_be32s(f, &req->tag);
        qemu_put_be32s(f, &req->lun);
        if (bus->info->save_request) {
            bus->info->save_request(f, req);
        }
        if (req->ops->save_request) {
            req->ops->save_request(f, req);
        }
    }
    qemu_put_sbyte(f, 0);

    return 0;
}

static int get_scsi_requests(QEMUFile *f, void *pv, size_t size,
                             const VMStateField *field)
{
    SCSIDevice *s = pv;
    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, s->qdev.parent_bus);
    int8_t sbyte;

    while ((sbyte = qemu_get_sbyte(f)) > 0) {
        uint8_t buf[SCSI_CMD_BUF_SIZE];
        uint32_t tag;
        uint32_t lun;
        SCSIRequest *req;

        qemu_get_buffer(f, buf, sizeof(buf));
        qemu_get_be32s(f, &tag);
        qemu_get_be32s(f, &lun);
        req = scsi_req_new(s, tag, lun, buf, NULL);
        req->retry = (sbyte == 1);
        if (bus->info->load_request) {
            req->hba_private = bus->info->load_request(f, req);
        }
        if (req->ops->load_request) {
            req->ops->load_request(f, req);
        }

        /* Just restart it later.  */
        scsi_req_enqueue_internal(req);

        /* At this point, the request will be kept alive by the reference
         * added by scsi_req_enqueue_internal, so we can release our reference.
         * The HBA of course will add its own reference in the load_request
         * callback if it needs to hold on the SCSIRequest.
         */
        scsi_req_unref(req);
    }

    return 0;
}

static const VMStateInfo vmstate_info_scsi_requests = {
    .name = "scsi-requests",
    .get = get_scsi_requests,
    .put = put_scsi_requests,
};
static bool scsi_sense_state_needed(void *opaque)
{
    SCSIDevice *s = opaque;

    return s->sense_len > SCSI_SENSE_BUF_SIZE_OLD;
}

static const VMStateDescription vmstate_scsi_sense_state = {
    .name = "SCSIDevice/sense",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = scsi_sense_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8_SUB_ARRAY(sense, SCSIDevice,
                                SCSI_SENSE_BUF_SIZE_OLD,
                                SCSI_SENSE_BUF_SIZE - SCSI_SENSE_BUF_SIZE_OLD),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_scsi_device = {
    .name = "SCSIDevice",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(unit_attention.key, SCSIDevice),
        VMSTATE_UINT8(unit_attention.asc, SCSIDevice),
        VMSTATE_UINT8(unit_attention.ascq, SCSIDevice),
        VMSTATE_BOOL(sense_is_ua, SCSIDevice),
        VMSTATE_UINT8_SUB_ARRAY(sense, SCSIDevice, 0, SCSI_SENSE_BUF_SIZE_OLD),
        VMSTATE_UINT32(sense_len, SCSIDevice),
        {
            .name = "requests",
            .version_id = 0,
            .field_exists = NULL,
            .size = 0, /* ouch */
            .info = &vmstate_info_scsi_requests,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_scsi_sense_state,
        NULL
    }
};

static void scsi_device_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *k = DEVICE_CLASS(klass);
    set_bit(DEVICE_CATEGORY_STORAGE, k->categories);
    k->bus_type = TYPE_SCSI_BUS;
    k->realize = scsi_qdev_realize;
    k->unrealize = scsi_qdev_unrealize;
    k->props = scsi_props;
}

static void scsi_dev_instance_init(Object *obj)
{
    DeviceState *dev = DEVICE(obj);
    SCSIDevice *s = SCSI_DEVICE(dev);

    device_add_bootindex_property(obj, &s->conf.bootindex,
                                  "bootindex", NULL,
                                  &s->qdev, NULL);
}

static const TypeInfo scsi_device_type_info = {
    .name = TYPE_SCSI_DEVICE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(SCSIDevice),
    .abstract = true,
    .class_size = sizeof(SCSIDeviceClass),
    .class_init = scsi_device_class_init,
    .instance_init = scsi_dev_instance_init,
};

static void scsi_register_types(void)
{
    type_register_static(&scsi_bus_info);
    type_register_static(&scsi_device_type_info);
}

type_init(scsi_register_types)