#include "qemu/osdep.h"
#include "hw/hw.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/option.h"
#include "hw/scsi/scsi.h"
#include "scsi/constants.h"
#include "hw/qdev.h"
#include "sysemu/block-backend.h"
#include "sysemu/blockdev.h"
#include "trace.h"
#include "sysemu/dma.h"
#include "qemu/cutils.h"

static char *scsibus_get_dev_path(DeviceState *dev);
static char *scsibus_get_fw_dev_path(DeviceState *dev);
static void scsi_req_dequeue(SCSIRequest *req);
static uint8_t *scsi_target_alloc_buf(SCSIRequest *req, size_t len);
static void scsi_target_free_buf(SCSIRequest *req);

static Property scsi_props[] = {
    DEFINE_PROP_UINT32("channel", SCSIDevice, channel, 0),
    DEFINE_PROP_UINT32("scsi-id", SCSIDevice, id, -1),
    DEFINE_PROP_UINT32("lun", SCSIDevice, lun, -1),
    DEFINE_PROP_END_OF_LIST(),
};

static void scsi_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *k = BUS_CLASS(klass);
    HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);

    k->get_dev_path = scsibus_get_dev_path;
    k->get_fw_dev_path = scsibus_get_fw_dev_path;
    hc->unplug = qdev_simple_device_unplug_cb;
}

static const TypeInfo scsi_bus_info = {
    .name = TYPE_SCSI_BUS,
    .parent = TYPE_BUS,
    .instance_size = sizeof(SCSIBus),
    .class_init = scsi_bus_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_HOTPLUG_HANDLER },
        { }
    }
};
static int next_scsi_bus;

static void scsi_device_realize(SCSIDevice *s, Error **errp)
{
    SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s);
    if (sc->realize) {
        sc->realize(s, errp);
    }
}

int scsi_bus_parse_cdb(SCSIDevice *dev, SCSICommand *cmd, uint8_t *buf,
                       void *hba_private)
{
    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, dev->qdev.parent_bus);
    int rc;

    assert(cmd->len == 0);
    rc = scsi_req_parse_cdb(dev, cmd, buf);
    if (bus->info->parse_cdb) {
        rc = bus->info->parse_cdb(dev, cmd, buf, hba_private);
    }
    return rc;
}

static SCSIRequest *scsi_device_alloc_req(SCSIDevice *s, uint32_t tag, uint32_t lun,
                                          uint8_t *buf, void *hba_private)
{
    SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s);
    if (sc->alloc_req) {
        return sc->alloc_req(s, tag, lun, buf, hba_private);
    }

    return NULL;
}

void scsi_device_unit_attention_reported(SCSIDevice *s)
{
    SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s);
    if (sc->unit_attention_reported) {
        sc->unit_attention_reported(s);
    }
}
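
/*
 * Typical HBA-side setup, as a rough sketch (the HBA state and SCSIBusInfo
 * names are illustrative, not taken from a real controller):
 *
 *     scsi_bus_new(&s->bus, sizeof(s->bus), DEVICE(s), &my_hba_scsi_info, NULL);
 *     scsi_bus_legacy_handle_cmdline(&s->bus);
 *
 * scsi_bus_new() is defined below; the second call picks up any legacy
 * "-drive if=scsi" options for this bus.
 */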

/* Create a scsi bus, and attach devices to it.  */
void scsi_bus_new(SCSIBus *bus, size_t bus_size, DeviceState *host,
                  const SCSIBusInfo *info, const char *bus_name)
{
    qbus_create_inplace(bus, bus_size, TYPE_SCSI_BUS, host, bus_name);
    bus->busnr = next_scsi_bus++;
    bus->info = info;
    qbus_set_bus_hotplug_handler(BUS(bus), &error_abort);
}

static void scsi_dma_restart_bh(void *opaque)
{
    SCSIDevice *s = opaque;
    SCSIRequest *req, *next;

    qemu_bh_delete(s->bh);
    s->bh = NULL;

    aio_context_acquire(blk_get_aio_context(s->conf.blk));
    QTAILQ_FOREACH_SAFE(req, &s->requests, next, next) {
        scsi_req_ref(req);
        if (req->retry) {
            req->retry = false;
            switch (req->cmd.mode) {
            case SCSI_XFER_FROM_DEV:
            case SCSI_XFER_TO_DEV:
                scsi_req_continue(req);
                break;
            case SCSI_XFER_NONE:
                scsi_req_dequeue(req);
                scsi_req_enqueue(req);
                break;
            }
        }
        scsi_req_unref(req);
    }
    aio_context_release(blk_get_aio_context(s->conf.blk));
}

void scsi_req_retry(SCSIRequest *req)
{
    /* No need to save a reference, because scsi_dma_restart_bh just
     * looks at the request list.  */
    req->retry = true;
}

static void scsi_dma_restart_cb(void *opaque, int running, RunState state)
{
    SCSIDevice *s = opaque;

    if (!running) {
        return;
    }
    if (!s->bh) {
        AioContext *ctx = blk_get_aio_context(s->conf.blk);
        s->bh = aio_bh_new(ctx, scsi_dma_restart_bh, s);
        qemu_bh_schedule(s->bh);
    }
}

static void scsi_qdev_realize(DeviceState *qdev, Error **errp)
{
    SCSIDevice *dev = SCSI_DEVICE(qdev);
    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, dev->qdev.parent_bus);
    SCSIDevice *d;
    Error *local_err = NULL;

    if (dev->channel > bus->info->max_channel) {
        error_setg(errp, "bad scsi channel id: %d", dev->channel);
        return;
    }
    if (dev->id != -1 && dev->id > bus->info->max_target) {
        error_setg(errp, "bad scsi device id: %d", dev->id);
        return;
    }
    if (dev->lun != -1 && dev->lun > bus->info->max_lun) {
        error_setg(errp, "bad scsi device lun: %d", dev->lun);
        return;
    }

    if (dev->id == -1) {
        int id = -1;
        if (dev->lun == -1) {
            dev->lun = 0;
        }
        do {
            d = scsi_device_find(bus, dev->channel, ++id, dev->lun);
        } while (d && d->lun == dev->lun && id < bus->info->max_target);
        if (d && d->lun == dev->lun) {
            error_setg(errp, "no free target");
            return;
        }
        dev->id = id;
    } else if (dev->lun == -1) {
        int lun = -1;
        do {
            d = scsi_device_find(bus, dev->channel, dev->id, ++lun);
        } while (d && d->lun == lun && lun < bus->info->max_lun);
        if (d && d->lun == lun) {
            error_setg(errp, "no free lun");
            return;
        }
        dev->lun = lun;
    } else {
        d = scsi_device_find(bus, dev->channel, dev->id, dev->lun);
        assert(d);
        if (d->lun == dev->lun && dev != d) {
            error_setg(errp, "lun already used by '%s'", d->qdev.id);
            return;
        }
    }

    QTAILQ_INIT(&dev->requests);
    scsi_device_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    dev->vmsentry = qemu_add_vm_change_state_handler(scsi_dma_restart_cb,
                                                     dev);
}

static void scsi_qdev_unrealize(DeviceState *qdev, Error **errp)
{
    SCSIDevice *dev = SCSI_DEVICE(qdev);

    if (dev->vmsentry) {
        qemu_del_vm_change_state_handler(dev->vmsentry);
    }

    scsi_device_purge_requests(dev, SENSE_CODE(NO_SENSE));
    blockdev_mark_auto_del(dev->conf.blk);
}

/* handle legacy '-drive if=scsi,...' cmd line args */
SCSIDevice *scsi_bus_legacy_add_drive(SCSIBus *bus, BlockBackend *blk,
                                      int unit, bool removable, int bootindex,
                                      bool share_rw,
                                      const char *serial, Error **errp)
{
    const char *driver;
    char *name;
    DeviceState *dev;
    Error *err = NULL;

    driver = blk_is_sg(blk) ? "scsi-generic" : "scsi-disk";
    dev = qdev_create(&bus->qbus, driver);
    name = g_strdup_printf("legacy[%d]", unit);
    object_property_add_child(OBJECT(bus), name, OBJECT(dev), NULL);
    g_free(name);

    qdev_prop_set_uint32(dev, "scsi-id", unit);
    if (bootindex >= 0) {
        object_property_set_int(OBJECT(dev), bootindex, "bootindex",
                                &error_abort);
    }
    if (object_property_find(OBJECT(dev), "removable", NULL)) {
        qdev_prop_set_bit(dev, "removable", removable);
    }
    if (serial && object_property_find(OBJECT(dev), "serial", NULL)) {
        qdev_prop_set_string(dev, "serial", serial);
    }
    qdev_prop_set_drive(dev, "drive", blk, &err);
    if (err) {
        error_propagate(errp, err);
        object_unparent(OBJECT(dev));
        return NULL;
    }
    object_property_set_bool(OBJECT(dev), share_rw, "share-rw", &err);
    if (err != NULL) {
        error_propagate(errp, err);
        object_unparent(OBJECT(dev));
        return NULL;
    }
    object_property_set_bool(OBJECT(dev), true, "realized", &err);
    if (err != NULL) {
        error_propagate(errp, err);
        object_unparent(OBJECT(dev));
        return NULL;
    }
    return SCSI_DEVICE(dev);
}

void scsi_bus_legacy_handle_cmdline(SCSIBus *bus)
{
    Location loc;
    DriveInfo *dinfo;
    int unit;

    loc_push_none(&loc);
    for (unit = 0; unit <= bus->info->max_target; unit++) {
        dinfo = drive_get(IF_SCSI, bus->busnr, unit);
        if (dinfo == NULL) {
            continue;
        }
        qemu_opts_loc_restore(dinfo->opts);
        scsi_bus_legacy_add_drive(bus, blk_by_legacy_dinfo(dinfo),
                                  unit, false, -1, false, NULL, &error_fatal);
    }
    loc_pop(&loc);
}

static int32_t scsi_invalid_field(SCSIRequest *req, uint8_t *buf)
{
    scsi_req_build_sense(req, SENSE_CODE(INVALID_FIELD));
    scsi_req_complete(req, CHECK_CONDITION);
    return 0;
}

static const struct SCSIReqOps reqops_invalid_field = {
    .size = sizeof(SCSIRequest),
    .send_command = scsi_invalid_field
};

/* SCSIReqOps implementation for invalid commands.  */

static int32_t scsi_invalid_command(SCSIRequest *req, uint8_t *buf)
{
    scsi_req_build_sense(req, SENSE_CODE(INVALID_OPCODE));
    scsi_req_complete(req, CHECK_CONDITION);
    return 0;
}

static const struct SCSIReqOps reqops_invalid_opcode = {
    .size = sizeof(SCSIRequest),
    .send_command = scsi_invalid_command
};

/* SCSIReqOps implementation for unit attention conditions.  */

static int32_t scsi_unit_attention(SCSIRequest *req, uint8_t *buf)
{
    if (req->dev->unit_attention.key == UNIT_ATTENTION) {
        scsi_req_build_sense(req, req->dev->unit_attention);
    } else if (req->bus->unit_attention.key == UNIT_ATTENTION) {
        scsi_req_build_sense(req, req->bus->unit_attention);
    }
    scsi_req_complete(req, CHECK_CONDITION);
    return 0;
}

static const struct SCSIReqOps reqops_unit_attention = {
    .size = sizeof(SCSIRequest),
    .send_command = scsi_unit_attention
};

/* SCSIReqOps implementation for REPORT LUNS and for commands sent to
   an invalid LUN.  */

typedef struct SCSITargetReq SCSITargetReq;

struct SCSITargetReq {
    SCSIRequest req;
    int len;
    uint8_t *buf;
    int buf_len;
};

static void store_lun(uint8_t *outbuf, int lun)
{
    if (lun < 256) {
        outbuf[1] = lun;
        return;
    }
    outbuf[1] = (lun & 255);
    outbuf[0] = (lun >> 8) | 0x40;
}

static bool scsi_target_emulate_report_luns(SCSITargetReq *r)
{
    BusChild *kid;
    int i, len, n;
    int channel, id;
    bool found_lun0;

    if (r->req.cmd.xfer < 16) {
        return false;
    }
    if (r->req.cmd.buf[2] > 2) {
        return false;
    }
    channel = r->req.dev->channel;
    id = r->req.dev->id;
    found_lun0 = false;
    n = 0;
    QTAILQ_FOREACH(kid, &r->req.bus->qbus.children, sibling) {
        DeviceState *qdev = kid->child;
        SCSIDevice *dev = SCSI_DEVICE(qdev);

        if (dev->channel == channel && dev->id == id) {
            if (dev->lun == 0) {
                found_lun0 = true;
            }
            n += 8;
        }
    }
    if (!found_lun0) {
        n += 8;
    }

    scsi_target_alloc_buf(&r->req, n + 8);

    len = MIN(n + 8, r->req.cmd.xfer & ~7);
    memset(r->buf, 0, len);
    stl_be_p(&r->buf[0], n);
    i = found_lun0 ? 8 : 16;
    QTAILQ_FOREACH(kid, &r->req.bus->qbus.children, sibling) {
        DeviceState *qdev = kid->child;
        SCSIDevice *dev = SCSI_DEVICE(qdev);

        if (dev->channel == channel && dev->id == id) {
            store_lun(&r->buf[i], dev->lun);
            i += 8;
        }
    }
    assert(i == n + 8);
    r->len = len;
    return true;
}

static bool scsi_target_emulate_inquiry(SCSITargetReq *r)
{
    assert(r->req.dev->lun != r->req.lun);

    scsi_target_alloc_buf(&r->req, SCSI_INQUIRY_LEN);

    if (r->req.cmd.buf[1] & 0x2) {
        /* Command support data - optional, not implemented */
        return false;
    }

    if (r->req.cmd.buf[1] & 0x1) {
        /* Vital product data */
        uint8_t page_code = r->req.cmd.buf[2];
        r->buf[r->len++] = page_code; /* this page */
        r->buf[r->len++] = 0x00;

        switch (page_code) {
        case 0x00: /* Supported page codes, mandatory */
        {
            int pages;
            pages = r->len++;
            r->buf[r->len++] = 0x00; /* list of supported pages (this page) */
            r->buf[pages] = r->len - pages - 1; /* number of pages */
            break;
        }
        default:
            return false;
        }
        /* done with EVPD */
        assert(r->len < r->buf_len);
        r->len = MIN(r->req.cmd.xfer, r->len);
        return true;
    }

    /* Standard INQUIRY data */
    if (r->req.cmd.buf[2] != 0) {
        return false;
    }

    /* PAGE CODE == 0 */
    r->len = MIN(r->req.cmd.xfer, SCSI_INQUIRY_LEN);
    memset(r->buf, 0, r->len);
    if (r->req.lun != 0) {
        r->buf[0] = TYPE_NO_LUN;
    } else {
        r->buf[0] = TYPE_NOT_PRESENT | TYPE_INACTIVE;
        r->buf[2] = 5; /* Version */
        r->buf[3] = 2 | 0x10; /* HiSup, response data format */
        r->buf[4] = r->len - 5; /* Additional Length = (Len - 1) - 4 */
        r->buf[7] = 0x10 | (r->req.bus->info->tcq ? 0x02 : 0); /* Sync, TCQ.  */
        memcpy(&r->buf[8], "QEMU    ", 8);
        memcpy(&r->buf[16], "QEMU TARGET     ", 16);
        pstrcpy((char *) &r->buf[32], 4, qemu_hw_version());
    }
    return true;
}

static size_t scsi_sense_len(SCSIRequest *req)
{
    if (req->dev->type == TYPE_SCANNER)
        return SCSI_SENSE_LEN_SCANNER;
    else
        return SCSI_SENSE_LEN;
}

static int32_t scsi_target_send_command(SCSIRequest *req, uint8_t *buf)
{
    SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);
    int fixed_sense = (req->cmd.buf[1] & 1) == 0;

    if (req->lun != 0 &&
        buf[0] != INQUIRY && buf[0] != REQUEST_SENSE) {
        scsi_req_build_sense(req, SENSE_CODE(LUN_NOT_SUPPORTED));
        scsi_req_complete(req, CHECK_CONDITION);
        return 0;
    }
    switch (buf[0]) {
    case REPORT_LUNS:
        if (!scsi_target_emulate_report_luns(r)) {
            goto illegal_request;
        }
        break;
    case INQUIRY:
        if (!scsi_target_emulate_inquiry(r)) {
            goto illegal_request;
        }
        break;
    case REQUEST_SENSE:
        scsi_target_alloc_buf(&r->req, scsi_sense_len(req));
        if (req->lun != 0) {
            const struct SCSISense sense = SENSE_CODE(LUN_NOT_SUPPORTED);

            r->len = scsi_build_sense_buf(r->buf, req->cmd.xfer,
                                          sense, fixed_sense);
        } else {
            r->len = scsi_device_get_sense(r->req.dev, r->buf,
                                           MIN(req->cmd.xfer, r->buf_len),
                                           fixed_sense);
        }
        if (r->req.dev->sense_is_ua) {
            scsi_device_unit_attention_reported(req->dev);
            r->req.dev->sense_len = 0;
            r->req.dev->sense_is_ua = false;
        }
        break;
    case TEST_UNIT_READY:
        break;
    default:
        scsi_req_build_sense(req, SENSE_CODE(INVALID_OPCODE));
        scsi_req_complete(req, CHECK_CONDITION);
        return 0;
    illegal_request:
        scsi_req_build_sense(req, SENSE_CODE(INVALID_FIELD));
        scsi_req_complete(req, CHECK_CONDITION);
        return 0;
    }

    if (!r->len) {
        scsi_req_complete(req, GOOD);
    }
    return r->len;
}

static void scsi_target_read_data(SCSIRequest *req)
{
    SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);
    uint32_t n;

    n = r->len;
    if (n > 0) {
        r->len = 0;
        scsi_req_data(&r->req, n);
    } else {
        scsi_req_complete(&r->req, GOOD);
    }
}

static uint8_t *scsi_target_get_buf(SCSIRequest *req)
{
    SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);

    return r->buf;
}

static uint8_t *scsi_target_alloc_buf(SCSIRequest *req, size_t len)
{
    SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);

    r->buf = g_malloc(len);
    r->buf_len = len;

    return r->buf;
}

static void scsi_target_free_buf(SCSIRequest *req)
{
    SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);

    g_free(r->buf);
}

static const struct SCSIReqOps reqops_target_command = {
    .size = sizeof(SCSITargetReq),
    .send_command = scsi_target_send_command,
    .read_data = scsi_target_read_data,
    .get_buf = scsi_target_get_buf,
    .free_req = scsi_target_free_buf,
};


SCSIRequest *scsi_req_alloc(const SCSIReqOps *reqops, SCSIDevice *d,
                            uint32_t tag, uint32_t lun, void *hba_private)
{
    SCSIRequest *req;
    SCSIBus *bus = scsi_bus_from_device(d);
    BusState *qbus = BUS(bus);
    const int memset_off = offsetof(SCSIRequest, sense)
                           + sizeof(req->sense);

    req = g_malloc(reqops->size);
    memset((uint8_t *)req + memset_off, 0, reqops->size - memset_off);
    req->refcount = 1;
    req->bus = bus;
    req->dev = d;
    req->tag = tag;
    req->lun = lun;
    req->hba_private = hba_private;
    req->status = -1;
    req->ops = reqops;
    object_ref(OBJECT(d));
    object_ref(OBJECT(qbus->parent));
    notifier_list_init(&req->cancel_notifiers);
    trace_scsi_req_alloc(req->dev->id, req->lun, req->tag);
    return req;
}

SCSIRequest *scsi_req_new(SCSIDevice *d, uint32_t tag, uint32_t lun,
                          uint8_t *buf, void *hba_private)
{
    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, d->qdev.parent_bus);
    const SCSIReqOps *ops;
    SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(d);
    SCSIRequest *req;
    SCSICommand cmd = { .len = 0 };
    int ret;

    if ((d->unit_attention.key == UNIT_ATTENTION ||
         bus->unit_attention.key == UNIT_ATTENTION) &&
        (buf[0] != INQUIRY &&
         buf[0] != REPORT_LUNS &&
         buf[0] != GET_CONFIGURATION &&
         buf[0] != GET_EVENT_STATUS_NOTIFICATION &&

         /*
          * If we already have a pending unit attention condition,
          * report this one before triggering another one.
          */
         !(buf[0] == REQUEST_SENSE && d->sense_is_ua))) {
        ops = &reqops_unit_attention;
    } else if (lun != d->lun ||
               buf[0] == REPORT_LUNS ||
               (buf[0] == REQUEST_SENSE && d->sense_len)) {
        ops = &reqops_target_command;
    } else {
        ops = NULL;
    }

    if (ops != NULL || !sc->parse_cdb) {
        ret = scsi_req_parse_cdb(d, &cmd, buf);
    } else {
        ret = sc->parse_cdb(d, &cmd, buf, hba_private);
    }

    if (ret != 0) {
        trace_scsi_req_parse_bad(d->id, lun, tag, buf[0]);
        req = scsi_req_alloc(&reqops_invalid_opcode, d, tag, lun, hba_private);
    } else {
        assert(cmd.len != 0);
        trace_scsi_req_parsed(d->id, lun, tag, buf[0],
                              cmd.mode, cmd.xfer);
        if (cmd.lba != -1) {
            trace_scsi_req_parsed_lba(d->id, lun, tag, buf[0],
                                      cmd.lba);
        }

        if (cmd.xfer > INT32_MAX) {
            req = scsi_req_alloc(&reqops_invalid_field, d, tag, lun, hba_private);
        } else if (ops) {
            req = scsi_req_alloc(ops, d, tag, lun, hba_private);
        } else {
            req = scsi_device_alloc_req(d, tag, lun, buf, hba_private);
        }
    }

    req->cmd = cmd;
    req->resid = req->cmd.xfer;

    switch (buf[0]) {
    case INQUIRY:
        trace_scsi_inquiry(d->id, lun, tag, cmd.buf[1], cmd.buf[2]);
        break;
    case TEST_UNIT_READY:
        trace_scsi_test_unit_ready(d->id, lun, tag);
        break;
    case REPORT_LUNS:
        trace_scsi_report_luns(d->id, lun, tag);
        break;
    case REQUEST_SENSE:
        trace_scsi_request_sense(d->id, lun, tag);
        break;
    default:
        break;
    }

    return req;
}

uint8_t *scsi_req_get_buf(SCSIRequest *req)
{
    return req->ops->get_buf(req);
}

static void scsi_clear_unit_attention(SCSIRequest *req)
{
    SCSISense *ua;
    if (req->dev->unit_attention.key != UNIT_ATTENTION &&
        req->bus->unit_attention.key != UNIT_ATTENTION) {
        return;
    }

    /*
     * If an INQUIRY command enters the enabled command state,
     * the device server shall [not] clear any unit attention condition;
     * See also MMC-6, paragraphs 6.5 and 6.6.2.
     */
    if (req->cmd.buf[0] == INQUIRY ||
        req->cmd.buf[0] == GET_CONFIGURATION ||
        req->cmd.buf[0] == GET_EVENT_STATUS_NOTIFICATION) {
        return;
    }

    if (req->dev->unit_attention.key == UNIT_ATTENTION) {
        ua = &req->dev->unit_attention;
    } else {
        ua = &req->bus->unit_attention;
    }

    /*
     * If a REPORT LUNS command enters the enabled command state, [...]
     * the device server shall clear any pending unit attention condition
     * with an additional sense code of REPORTED LUNS DATA HAS CHANGED.
     */
    if (req->cmd.buf[0] == REPORT_LUNS &&
        !(ua->asc == SENSE_CODE(REPORTED_LUNS_CHANGED).asc &&
          ua->ascq == SENSE_CODE(REPORTED_LUNS_CHANGED).ascq)) {
        return;
    }

    *ua = SENSE_CODE(NO_SENSE);
}

int scsi_req_get_sense(SCSIRequest *req, uint8_t *buf, int len)
{
    int ret;

    assert(len >= 14);
    if (!req->sense_len) {
        return 0;
    }

    ret = scsi_convert_sense(req->sense, req->sense_len, buf, len, true);

    /*
     * FIXME: clearing unit attention conditions upon autosense should be done
     * only if the UA_INTLCK_CTRL field in the Control mode page is set to 00b
     * (SAM-5, 5.14).
     *
     * We assume UA_INTLCK_CTRL to be 00b for HBAs that support autosense, and
     * 10b for HBAs that do not support it (do not call scsi_req_get_sense).
     * Here we handle unit attention clearing for UA_INTLCK_CTRL == 00b.
     */
    if (req->dev->sense_is_ua) {
        scsi_device_unit_attention_reported(req->dev);
        req->dev->sense_len = 0;
        req->dev->sense_is_ua = false;
    }
    return ret;
}

int scsi_device_get_sense(SCSIDevice *dev, uint8_t *buf, int len, bool fixed)
{
    return scsi_convert_sense(dev->sense, dev->sense_len, buf, len, fixed);
}

void scsi_req_build_sense(SCSIRequest *req, SCSISense sense)
{
    trace_scsi_req_build_sense(req->dev->id, req->lun, req->tag,
                               sense.key, sense.asc, sense.ascq);
    req->sense_len = scsi_build_sense(req->sense, sense);
}

static void scsi_req_enqueue_internal(SCSIRequest *req)
{
    assert(!req->enqueued);
    scsi_req_ref(req);
    if (req->bus->info->get_sg_list) {
        req->sg = req->bus->info->get_sg_list(req);
    } else {
        req->sg = NULL;
    }
    req->enqueued = true;
    QTAILQ_INSERT_TAIL(&req->dev->requests, req, next);
}

int32_t scsi_req_enqueue(SCSIRequest *req)
{
    int32_t rc;

    assert(!req->retry);
    scsi_req_enqueue_internal(req);
    scsi_req_ref(req);
    rc = req->ops->send_command(req, req->cmd.buf);
    scsi_req_unref(req);
    return rc;
}

static void scsi_req_dequeue(SCSIRequest *req)
{
    trace_scsi_req_dequeue(req->dev->id, req->lun, req->tag);
    req->retry = false;
    if (req->enqueued) {
        QTAILQ_REMOVE(&req->dev->requests, req, next);
        req->enqueued = false;
        scsi_req_unref(req);
    }
}

static int scsi_get_performance_length(int num_desc, int type, int data_type)
{
    /* MMC-6, paragraph 6.7.  */
    switch (type) {
    case 0:
        if ((data_type & 3) == 0) {
            /* Each descriptor is as in Table 295 - Nominal performance.  */
            return 16 * num_desc + 8;
        } else {
            /* Each descriptor is as in Table 296 - Exceptions.  */
            return 6 * num_desc + 8;
        }
    case 1:
    case 4:
    case 5:
        return 8 * num_desc + 8;
    case 2:
        return 2048 * num_desc + 8;
    case 3:
        return 16 * num_desc + 8;
    default:
        return 8;
    }
}

static int ata_passthrough_xfer_unit(SCSIDevice *dev, uint8_t *buf)
{
    int byte_block = (buf[2] >> 2) & 0x1;
    int type = (buf[2] >> 4) & 0x1;
    int xfer_unit;

    if (byte_block) {
        if (type) {
            xfer_unit = dev->blocksize;
        } else {
            xfer_unit = 512;
        }
    } else {
        xfer_unit = 1;
    }

    return xfer_unit;
}

static int ata_passthrough_12_xfer(SCSIDevice *dev, uint8_t *buf)
{
    int length = buf[2] & 0x3;
    int xfer;
    int unit = ata_passthrough_xfer_unit(dev, buf);

    switch (length) {
    case 0:
    case 3: /* USB-specific.  */
    default:
        xfer = 0;
        break;
    case 1:
        xfer = buf[3];
        break;
    case 2:
        xfer = buf[4];
        break;
    }

    return xfer * unit;
}

static int ata_passthrough_16_xfer(SCSIDevice *dev, uint8_t *buf)
{
    int extend = buf[1] & 0x1;
    int length = buf[2] & 0x3;
    int xfer;
    int unit = ata_passthrough_xfer_unit(dev, buf);

    switch (length) {
    case 0:
    case 3: /* USB-specific.  */
    default:
        xfer = 0;
        break;
    case 1:
        xfer = buf[4];
        xfer |= (extend ? buf[3] << 8 : 0);
        break;
    case 2:
        xfer = buf[6];
        xfer |= (extend ? buf[5] << 8 : 0);
        break;
    }

    return xfer * unit;
}

static int scsi_req_xfer(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
{
    cmd->xfer = scsi_cdb_xfer(buf);
    switch (buf[0]) {
    case TEST_UNIT_READY:
    case REWIND:
    case START_STOP:
    case SET_CAPACITY:
    case WRITE_FILEMARKS:
    case WRITE_FILEMARKS_16:
    case SPACE:
    case RESERVE:
    case RELEASE:
    case ERASE:
    case ALLOW_MEDIUM_REMOVAL:
    case SEEK_10:
    case SYNCHRONIZE_CACHE:
    case SYNCHRONIZE_CACHE_16:
    case LOCATE_16:
    case LOCK_UNLOCK_CACHE:
    case SET_CD_SPEED:
    case SET_LIMITS:
    case WRITE_LONG_10:
    case UPDATE_BLOCK:
    case RESERVE_TRACK:
    case SET_READ_AHEAD:
    case PRE_FETCH:
    case PRE_FETCH_16:
    case ALLOW_OVERWRITE:
        cmd->xfer = 0;
        break;
    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
        if ((buf[1] & 2) == 0) {
            cmd->xfer = 0;
        } else if ((buf[1] & 4) != 0) {
            cmd->xfer = 1;
        }
        cmd->xfer *= dev->blocksize;
        break;
    case MODE_SENSE:
        break;
    case WRITE_SAME_10:
    case WRITE_SAME_16:
        cmd->xfer = buf[1] & 1 ? 0 : dev->blocksize;
        break;
    case READ_CAPACITY_10:
        cmd->xfer = 8;
        break;
    case READ_BLOCK_LIMITS:
        cmd->xfer = 6;
        break;
    case SEND_VOLUME_TAG:
        /* GPCMD_SET_STREAMING from multimedia commands.  */
        if (dev->type == TYPE_ROM) {
            cmd->xfer = buf[10] | (buf[9] << 8);
        } else {
            cmd->xfer = buf[9] | (buf[8] << 8);
        }
        break;
    case WRITE_6:
        /* length 0 means 256 blocks */
        if (cmd->xfer == 0) {
            cmd->xfer = 256;
        }
        /* fall through */
    case WRITE_10:
    case WRITE_VERIFY_10:
    case WRITE_12:
    case WRITE_VERIFY_12:
    case WRITE_16:
    case WRITE_VERIFY_16:
        cmd->xfer *= dev->blocksize;
        break;
    case READ_6:
    case READ_REVERSE:
        /* length 0 means 256 blocks */
        if (cmd->xfer == 0) {
            cmd->xfer = 256;
        }
        /* fall through */
    case READ_10:
    case READ_12:
    case READ_16:
        cmd->xfer *= dev->blocksize;
        break;
    case FORMAT_UNIT:
        /* MMC mandates the parameter list to be 12-bytes long.  Parameters
         * for block devices are restricted to the header right now.  */
        if (dev->type == TYPE_ROM && (buf[1] & 16)) {
            cmd->xfer = 12;
        } else {
            cmd->xfer = (buf[1] & 16) == 0 ? 0 : (buf[1] & 32 ? 8 : 4);
        }
        break;
    case INQUIRY:
    case RECEIVE_DIAGNOSTIC:
    case SEND_DIAGNOSTIC:
        cmd->xfer = buf[4] | (buf[3] << 8);
        break;
    case READ_CD:
    case READ_BUFFER:
    case WRITE_BUFFER:
    case SEND_CUE_SHEET:
        cmd->xfer = buf[8] | (buf[7] << 8) | (buf[6] << 16);
        break;
    case PERSISTENT_RESERVE_OUT:
        cmd->xfer = ldl_be_p(&buf[5]) & 0xffffffffULL;
        break;
    case ERASE_12:
        if (dev->type == TYPE_ROM) {
            /* MMC command GET PERFORMANCE.  */
            cmd->xfer = scsi_get_performance_length(buf[9] | (buf[8] << 8),
                                                    buf[10], buf[1] & 0x1f);
        }
        break;
    case MECHANISM_STATUS:
    case READ_DVD_STRUCTURE:
    case SEND_DVD_STRUCTURE:
    case MAINTENANCE_OUT:
    case MAINTENANCE_IN:
        if (dev->type == TYPE_ROM) {
            /* GPCMD_REPORT_KEY and GPCMD_SEND_KEY from multi media commands */
            cmd->xfer = buf[9] | (buf[8] << 8);
        }
        break;
    case ATA_PASSTHROUGH_12:
        if (dev->type == TYPE_ROM) {
            /* BLANK command of MMC */
            cmd->xfer = 0;
        } else {
            cmd->xfer = ata_passthrough_12_xfer(dev, buf);
        }
        break;
    case ATA_PASSTHROUGH_16:
        cmd->xfer = ata_passthrough_16_xfer(dev, buf);
        break;
    }
    return 0;
}

static int scsi_req_stream_xfer(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
{
    switch (buf[0]) {
    /* stream commands */
    case ERASE_12:
    case ERASE_16:
        cmd->xfer = 0;
        break;
    case READ_6:
    case READ_REVERSE:
    case RECOVER_BUFFERED_DATA:
    case WRITE_6:
        cmd->xfer = buf[4] | (buf[3] << 8) | (buf[2] << 16);
        if (buf[1] & 0x01) { /* fixed */
            cmd->xfer *= dev->blocksize;
        }
        break;
    case READ_16:
    case READ_REVERSE_16:
    case VERIFY_16:
    case WRITE_16:
        cmd->xfer = buf[14] | (buf[13] << 8) | (buf[12] << 16);
        if (buf[1] & 0x01) { /* fixed */
            cmd->xfer *= dev->blocksize;
        }
        break;
    case REWIND:
    case LOAD_UNLOAD:
        cmd->xfer = 0;
        break;
    case SPACE_16:
        cmd->xfer = buf[13] | (buf[12] << 8);
        break;
    case READ_POSITION:
        switch (buf[1] & 0x1f) /* operation code */ {
        case SHORT_FORM_BLOCK_ID:
        case SHORT_FORM_VENDOR_SPECIFIC:
            cmd->xfer = 20;
            break;
        case LONG_FORM:
            cmd->xfer = 32;
            break;
        case EXTENDED_FORM:
            cmd->xfer = buf[8] | (buf[7] << 8);
            break;
        default:
            return -1;
        }

        break;
    case FORMAT_UNIT:
        cmd->xfer = buf[4] | (buf[3] << 8);
        break;
    /* generic commands */
    default:
        return scsi_req_xfer(cmd, dev, buf);
    }
    return 0;
}

static int scsi_req_medium_changer_xfer(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
{
    switch (buf[0]) {
    /* medium changer commands */
    case EXCHANGE_MEDIUM:
    case INITIALIZE_ELEMENT_STATUS:
    case INITIALIZE_ELEMENT_STATUS_WITH_RANGE:
    case MOVE_MEDIUM:
    case POSITION_TO_ELEMENT:
        cmd->xfer = 0;
        break;
    case READ_ELEMENT_STATUS:
        cmd->xfer = buf[9] | (buf[8] << 8) | (buf[7] << 16);
        break;

    /* generic commands */
    default:
        return scsi_req_xfer(cmd, dev, buf);
    }
    return 0;
}

static int scsi_req_scanner_length(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
{
    switch (buf[0]) {
    /* Scanner commands */
    case OBJECT_POSITION:
        cmd->xfer = 0;
        break;
    case SCAN:
        cmd->xfer = buf[4];
        break;
    case READ_10:
    case SEND:
    case GET_WINDOW:
    case SET_WINDOW:
        cmd->xfer = buf[8] | (buf[7] << 8) | (buf[6] << 16);
        break;
    default:
        /* GET_DATA_BUFFER_STATUS xfer handled by scsi_req_xfer */
        return scsi_req_xfer(cmd, dev, buf);
    }

    return 0;
}

static void scsi_cmd_xfer_mode(SCSICommand *cmd)
{
    if (!cmd->xfer) {
        cmd->mode = SCSI_XFER_NONE;
        return;
    }
    switch (cmd->buf[0]) {
    case WRITE_6:
    case WRITE_10:
    case WRITE_VERIFY_10:
    case WRITE_12:
    case WRITE_VERIFY_12:
    case WRITE_16:
    case WRITE_VERIFY_16:
    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
    case COPY:
    case COPY_VERIFY:
    case COMPARE:
    case CHANGE_DEFINITION:
    case LOG_SELECT:
    case MODE_SELECT:
    case MODE_SELECT_10:
    case SEND_DIAGNOSTIC:
    case WRITE_BUFFER:
    case FORMAT_UNIT:
    case REASSIGN_BLOCKS:
    case SEARCH_EQUAL:
    case SEARCH_HIGH:
    case SEARCH_LOW:
    case UPDATE_BLOCK:
    case WRITE_LONG_10:
    case WRITE_SAME_10:
    case WRITE_SAME_16:
    case UNMAP:
    case SEARCH_HIGH_12:
    case SEARCH_EQUAL_12:
    case SEARCH_LOW_12:
    case MEDIUM_SCAN:
    case SEND_VOLUME_TAG:
    case SEND_CUE_SHEET:
    case SEND_DVD_STRUCTURE:
    case PERSISTENT_RESERVE_OUT:
    case MAINTENANCE_OUT:
    case SET_WINDOW:
    case SCAN:
        /* SCAN conflicts with START_STOP.  START_STOP has cmd->xfer set to 0 for
         * non-scanner devices, so we only get here for SCAN and not for START_STOP.
         */
        cmd->mode = SCSI_XFER_TO_DEV;
        break;
    case ATA_PASSTHROUGH_12:
    case ATA_PASSTHROUGH_16:
        /* T_DIR */
        cmd->mode = (cmd->buf[2] & 0x8) ?
                    SCSI_XFER_FROM_DEV : SCSI_XFER_TO_DEV;
        break;
    default:
        cmd->mode = SCSI_XFER_FROM_DEV;
        break;
    }
}

int scsi_req_parse_cdb(SCSIDevice *dev, SCSICommand *cmd, uint8_t *buf)
{
    int rc;
    int len;

    cmd->lba = -1;
    len = scsi_cdb_length(buf);
    if (len < 0) {
        return -1;
    }

    cmd->len = len;
    switch (dev->type) {
    case TYPE_TAPE:
        rc = scsi_req_stream_xfer(cmd, dev, buf);
        break;
    case TYPE_MEDIUM_CHANGER:
        rc = scsi_req_medium_changer_xfer(cmd, dev, buf);
        break;
    case TYPE_SCANNER:
        rc = scsi_req_scanner_length(cmd, dev, buf);
        break;
    default:
        rc = scsi_req_xfer(cmd, dev, buf);
        break;
    }

    if (rc != 0)
        return rc;

    memcpy(cmd->buf, buf, cmd->len);
    scsi_cmd_xfer_mode(cmd);
    cmd->lba = scsi_cmd_lba(cmd);
    return 0;
}

void scsi_device_report_change(SCSIDevice *dev, SCSISense sense)
{
    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, dev->qdev.parent_bus);

    scsi_device_set_ua(dev, sense);
    if (bus->info->change) {
        bus->info->change(bus, dev, sense);
    }
}

SCSIRequest *scsi_req_ref(SCSIRequest *req)
{
    assert(req->refcount > 0);
    req->refcount++;
    return req;
}

void scsi_req_unref(SCSIRequest *req)
{
    assert(req->refcount > 0);
    if (--req->refcount == 0) {
        BusState *qbus = req->dev->qdev.parent_bus;
        SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, qbus);

        if (bus->info->free_request && req->hba_private) {
            bus->info->free_request(bus, req->hba_private);
        }
        if (req->ops->free_req) {
            req->ops->free_req(req);
        }
        object_unref(OBJECT(req->dev));
        object_unref(OBJECT(qbus->parent));
        g_free(req);
    }
}

/* Tell the device that we finished processing this chunk of I/O.  It
   will start the next chunk or complete the command.  */
void scsi_req_continue(SCSIRequest *req)
{
    if (req->io_canceled) {
        trace_scsi_req_continue_canceled(req->dev->id, req->lun, req->tag);
        return;
    }
    trace_scsi_req_continue(req->dev->id, req->lun, req->tag);
    if (req->cmd.mode == SCSI_XFER_TO_DEV) {
        req->ops->write_data(req);
    } else {
        req->ops->read_data(req);
    }
}

/* Called by the devices when data is ready for the HBA.  The HBA should
   start a DMA operation to read or fill the device's data buffer.
   Once it completes, calling scsi_req_continue will restart I/O.  */
void scsi_req_data(SCSIRequest *req, int len)
{
    uint8_t *buf;
    if (req->io_canceled) {
        trace_scsi_req_data_canceled(req->dev->id, req->lun, req->tag, len);
        return;
    }
    trace_scsi_req_data(req->dev->id, req->lun, req->tag, len);
    assert(req->cmd.mode != SCSI_XFER_NONE);
    if (!req->sg) {
        req->resid -= len;
        req->bus->info->transfer_data(req, len);
        return;
    }

    /* If the device calls scsi_req_data and the HBA specified a
     * scatter/gather list, the transfer has to happen in a single
     * step.  */
    assert(!req->dma_started);
    req->dma_started = true;

    buf = scsi_req_get_buf(req);
    if (req->cmd.mode == SCSI_XFER_FROM_DEV) {
        req->resid = dma_buf_read(buf, len, req->sg);
    } else {
        req->resid = dma_buf_write(buf, len, req->sg);
    }
    scsi_req_continue(req);
}

void scsi_req_print(SCSIRequest *req)
{
    FILE *fp = stderr;
    int i;

    fprintf(fp, "[%s id=%d] %s",
            req->dev->qdev.parent_bus->name,
            req->dev->id,
            scsi_command_name(req->cmd.buf[0]));
    for (i = 1; i < req->cmd.len; i++) {
        fprintf(fp, " 0x%02x", req->cmd.buf[i]);
    }
    switch (req->cmd.mode) {
    case SCSI_XFER_NONE:
        fprintf(fp, " - none\n");
        break;
    case SCSI_XFER_FROM_DEV:
        fprintf(fp, " - from-dev len=%zd\n", req->cmd.xfer);
        break;
    case SCSI_XFER_TO_DEV:
        fprintf(fp, " - to-dev len=%zd\n", req->cmd.xfer);
        break;
    default:
        fprintf(fp, " - Oops\n");
        break;
    }
}

void scsi_req_complete(SCSIRequest *req, int status)
{
    assert(req->status == -1);
    req->status = status;

    assert(req->sense_len <= sizeof(req->sense));
    if (status == GOOD) {
        req->sense_len = 0;
    }

    if (req->sense_len) {
        memcpy(req->dev->sense, req->sense, req->sense_len);
        req->dev->sense_len = req->sense_len;
        req->dev->sense_is_ua = (req->ops == &reqops_unit_attention);
    } else {
        req->dev->sense_len = 0;
        req->dev->sense_is_ua = false;
    }

    /*
     * Unit attention state is now stored in the device's sense buffer
     * if the HBA didn't do autosense.  Clear the pending unit attention
     * flags.
     */
    scsi_clear_unit_attention(req);

    scsi_req_ref(req);
    scsi_req_dequeue(req);
    req->bus->info->complete(req, req->status, req->resid);

    /* Cancelled requests might end up being completed instead of cancelled */
    notifier_list_notify(&req->cancel_notifiers, req);
    scsi_req_unref(req);
}

/* Called by the devices when the request is canceled.  */
void scsi_req_cancel_complete(SCSIRequest *req)
{
    assert(req->io_canceled);
    if (req->bus->info->cancel) {
        req->bus->info->cancel(req);
    }
    notifier_list_notify(&req->cancel_notifiers, req);
    scsi_req_unref(req);
}

/* Cancel @req asynchronously. @notifier is added to @req's cancellation
 * notifier list; the bus will be notified when the request's cancellation
 * is completed.
 */
void scsi_req_cancel_async(SCSIRequest *req, Notifier *notifier)
{
    trace_scsi_req_cancel(req->dev->id, req->lun, req->tag);
    if (notifier) {
        notifier_list_add(&req->cancel_notifiers, notifier);
    }
    if (req->io_canceled) {
        /* A blk_aio_cancel_async is pending; when it finishes,
         * scsi_req_cancel_complete will be called and will
         * call the notifier we just added.  Just wait for that.
         */
        assert(req->aiocb);
        return;
    }
    /* Dropped in scsi_req_cancel_complete.  */
    scsi_req_ref(req);
    scsi_req_dequeue(req);
    req->io_canceled = true;
    if (req->aiocb) {
        blk_aio_cancel_async(req->aiocb);
    } else {
        scsi_req_cancel_complete(req);
    }
}
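
/*
 * Example of how an HBA typically consumes scsi_req_cancel_async() (the
 * hba_cancel_done name is hypothetical): it registers a Notifier and waits
 * for it to fire, which happens from either scsi_req_cancel_complete() or
 * scsi_req_complete(), because a cancelled request may still complete
 * normally (see the comment in scsi_req_complete above).
 *
 *     static void hba_cancel_done(Notifier *notifier, void *data)
 *     {
 *         SCSIRequest *req = data;   // the request is finished either way
 *         // release the HBA-side state that referenced req here
 *     }
 *
 *     // in the HBA's task-management path:
 *     //     notifier->notify = hba_cancel_done;
 *     //     scsi_req_cancel_async(req, notifier);
 */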

void scsi_req_cancel(SCSIRequest *req)
{
    trace_scsi_req_cancel(req->dev->id, req->lun, req->tag);
    if (!req->enqueued) {
        return;
    }
    assert(!req->io_canceled);
    /* Dropped in scsi_req_cancel_complete.  */
    scsi_req_ref(req);
    scsi_req_dequeue(req);
    req->io_canceled = true;
    if (req->aiocb) {
        blk_aio_cancel(req->aiocb);
    } else {
        scsi_req_cancel_complete(req);
    }
}

static int scsi_ua_precedence(SCSISense sense)
{
    if (sense.key != UNIT_ATTENTION) {
        return INT_MAX;
    }
    if (sense.asc == 0x29 && sense.ascq == 0x04) {
        /* DEVICE INTERNAL RESET goes with POWER ON OCCURRED */
        return 1;
    } else if (sense.asc == 0x3F && sense.ascq == 0x01) {
        /* MICROCODE HAS BEEN CHANGED goes with SCSI BUS RESET OCCURRED */
        return 2;
    } else if (sense.asc == 0x29 && (sense.ascq == 0x05 || sense.ascq == 0x06)) {
        /* These two go with "all others". */
        ;
    } else if (sense.asc == 0x29 && sense.ascq <= 0x07) {
        /* POWER ON, RESET OR BUS DEVICE RESET OCCURRED = 0
         * POWER ON OCCURRED = 1
         * SCSI BUS RESET OCCURRED = 2
         * BUS DEVICE RESET FUNCTION OCCURRED = 3
         * I_T NEXUS LOSS OCCURRED = 7
         */
        return sense.ascq;
    } else if (sense.asc == 0x2F && sense.ascq == 0x01) {
        /* COMMANDS CLEARED BY POWER LOSS NOTIFICATION */
        return 8;
    }
    return (sense.asc << 8) | sense.ascq;
}

void scsi_device_set_ua(SCSIDevice *sdev, SCSISense sense)
{
    int prec1, prec2;
    if (sense.key != UNIT_ATTENTION) {
        return;
    }
    trace_scsi_device_set_ua(sdev->id, sdev->lun, sense.key,
                             sense.asc, sense.ascq);

    /*
     * Override a pre-existing unit attention condition, except for a more
     * important reset condition.
     */
    prec1 = scsi_ua_precedence(sdev->unit_attention);
    prec2 = scsi_ua_precedence(sense);
    if (prec2 < prec1) {
        sdev->unit_attention = sense;
    }
}

void scsi_device_purge_requests(SCSIDevice *sdev, SCSISense sense)
{
    SCSIRequest *req;

    aio_context_acquire(blk_get_aio_context(sdev->conf.blk));
    while (!QTAILQ_EMPTY(&sdev->requests)) {
        req = QTAILQ_FIRST(&sdev->requests);
        scsi_req_cancel_async(req, NULL);
    }
    blk_drain(sdev->conf.blk);
    aio_context_release(blk_get_aio_context(sdev->conf.blk));
    scsi_device_set_ua(sdev, sense);
}

static char *scsibus_get_dev_path(DeviceState *dev)
{
    SCSIDevice *d = SCSI_DEVICE(dev);
    DeviceState *hba = dev->parent_bus->parent;
    char *id;
    char *path;

    id = qdev_get_dev_path(hba);
    if (id) {
        path = g_strdup_printf("%s/%d:%d:%d", id, d->channel, d->id, d->lun);
    } else {
        path = g_strdup_printf("%d:%d:%d", d->channel, d->id, d->lun);
    }
    g_free(id);
    return path;
}

static char *scsibus_get_fw_dev_path(DeviceState *dev)
{
    SCSIDevice *d = SCSI_DEVICE(dev);
    return g_strdup_printf("channel@%x/%s@%x,%x", d->channel,
                           qdev_fw_name(dev), d->id, d->lun);
}

SCSIDevice *scsi_device_find(SCSIBus *bus, int channel, int id, int lun)
{
    BusChild *kid;
    SCSIDevice *target_dev = NULL;

    QTAILQ_FOREACH_REVERSE(kid, &bus->qbus.children, ChildrenHead, sibling) {
        DeviceState *qdev = kid->child;
        SCSIDevice *dev = SCSI_DEVICE(qdev);

        if (dev->channel == channel && dev->id == id) {
            if (dev->lun == lun) {
                return dev;
            }
            target_dev = dev;
        }
    }
    return target_dev;
}
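
/*
 * In outline, put_scsi_requests()/get_scsi_requests() below stream each
 * in-flight request as: a one-byte marker (1 = retry on resume, 2 = no retry,
 * 0 = end of list), the SCSI_CMD_BUF_SIZE-byte CDB, the tag and LUN as
 * big-endian 32-bit values, then any HBA payload (bus->info->save_request)
 * and request payload (req->ops->save_request).
 */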

/* SCSI request list.  For simplicity, pv points to the whole device */

static int put_scsi_requests(QEMUFile *f, void *pv, size_t size,
                             VMStateField *field, QJSON *vmdesc)
{
    SCSIDevice *s = pv;
    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, s->qdev.parent_bus);
    SCSIRequest *req;

    QTAILQ_FOREACH(req, &s->requests, next) {
        assert(!req->io_canceled);
        assert(req->status == -1);
        assert(req->enqueued);

        qemu_put_sbyte(f, req->retry ? 1 : 2);
        qemu_put_buffer(f, req->cmd.buf, sizeof(req->cmd.buf));
        qemu_put_be32s(f, &req->tag);
        qemu_put_be32s(f, &req->lun);
        if (bus->info->save_request) {
            bus->info->save_request(f, req);
        }
        if (req->ops->save_request) {
            req->ops->save_request(f, req);
        }
    }
    qemu_put_sbyte(f, 0);

    return 0;
}

static int get_scsi_requests(QEMUFile *f, void *pv, size_t size,
                             VMStateField *field)
{
    SCSIDevice *s = pv;
    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, s->qdev.parent_bus);
    int8_t sbyte;

    while ((sbyte = qemu_get_sbyte(f)) > 0) {
        uint8_t buf[SCSI_CMD_BUF_SIZE];
        uint32_t tag;
        uint32_t lun;
        SCSIRequest *req;

        qemu_get_buffer(f, buf, sizeof(buf));
        qemu_get_be32s(f, &tag);
        qemu_get_be32s(f, &lun);
        req = scsi_req_new(s, tag, lun, buf, NULL);
        req->retry = (sbyte == 1);
        if (bus->info->load_request) {
            req->hba_private = bus->info->load_request(f, req);
        }
        if (req->ops->load_request) {
            req->ops->load_request(f, req);
        }

        /* Just restart it later.  */
        scsi_req_enqueue_internal(req);

        /* At this point, the request will be kept alive by the reference
         * added by scsi_req_enqueue_internal, so we can release our reference.
         * The HBA of course will add its own reference in the load_request
         * callback if it needs to hold on to the SCSIRequest.
         */
        scsi_req_unref(req);
    }

    return 0;
}

static const VMStateInfo vmstate_info_scsi_requests = {
    .name = "scsi-requests",
    .get = get_scsi_requests,
    .put = put_scsi_requests,
};

static bool scsi_sense_state_needed(void *opaque)
{
    SCSIDevice *s = opaque;

    return s->sense_len > SCSI_SENSE_BUF_SIZE_OLD;
}

static const VMStateDescription vmstate_scsi_sense_state = {
    .name = "SCSIDevice/sense",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = scsi_sense_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8_SUB_ARRAY(sense, SCSIDevice,
                                SCSI_SENSE_BUF_SIZE_OLD,
                                SCSI_SENSE_BUF_SIZE - SCSI_SENSE_BUF_SIZE_OLD),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_scsi_device = {
    .name = "SCSIDevice",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(unit_attention.key, SCSIDevice),
        VMSTATE_UINT8(unit_attention.asc, SCSIDevice),
        VMSTATE_UINT8(unit_attention.ascq, SCSIDevice),
        VMSTATE_BOOL(sense_is_ua, SCSIDevice),
        VMSTATE_UINT8_SUB_ARRAY(sense, SCSIDevice, 0, SCSI_SENSE_BUF_SIZE_OLD),
        VMSTATE_UINT32(sense_len, SCSIDevice),
        {
            .name = "requests",
            .version_id = 0,
            .field_exists = NULL,
            .size = 0, /* ouch */
            .info = &vmstate_info_scsi_requests,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_scsi_sense_state,
        NULL
    }
};

static void scsi_device_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *k = DEVICE_CLASS(klass);
    set_bit(DEVICE_CATEGORY_STORAGE, k->categories);
    k->bus_type = TYPE_SCSI_BUS;
    k->realize = scsi_qdev_realize;
    k->unrealize = scsi_qdev_unrealize;
    k->props = scsi_props;
}

static void scsi_dev_instance_init(Object *obj)
{
    DeviceState *dev = DEVICE(obj);
    SCSIDevice *s = SCSI_DEVICE(dev);

    device_add_bootindex_property(obj, &s->conf.bootindex,
                                  "bootindex", NULL,
                                  &s->qdev, NULL);
}

static const TypeInfo scsi_device_type_info = {
    .name = TYPE_SCSI_DEVICE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(SCSIDevice),
    .abstract = true,
    .class_size = sizeof(SCSIDeviceClass),
    .class_init = scsi_device_class_init,
    .instance_init = scsi_dev_instance_init,
};

static void scsi_register_types(void)
{
    type_register_static(&scsi_bus_info);
    type_register_static(&scsi_device_type_info);
}

type_init(scsi_register_types)
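
/*
 * Rough sketch of the request lifecycle as seen from an HBA (variable names
 * are illustrative):
 *
 *     SCSIRequest *req = scsi_req_new(sdev, tag, lun, cdb, hba_private);
 *     int32_t len = scsi_req_enqueue(req);
 *     if (len) {
 *         scsi_req_continue(req);   // ask the device for the first chunk
 *     }
 *     // The device then calls back through bus->info->transfer_data(); the
 *     // HBA moves the chunk (scsi_req_get_buf) and calls scsi_req_continue()
 *     // again, until bus->info->complete() delivers the final status, after
 *     // which the HBA drops its reference with scsi_req_unref().
 */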