#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "qemu/option.h"
#include "hw/qdev-properties.h"
#include "hw/scsi/scsi.h"
#include "migration/qemu-file-types.h"
#include "migration/vmstate.h"
#include "scsi/constants.h"
#include "sysemu/block-backend.h"
#include "sysemu/blockdev.h"
#include "sysemu/sysemu.h"
#include "sysemu/runstate.h"
#include "trace.h"
#include "sysemu/dma.h"
#include "qemu/cutils.h"

static char *scsibus_get_dev_path(DeviceState *dev);
static char *scsibus_get_fw_dev_path(DeviceState *dev);
static void scsi_req_dequeue(SCSIRequest *req);
static uint8_t *scsi_target_alloc_buf(SCSIRequest *req, size_t len);
static void scsi_target_free_buf(SCSIRequest *req);

static int next_scsi_bus;

static SCSIDevice *do_scsi_device_find(SCSIBus *bus,
                                       int channel, int id, int lun,
                                       bool include_unrealized)
{
    BusChild *kid;
    SCSIDevice *retval = NULL;

    QTAILQ_FOREACH_RCU(kid, &bus->qbus.children, sibling) {
        DeviceState *qdev = kid->child;
        SCSIDevice *dev = SCSI_DEVICE(qdev);

        if (dev->channel == channel && dev->id == id) {
            if (dev->lun == lun) {
                retval = dev;
                break;
            }

            /*
             * If we don't find an exact match (channel/bus/lun),
             * we will return the first device which matches channel/bus
             */

            if (!retval) {
                retval = dev;
            }
        }
    }

    /*
     * This function might run on the IO thread and we might race against
     * main thread hot-plugging the device.
     * We assume that as soon as .realized is set to true we can let
     * the user access the device.
     */

    if (retval && !include_unrealized &&
        !qatomic_load_acquire(&retval->qdev.realized)) {
        retval = NULL;
    }

    return retval;
}

SCSIDevice *scsi_device_find(SCSIBus *bus, int channel, int id, int lun)
{
    RCU_READ_LOCK_GUARD();
    return do_scsi_device_find(bus, channel, id, lun, false);
}

SCSIDevice *scsi_device_get(SCSIBus *bus, int channel, int id, int lun)
{
    SCSIDevice *d;

    RCU_READ_LOCK_GUARD();
    d = do_scsi_device_find(bus, channel, id, lun, false);
    if (d) {
        object_ref(d);
    }
    return d;
}

static void scsi_device_realize(SCSIDevice *s, Error **errp)
{
    SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s);
    if (sc->realize) {
        sc->realize(s, errp);
    }
}

static void scsi_device_unrealize(SCSIDevice *s)
{
    SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s);
    if (sc->unrealize) {
        sc->unrealize(s);
    }
}

int scsi_bus_parse_cdb(SCSIDevice *dev, SCSICommand *cmd, uint8_t *buf,
                       void *hba_private)
{
    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, dev->qdev.parent_bus);
    int rc;

    assert(cmd->len == 0);
    rc = scsi_req_parse_cdb(dev, cmd, buf);
    if (bus->info->parse_cdb) {
        rc = bus->info->parse_cdb(dev, cmd, buf, hba_private);
    }
    return rc;
}

static SCSIRequest *scsi_device_alloc_req(SCSIDevice *s, uint32_t tag, uint32_t lun,
                                          uint8_t *buf, void *hba_private)
{
    SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s);
    if (sc->alloc_req) {
        return sc->alloc_req(s, tag, lun, buf, hba_private);
    }

    return NULL;
}

void scsi_device_unit_attention_reported(SCSIDevice *s)
{
    SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(s);
    if (sc->unit_attention_reported) {
        sc->unit_attention_reported(s);
    }
}

/* Create a scsi bus, and attach devices to it.  */
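/*
 * Rough usage sketch (hypothetical HBA state "s" with an embedded SCSIBus
 * member and an HBA-provided SCSIBusInfo; not taken from a real controller):
 *
 *     scsi_bus_new(&s->bus, sizeof(s->bus), DEVICE(s), &hba_scsi_info, NULL);
 *     scsi_bus_legacy_handle_cmdline(&s->bus);
 */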
void scsi_bus_new(SCSIBus *bus, size_t bus_size, DeviceState *host,
                  const SCSIBusInfo *info, const char *bus_name)
{
    qbus_create_inplace(bus, bus_size, TYPE_SCSI_BUS, host, bus_name);
    bus->busnr = next_scsi_bus++;
    bus->info = info;
    qbus_set_bus_hotplug_handler(BUS(bus));
}

static void scsi_dma_restart_bh(void *opaque)
{
    SCSIDevice *s = opaque;
    SCSIRequest *req, *next;

    qemu_bh_delete(s->bh);
    s->bh = NULL;

    aio_context_acquire(blk_get_aio_context(s->conf.blk));
    QTAILQ_FOREACH_SAFE(req, &s->requests, next, next) {
        scsi_req_ref(req);
        if (req->retry) {
            req->retry = false;
            switch (req->cmd.mode) {
            case SCSI_XFER_FROM_DEV:
            case SCSI_XFER_TO_DEV:
                scsi_req_continue(req);
                break;
            case SCSI_XFER_NONE:
                scsi_req_dequeue(req);
                scsi_req_enqueue(req);
                break;
            }
        }
        scsi_req_unref(req);
    }
    aio_context_release(blk_get_aio_context(s->conf.blk));
}

void scsi_req_retry(SCSIRequest *req)
{
    /* No need to save a reference, because scsi_dma_restart_bh just
     * looks at the request list.  */
    req->retry = true;
}

static void scsi_dma_restart_cb(void *opaque, int running, RunState state)
{
    SCSIDevice *s = opaque;

    if (!running) {
        return;
    }
    if (!s->bh) {
        AioContext *ctx = blk_get_aio_context(s->conf.blk);
        s->bh = aio_bh_new(ctx, scsi_dma_restart_bh, s);
        qemu_bh_schedule(s->bh);
    }
}

static bool scsi_bus_is_address_free(SCSIBus *bus,
                                     int channel, int target, int lun,
                                     SCSIDevice **p_dev)
{
    SCSIDevice *d;

    RCU_READ_LOCK_GUARD();
    d = do_scsi_device_find(bus, channel, target, lun, true);
    if (d && d->lun == lun) {
        if (p_dev) {
            *p_dev = d;
        }
        return false;
    }
    if (p_dev) {
        *p_dev = NULL;
    }
    return true;
}

static bool scsi_bus_check_address(BusState *qbus, DeviceState *qdev, Error **errp)
{
    SCSIDevice *dev = SCSI_DEVICE(qdev);
    SCSIBus *bus = SCSI_BUS(qbus);

    if (dev->channel > bus->info->max_channel) {
        error_setg(errp, "bad scsi channel id: %d", dev->channel);
        return false;
    }
    if (dev->id != -1 && dev->id > bus->info->max_target) {
        error_setg(errp, "bad scsi device id: %d", dev->id);
        return false;
    }
    if (dev->lun != -1 && dev->lun > bus->info->max_lun) {
        error_setg(errp, "bad scsi device lun: %d", dev->lun);
        return false;
    }

    if (dev->id != -1 && dev->lun != -1) {
        SCSIDevice *d;
        if (!scsi_bus_is_address_free(bus, dev->channel, dev->id, dev->lun, &d)) {
            error_setg(errp, "lun already used by '%s'", d->qdev.id);
            return false;
        }
    }

    return true;
}

static void scsi_qdev_realize(DeviceState *qdev, Error **errp)
{
    SCSIDevice *dev = SCSI_DEVICE(qdev);
    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, dev->qdev.parent_bus);
    bool is_free;
    Error *local_err = NULL;

    if (dev->id == -1) {
        int id = -1;
        if (dev->lun == -1) {
            dev->lun = 0;
        }
        do {
            is_free = scsi_bus_is_address_free(bus, dev->channel, ++id, dev->lun, NULL);
        } while (!is_free && id < bus->info->max_target);
        if (!is_free) {
            error_setg(errp, "no free target");
            return;
        }
        dev->id = id;
    } else if (dev->lun == -1) {
        int lun = -1;
        do {
            is_free = scsi_bus_is_address_free(bus, dev->channel, dev->id, ++lun, NULL);
        } while (!is_free && lun < bus->info->max_lun);
        if (!is_free) {
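            /* Every LUN from 0 up to max_lun on this target is taken. */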
            error_setg(errp, "no free lun");
            return;
        }
        dev->lun = lun;
    }

    QTAILQ_INIT(&dev->requests);
    scsi_device_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }
    dev->vmsentry = qdev_add_vm_change_state_handler(DEVICE(dev),
                                                     scsi_dma_restart_cb, dev);
}

static void scsi_qdev_unrealize(DeviceState *qdev)
{
    SCSIDevice *dev = SCSI_DEVICE(qdev);

    if (dev->vmsentry) {
        qemu_del_vm_change_state_handler(dev->vmsentry);
    }

    scsi_device_purge_requests(dev, SENSE_CODE(NO_SENSE));

    scsi_device_unrealize(dev);

    blockdev_mark_auto_del(dev->conf.blk);
}

/* handle legacy '-drive if=scsi,...' cmd line args */
SCSIDevice *scsi_bus_legacy_add_drive(SCSIBus *bus, BlockBackend *blk,
                                      int unit, bool removable, int bootindex,
                                      bool share_rw,
                                      BlockdevOnError rerror,
                                      BlockdevOnError werror,
                                      const char *serial, Error **errp)
{
    const char *driver;
    char *name;
    DeviceState *dev;
    DriveInfo *dinfo;

    if (blk_is_sg(blk)) {
        driver = "scsi-generic";
    } else {
        dinfo = blk_legacy_dinfo(blk);
        if (dinfo && dinfo->media_cd) {
            driver = "scsi-cd";
        } else {
            driver = "scsi-hd";
        }
    }
    dev = qdev_new(driver);
    name = g_strdup_printf("legacy[%d]", unit);
    object_property_add_child(OBJECT(bus), name, OBJECT(dev));
    g_free(name);

    qdev_prop_set_uint32(dev, "scsi-id", unit);
    if (bootindex >= 0) {
        object_property_set_int(OBJECT(dev), "bootindex", bootindex,
                                &error_abort);
    }
    if (object_property_find(OBJECT(dev), "removable")) {
        qdev_prop_set_bit(dev, "removable", removable);
    }
    if (serial && object_property_find(OBJECT(dev), "serial")) {
        qdev_prop_set_string(dev, "serial", serial);
    }
    if (!qdev_prop_set_drive_err(dev, "drive", blk, errp)) {
        object_unparent(OBJECT(dev));
        return NULL;
    }
    if (!object_property_set_bool(OBJECT(dev), "share-rw", share_rw, errp)) {
        object_unparent(OBJECT(dev));
        return NULL;
    }

    qdev_prop_set_enum(dev, "rerror", rerror);
    qdev_prop_set_enum(dev, "werror", werror);

    if (!qdev_realize_and_unref(dev, &bus->qbus, errp)) {
        object_unparent(OBJECT(dev));
        return NULL;
    }
    return SCSI_DEVICE(dev);
}

void scsi_bus_legacy_handle_cmdline(SCSIBus *bus)
{
    Location loc;
    DriveInfo *dinfo;
    int unit;

    loc_push_none(&loc);
    for (unit = 0; unit <= bus->info->max_target; unit++) {
        dinfo = drive_get(IF_SCSI, bus->busnr, unit);
        if (dinfo == NULL) {
            continue;
        }
        qemu_opts_loc_restore(dinfo->opts);
        scsi_bus_legacy_add_drive(bus, blk_by_legacy_dinfo(dinfo),
                                  unit, false, -1, false,
                                  BLOCKDEV_ON_ERROR_AUTO,
                                  BLOCKDEV_ON_ERROR_AUTO,
                                  NULL, &error_fatal);
    }
    loc_pop(&loc);
}

static int32_t scsi_invalid_field(SCSIRequest *req, uint8_t *buf)
{
    scsi_req_build_sense(req, SENSE_CODE(INVALID_FIELD));
    scsi_req_complete(req, CHECK_CONDITION);
    return 0;
}

static const struct SCSIReqOps reqops_invalid_field = {
    .size = sizeof(SCSIRequest),
    .send_command = scsi_invalid_field
};

/* SCSIReqOps implementation for invalid commands.  */
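/*
 * reqops_invalid_field and reqops_invalid_opcode are picked in scsi_req_new()
 * when the transfer length overflows or CDB parsing fails, so the guest gets
 * a CHECK CONDITION with the matching sense code instead of reaching a
 * device-specific request handler.
 */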

static int32_t scsi_invalid_command(SCSIRequest *req, uint8_t *buf)
{
    scsi_req_build_sense(req, SENSE_CODE(INVALID_OPCODE));
    scsi_req_complete(req, CHECK_CONDITION);
    return 0;
}

static const struct SCSIReqOps reqops_invalid_opcode = {
    .size = sizeof(SCSIRequest),
    .send_command = scsi_invalid_command
};

/* SCSIReqOps implementation for unit attention conditions.  */

static int32_t scsi_unit_attention(SCSIRequest *req, uint8_t *buf)
{
    if (req->dev->unit_attention.key == UNIT_ATTENTION) {
        scsi_req_build_sense(req, req->dev->unit_attention);
    } else if (req->bus->unit_attention.key == UNIT_ATTENTION) {
        scsi_req_build_sense(req, req->bus->unit_attention);
    }
    scsi_req_complete(req, CHECK_CONDITION);
    return 0;
}

static const struct SCSIReqOps reqops_unit_attention = {
    .size = sizeof(SCSIRequest),
    .send_command = scsi_unit_attention
};

/* SCSIReqOps implementation for REPORT LUNS and for commands sent to
   an invalid LUN.  */

typedef struct SCSITargetReq SCSITargetReq;

struct SCSITargetReq {
    SCSIRequest req;
    int len;
    uint8_t *buf;
    int buf_len;
};

static void store_lun(uint8_t *outbuf, int lun)
{
    if (lun < 256) {
        /* Simple logical unit addressing method */
        outbuf[0] = 0;
        outbuf[1] = lun;
    } else {
        /* Flat space addressing method */
        outbuf[0] = 0x40 | (lun >> 8);
        outbuf[1] = (lun & 255);
    }
}

static bool scsi_target_emulate_report_luns(SCSITargetReq *r)
{
    BusChild *kid;
    int channel, id;
    uint8_t tmp[8] = {0};
    int len = 0;
    GByteArray *buf;

    if (r->req.cmd.xfer < 16) {
        return false;
    }
    if (r->req.cmd.buf[2] > 2) {
        return false;
    }

    /* reserve space for 63 LUNs */
    buf = g_byte_array_sized_new(512);

    channel = r->req.dev->channel;
    id = r->req.dev->id;

    /* add size (will be updated later to correct value) */
    g_byte_array_append(buf, tmp, 8);
    len += 8;

    /* add LUN0 */
    g_byte_array_append(buf, tmp, 8);
    len += 8;

    WITH_RCU_READ_LOCK_GUARD() {
        QTAILQ_FOREACH_RCU(kid, &r->req.bus->qbus.children, sibling) {
            DeviceState *qdev = kid->child;
            SCSIDevice *dev = SCSI_DEVICE(qdev);

            if (dev->channel == channel && dev->id == id && dev->lun != 0) {
                store_lun(tmp, dev->lun);
                g_byte_array_append(buf, tmp, 8);
                len += 8;
            }
        }
    }

    r->buf_len = len;
    r->buf = g_byte_array_free(buf, FALSE);
    r->len = MIN(len, r->req.cmd.xfer & ~7);

    /* store the LUN list length */
    stl_be_p(&r->buf[0], len - 8);
    return true;
}

static bool scsi_target_emulate_inquiry(SCSITargetReq *r)
{
    assert(r->req.dev->lun != r->req.lun);

    scsi_target_alloc_buf(&r->req, SCSI_INQUIRY_LEN);

    if (r->req.cmd.buf[1] & 0x2) {
        /* Command support data - optional, not implemented */
        return false;
    }

    if (r->req.cmd.buf[1] & 0x1) {
        /* Vital product data */
        uint8_t page_code = r->req.cmd.buf[2];
        r->buf[r->len++] = page_code; /* this page */
        r->buf[r->len++] = 0x00;

        switch (page_code) {
        case 0x00: /* Supported page codes, mandatory */
        {
            int pages;
            pages = r->len++;
            r->buf[r->len++] = 0x00; /* list of supported pages (this page) */
            r->buf[pages] = r->len - pages - 1; /* number of pages */
            break;
        }
        default:
            return false;
        }
        /* done with EVPD */
        assert(r->len < r->buf_len);
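        /*
         * r->len now covers the EVPD header plus page data; the response is
         * clipped below to the allocation length requested in the CDB.
         */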
        r->len = MIN(r->req.cmd.xfer, r->len);
        return true;
    }

    /* Standard INQUIRY data */
    if (r->req.cmd.buf[2] != 0) {
        return false;
    }

    /* PAGE CODE == 0 */
    r->len = MIN(r->req.cmd.xfer, SCSI_INQUIRY_LEN);
    memset(r->buf, 0, r->len);
    if (r->req.lun != 0) {
        r->buf[0] = TYPE_NO_LUN;
    } else {
        r->buf[0] = TYPE_NOT_PRESENT | TYPE_INACTIVE;
        r->buf[2] = 5; /* Version */
        r->buf[3] = 2 | 0x10; /* HiSup, response data format */
        r->buf[4] = r->len - 5; /* Additional Length = (Len - 1) - 4 */
        r->buf[7] = 0x10 | (r->req.bus->info->tcq ? 0x02 : 0); /* Sync, TCQ.  */
        memcpy(&r->buf[8], "QEMU    ", 8);
        memcpy(&r->buf[16], "QEMU TARGET     ", 16);
        pstrcpy((char *) &r->buf[32], 4, qemu_hw_version());
    }
    return true;
}

static size_t scsi_sense_len(SCSIRequest *req)
{
    if (req->dev->type == TYPE_SCANNER)
        return SCSI_SENSE_LEN_SCANNER;
    else
        return SCSI_SENSE_LEN;
}

static int32_t scsi_target_send_command(SCSIRequest *req, uint8_t *buf)
{
    SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);
    int fixed_sense = (req->cmd.buf[1] & 1) == 0;

    if (req->lun != 0 &&
        buf[0] != INQUIRY && buf[0] != REQUEST_SENSE) {
        scsi_req_build_sense(req, SENSE_CODE(LUN_NOT_SUPPORTED));
        scsi_req_complete(req, CHECK_CONDITION);
        return 0;
    }
    switch (buf[0]) {
    case REPORT_LUNS:
        if (!scsi_target_emulate_report_luns(r)) {
            goto illegal_request;
        }
        break;
    case INQUIRY:
        if (!scsi_target_emulate_inquiry(r)) {
            goto illegal_request;
        }
        break;
    case REQUEST_SENSE:
        scsi_target_alloc_buf(&r->req, scsi_sense_len(req));
        if (req->lun != 0) {
            const struct SCSISense sense = SENSE_CODE(LUN_NOT_SUPPORTED);

            r->len = scsi_build_sense_buf(r->buf, req->cmd.xfer,
                                          sense, fixed_sense);
        } else {
            r->len = scsi_device_get_sense(r->req.dev, r->buf,
                                           MIN(req->cmd.xfer, r->buf_len),
                                           fixed_sense);
        }
        if (r->req.dev->sense_is_ua) {
            scsi_device_unit_attention_reported(req->dev);
            r->req.dev->sense_len = 0;
            r->req.dev->sense_is_ua = false;
        }
        break;
    case TEST_UNIT_READY:
        break;
    default:
        scsi_req_build_sense(req, SENSE_CODE(INVALID_OPCODE));
        scsi_req_complete(req, CHECK_CONDITION);
        return 0;
    illegal_request:
        scsi_req_build_sense(req, SENSE_CODE(INVALID_FIELD));
        scsi_req_complete(req, CHECK_CONDITION);
        return 0;
    }

    if (!r->len) {
        scsi_req_complete(req, GOOD);
    }
    return r->len;
}

static void scsi_target_read_data(SCSIRequest *req)
{
    SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);
    uint32_t n;

    n = r->len;
    if (n > 0) {
        r->len = 0;
        scsi_req_data(&r->req, n);
    } else {
        scsi_req_complete(&r->req, GOOD);
    }
}

static uint8_t *scsi_target_get_buf(SCSIRequest *req)
{
    SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);

    return r->buf;
}

static uint8_t *scsi_target_alloc_buf(SCSIRequest *req, size_t len)
{
    SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);

    r->buf = g_malloc(len);
    r->buf_len = len;

    return r->buf;
}

static void scsi_target_free_buf(SCSIRequest *req)
{
    SCSITargetReq *r = DO_UPCAST(SCSITargetReq, req, req);

    g_free(r->buf);
}

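/*
 * Request ops for commands emulated at the target level (REPORT LUNS,
 * INQUIRY/REQUEST SENSE for unmapped LUNs, TEST UNIT READY): the response is
 * built into r->buf by send_command and handed to the HBA via read_data.
 */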
static const struct SCSIReqOps reqops_target_command = {
    .size = sizeof(SCSITargetReq),
    .send_command = scsi_target_send_command,
    .read_data = scsi_target_read_data,
    .get_buf = scsi_target_get_buf,
    .free_req = scsi_target_free_buf,
};


SCSIRequest *scsi_req_alloc(const SCSIReqOps *reqops, SCSIDevice *d,
                            uint32_t tag, uint32_t lun, void *hba_private)
{
    SCSIRequest *req;
    SCSIBus *bus = scsi_bus_from_device(d);
    BusState *qbus = BUS(bus);
    const int memset_off = offsetof(SCSIRequest, sense)
                           + sizeof(req->sense);

    req = g_malloc(reqops->size);
    memset((uint8_t *)req + memset_off, 0, reqops->size - memset_off);
    req->refcount = 1;
    req->bus = bus;
    req->dev = d;
    req->tag = tag;
    req->lun = lun;
    req->hba_private = hba_private;
    req->status = -1;
    req->ops = reqops;
    object_ref(OBJECT(d));
    object_ref(OBJECT(qbus->parent));
    notifier_list_init(&req->cancel_notifiers);
    trace_scsi_req_alloc(req->dev->id, req->lun, req->tag);
    return req;
}

SCSIRequest *scsi_req_new(SCSIDevice *d, uint32_t tag, uint32_t lun,
                          uint8_t *buf, void *hba_private)
{
    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, d->qdev.parent_bus);
    const SCSIReqOps *ops;
    SCSIDeviceClass *sc = SCSI_DEVICE_GET_CLASS(d);
    SCSIRequest *req;
    SCSICommand cmd = { .len = 0 };
    int ret;

    if ((d->unit_attention.key == UNIT_ATTENTION ||
         bus->unit_attention.key == UNIT_ATTENTION) &&
        (buf[0] != INQUIRY &&
         buf[0] != REPORT_LUNS &&
         buf[0] != GET_CONFIGURATION &&
         buf[0] != GET_EVENT_STATUS_NOTIFICATION &&

         /*
          * If we already have a pending unit attention condition,
          * report this one before triggering another one.
          */
         !(buf[0] == REQUEST_SENSE && d->sense_is_ua))) {
        ops = &reqops_unit_attention;
    } else if (lun != d->lun ||
               buf[0] == REPORT_LUNS ||
               (buf[0] == REQUEST_SENSE && d->sense_len)) {
        ops = &reqops_target_command;
    } else {
        ops = NULL;
    }

    if (ops != NULL || !sc->parse_cdb) {
        ret = scsi_req_parse_cdb(d, &cmd, buf);
    } else {
        ret = sc->parse_cdb(d, &cmd, buf, hba_private);
    }

    if (ret != 0) {
        trace_scsi_req_parse_bad(d->id, lun, tag, buf[0]);
        req = scsi_req_alloc(&reqops_invalid_opcode, d, tag, lun, hba_private);
    } else {
        assert(cmd.len != 0);
        trace_scsi_req_parsed(d->id, lun, tag, buf[0],
                              cmd.mode, cmd.xfer);
        if (cmd.lba != -1) {
            trace_scsi_req_parsed_lba(d->id, lun, tag, buf[0],
                                      cmd.lba);
        }

        if (cmd.xfer > INT32_MAX) {
            req = scsi_req_alloc(&reqops_invalid_field, d, tag, lun, hba_private);
        } else if (ops) {
            req = scsi_req_alloc(ops, d, tag, lun, hba_private);
        } else {
            req = scsi_device_alloc_req(d, tag, lun, buf, hba_private);
        }
    }

    req->cmd = cmd;
    req->resid = req->cmd.xfer;

    switch (buf[0]) {
    case INQUIRY:
        trace_scsi_inquiry(d->id, lun, tag, cmd.buf[1], cmd.buf[2]);
        break;
    case TEST_UNIT_READY:
        trace_scsi_test_unit_ready(d->id, lun, tag);
        break;
    case REPORT_LUNS:
        trace_scsi_report_luns(d->id, lun, tag);
        break;
    case REQUEST_SENSE:
        trace_scsi_request_sense(d->id, lun, tag);
        break;
    default:
        break;
    }

    return req;
}

uint8_t *scsi_req_get_buf(SCSIRequest *req)
{
    return req->ops->get_buf(req);
}

static void scsi_clear_unit_attention(SCSIRequest *req)
{
    SCSISense *ua;
    if (req->dev->unit_attention.key != UNIT_ATTENTION &&
        req->bus->unit_attention.key != UNIT_ATTENTION) {
        return;
    }

    /*
     * If an INQUIRY command enters the enabled command state,
     * the device server shall [not] clear any unit attention condition;
     * See also MMC-6, paragraphs 6.5 and 6.6.2.
     */
    if (req->cmd.buf[0] == INQUIRY ||
        req->cmd.buf[0] == GET_CONFIGURATION ||
        req->cmd.buf[0] == GET_EVENT_STATUS_NOTIFICATION) {
        return;
    }

    if (req->dev->unit_attention.key == UNIT_ATTENTION) {
        ua = &req->dev->unit_attention;
    } else {
        ua = &req->bus->unit_attention;
    }

    /*
     * If a REPORT LUNS command enters the enabled command state, [...]
     * the device server shall clear any pending unit attention condition
     * with an additional sense code of REPORTED LUNS DATA HAS CHANGED.
     */
    if (req->cmd.buf[0] == REPORT_LUNS &&
        !(ua->asc == SENSE_CODE(REPORTED_LUNS_CHANGED).asc &&
          ua->ascq == SENSE_CODE(REPORTED_LUNS_CHANGED).ascq)) {
        return;
    }

    *ua = SENSE_CODE(NO_SENSE);
}

int scsi_req_get_sense(SCSIRequest *req, uint8_t *buf, int len)
{
    int ret;

    assert(len >= 14);
    if (!req->sense_len) {
        return 0;
    }

    ret = scsi_convert_sense(req->sense, req->sense_len, buf, len, true);

    /*
     * FIXME: clearing unit attention conditions upon autosense should be done
     * only if the UA_INTLCK_CTRL field in the Control mode page is set to 00b
     * (SAM-5, 5.14).
     *
     * We assume UA_INTLCK_CTRL to be 00b for HBAs that support autosense, and
     * 10b for HBAs that do not support it (do not call scsi_req_get_sense).
     * Here we handle unit attention clearing for UA_INTLCK_CTRL == 00b.
     */
    if (req->dev->sense_is_ua) {
        scsi_device_unit_attention_reported(req->dev);
        req->dev->sense_len = 0;
        req->dev->sense_is_ua = false;
    }
    return ret;
}

int scsi_device_get_sense(SCSIDevice *dev, uint8_t *buf, int len, bool fixed)
{
    return scsi_convert_sense(dev->sense, dev->sense_len, buf, len, fixed);
}

void scsi_req_build_sense(SCSIRequest *req, SCSISense sense)
{
    trace_scsi_req_build_sense(req->dev->id, req->lun, req->tag,
                               sense.key, sense.asc, sense.ascq);
    req->sense_len = scsi_build_sense(req->sense, sense);
}

static void scsi_req_enqueue_internal(SCSIRequest *req)
{
    assert(!req->enqueued);
    scsi_req_ref(req);
    if (req->bus->info->get_sg_list) {
        req->sg = req->bus->info->get_sg_list(req);
    } else {
        req->sg = NULL;
    }
    req->enqueued = true;
    QTAILQ_INSERT_TAIL(&req->dev->requests, req, next);
}

int32_t scsi_req_enqueue(SCSIRequest *req)
{
    int32_t rc;

    assert(!req->retry);
    scsi_req_enqueue_internal(req);
    scsi_req_ref(req);
    rc = req->ops->send_command(req, req->cmd.buf);
    scsi_req_unref(req);
    return rc;
}

static void scsi_req_dequeue(SCSIRequest *req)
{
    trace_scsi_req_dequeue(req->dev->id, req->lun, req->tag);
    req->retry = false;
    if (req->enqueued) {
        QTAILQ_REMOVE(&req->dev->requests, req, next);
        req->enqueued = false;
        scsi_req_unref(req);
    }
}

static int scsi_get_performance_length(int num_desc, int type, int data_type)
{
    /* MMC-6, paragraph 6.7.  */
    switch (type) {
    case 0:
        if ((data_type & 3) == 0) {
            /* Each descriptor is as in Table 295 - Nominal performance.  */
            return 16 * num_desc + 8;
        } else {
            /* Each descriptor is as in Table 296 - Exceptions.  */
            return 6 * num_desc + 8;
        }
    case 1:
    case 4:
    case 5:
        return 8 * num_desc + 8;
    case 2:
        return 2048 * num_desc + 8;
    case 3:
        return 16 * num_desc + 8;
    default:
        return 8;
    }
}

static int ata_passthrough_xfer_unit(SCSIDevice *dev, uint8_t *buf)
{
    int byte_block = (buf[2] >> 2) & 0x1;
    int type = (buf[2] >> 4) & 0x1;
    int xfer_unit;

    if (byte_block) {
        if (type) {
            xfer_unit = dev->blocksize;
        } else {
            xfer_unit = 512;
        }
    } else {
        xfer_unit = 1;
    }

    return xfer_unit;
}

static int ata_passthrough_12_xfer(SCSIDevice *dev, uint8_t *buf)
{
    int length = buf[2] & 0x3;
    int xfer;
    int unit = ata_passthrough_xfer_unit(dev, buf);

    switch (length) {
    case 0:
    case 3: /* USB-specific.  */
    default:
        xfer = 0;
        break;
    case 1:
        xfer = buf[3];
        break;
    case 2:
        xfer = buf[4];
        break;
    }

    return xfer * unit;
}

static int ata_passthrough_16_xfer(SCSIDevice *dev, uint8_t *buf)
{
    int extend = buf[1] & 0x1;
    int length = buf[2] & 0x3;
    int xfer;
    int unit = ata_passthrough_xfer_unit(dev, buf);

    switch (length) {
    case 0:
    case 3: /* USB-specific.  */
    default:
        xfer = 0;
        break;
    case 1:
        xfer = buf[4];
        xfer |= (extend ? buf[3] << 8 : 0);
        break;
    case 2:
        xfer = buf[6];
        xfer |= (extend ? buf[5] << 8 : 0);
        break;
    }

    return xfer * unit;
}

static int scsi_req_xfer(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
{
    cmd->xfer = scsi_cdb_xfer(buf);
    switch (buf[0]) {
    case TEST_UNIT_READY:
    case REWIND:
    case START_STOP:
    case SET_CAPACITY:
    case WRITE_FILEMARKS:
    case WRITE_FILEMARKS_16:
    case SPACE:
    case RESERVE:
    case RELEASE:
    case ERASE:
    case ALLOW_MEDIUM_REMOVAL:
    case SEEK_10:
    case SYNCHRONIZE_CACHE:
    case SYNCHRONIZE_CACHE_16:
    case LOCATE_16:
    case LOCK_UNLOCK_CACHE:
    case SET_CD_SPEED:
    case SET_LIMITS:
    case WRITE_LONG_10:
    case UPDATE_BLOCK:
    case RESERVE_TRACK:
    case SET_READ_AHEAD:
    case PRE_FETCH:
    case PRE_FETCH_16:
    case ALLOW_OVERWRITE:
        cmd->xfer = 0;
        break;
    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
        if ((buf[1] & 2) == 0) {
            cmd->xfer = 0;
        } else if ((buf[1] & 4) != 0) {
            cmd->xfer = 1;
        }
        cmd->xfer *= dev->blocksize;
        break;
    case MODE_SENSE:
        break;
    case WRITE_SAME_10:
    case WRITE_SAME_16:
        cmd->xfer = buf[1] & 1 ? 0 : dev->blocksize;
        break;
    case READ_CAPACITY_10:
        cmd->xfer = 8;
        break;
    case READ_BLOCK_LIMITS:
        cmd->xfer = 6;
        break;
    case SEND_VOLUME_TAG:
        /* GPCMD_SET_STREAMING from multimedia commands.  */
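        /*
         * The parameter list length sits at bytes 9-10 for the MMC form of
         * this opcode, and at bytes 8-9 for SEND VOLUME TAG proper.
         */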
        if (dev->type == TYPE_ROM) {
            cmd->xfer = buf[10] | (buf[9] << 8);
        } else {
            cmd->xfer = buf[9] | (buf[8] << 8);
        }
        break;
    case WRITE_6:
        /* length 0 means 256 blocks */
        if (cmd->xfer == 0) {
            cmd->xfer = 256;
        }
        /* fall through */
    case WRITE_10:
    case WRITE_VERIFY_10:
    case WRITE_12:
    case WRITE_VERIFY_12:
    case WRITE_16:
    case WRITE_VERIFY_16:
        cmd->xfer *= dev->blocksize;
        break;
    case READ_6:
    case READ_REVERSE:
        /* length 0 means 256 blocks */
        if (cmd->xfer == 0) {
            cmd->xfer = 256;
        }
        /* fall through */
    case READ_10:
    case READ_12:
    case READ_16:
        cmd->xfer *= dev->blocksize;
        break;
    case FORMAT_UNIT:
        /* MMC mandates the parameter list to be 12-bytes long.  Parameters
         * for block devices are restricted to the header right now.  */
        if (dev->type == TYPE_ROM && (buf[1] & 16)) {
            cmd->xfer = 12;
        } else {
            cmd->xfer = (buf[1] & 16) == 0 ? 0 : (buf[1] & 32 ? 8 : 4);
        }
        break;
    case INQUIRY:
    case RECEIVE_DIAGNOSTIC:
    case SEND_DIAGNOSTIC:
        cmd->xfer = buf[4] | (buf[3] << 8);
        break;
    case READ_CD:
    case READ_BUFFER:
    case WRITE_BUFFER:
    case SEND_CUE_SHEET:
        cmd->xfer = buf[8] | (buf[7] << 8) | (buf[6] << 16);
        break;
    case PERSISTENT_RESERVE_OUT:
        cmd->xfer = ldl_be_p(&buf[5]) & 0xffffffffULL;
        break;
    case ERASE_12:
        if (dev->type == TYPE_ROM) {
            /* MMC command GET PERFORMANCE.  */
            cmd->xfer = scsi_get_performance_length(buf[9] | (buf[8] << 8),
                                                    buf[10], buf[1] & 0x1f);
        }
        break;
    case MECHANISM_STATUS:
    case READ_DVD_STRUCTURE:
    case SEND_DVD_STRUCTURE:
    case MAINTENANCE_OUT:
    case MAINTENANCE_IN:
        if (dev->type == TYPE_ROM) {
            /* GPCMD_REPORT_KEY and GPCMD_SEND_KEY from multi media commands */
            cmd->xfer = buf[9] | (buf[8] << 8);
        }
        break;
    case ATA_PASSTHROUGH_12:
        if (dev->type == TYPE_ROM) {
            /* BLANK command of MMC */
            cmd->xfer = 0;
        } else {
            cmd->xfer = ata_passthrough_12_xfer(dev, buf);
        }
        break;
    case ATA_PASSTHROUGH_16:
        cmd->xfer = ata_passthrough_16_xfer(dev, buf);
        break;
    }
    return 0;
}

static int scsi_req_stream_xfer(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
{
    switch (buf[0]) {
    /* stream commands */
    case ERASE_12:
    case ERASE_16:
        cmd->xfer = 0;
        break;
    case READ_6:
    case READ_REVERSE:
    case RECOVER_BUFFERED_DATA:
    case WRITE_6:
        cmd->xfer = buf[4] | (buf[3] << 8) | (buf[2] << 16);
        if (buf[1] & 0x01) { /* fixed */
            cmd->xfer *= dev->blocksize;
        }
        break;
    case READ_16:
    case READ_REVERSE_16:
    case VERIFY_16:
    case WRITE_16:
        cmd->xfer = buf[14] | (buf[13] << 8) | (buf[12] << 16);
        if (buf[1] & 0x01) { /* fixed */
            cmd->xfer *= dev->blocksize;
        }
        break;
    case REWIND:
    case LOAD_UNLOAD:
        cmd->xfer = 0;
        break;
    case SPACE_16:
        cmd->xfer = buf[13] | (buf[12] << 8);
        break;
    case READ_POSITION:
        switch (buf[1] & 0x1f) /* operation code */ {
        case SHORT_FORM_BLOCK_ID:
        case SHORT_FORM_VENDOR_SPECIFIC:
            cmd->xfer = 20;
            break;
        case LONG_FORM:
            cmd->xfer = 32;
            break;
        case EXTENDED_FORM:
            cmd->xfer = buf[8] | (buf[7] << 8);
            break;
        default:
            return -1;
        }

        break;
    case FORMAT_UNIT:
        cmd->xfer = buf[4] | (buf[3] << 8);
        break;
    /* generic commands */
    default:
        return scsi_req_xfer(cmd, dev, buf);
    }
    return 0;
}

static int scsi_req_medium_changer_xfer(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
{
    switch (buf[0]) {
    /* medium changer commands */
    case EXCHANGE_MEDIUM:
    case INITIALIZE_ELEMENT_STATUS:
    case INITIALIZE_ELEMENT_STATUS_WITH_RANGE:
    case MOVE_MEDIUM:
    case POSITION_TO_ELEMENT:
        cmd->xfer = 0;
        break;
    case READ_ELEMENT_STATUS:
        cmd->xfer = buf[9] | (buf[8] << 8) | (buf[7] << 16);
        break;

    /* generic commands */
    default:
        return scsi_req_xfer(cmd, dev, buf);
    }
    return 0;
}

static int scsi_req_scanner_length(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
{
    switch (buf[0]) {
    /* Scanner commands */
    case OBJECT_POSITION:
        cmd->xfer = 0;
        break;
    case SCAN:
        cmd->xfer = buf[4];
        break;
    case READ_10:
    case SEND:
    case GET_WINDOW:
    case SET_WINDOW:
        cmd->xfer = buf[8] | (buf[7] << 8) | (buf[6] << 16);
        break;
    default:
        /* GET_DATA_BUFFER_STATUS xfer handled by scsi_req_xfer */
        return scsi_req_xfer(cmd, dev, buf);
    }

    return 0;
}

static void scsi_cmd_xfer_mode(SCSICommand *cmd)
{
    if (!cmd->xfer) {
        cmd->mode = SCSI_XFER_NONE;
        return;
    }
    switch (cmd->buf[0]) {
    case WRITE_6:
    case WRITE_10:
    case WRITE_VERIFY_10:
    case WRITE_12:
    case WRITE_VERIFY_12:
    case WRITE_16:
    case WRITE_VERIFY_16:
    case VERIFY_10:
    case VERIFY_12:
    case VERIFY_16:
    case COPY:
    case COPY_VERIFY:
    case COMPARE:
    case CHANGE_DEFINITION:
    case LOG_SELECT:
    case MODE_SELECT:
    case MODE_SELECT_10:
    case SEND_DIAGNOSTIC:
    case WRITE_BUFFER:
    case FORMAT_UNIT:
    case REASSIGN_BLOCKS:
    case SEARCH_EQUAL:
    case SEARCH_HIGH:
    case SEARCH_LOW:
    case UPDATE_BLOCK:
    case WRITE_LONG_10:
    case WRITE_SAME_10:
    case WRITE_SAME_16:
    case UNMAP:
    case SEARCH_HIGH_12:
    case SEARCH_EQUAL_12:
    case SEARCH_LOW_12:
    case MEDIUM_SCAN:
    case SEND_VOLUME_TAG:
    case SEND_CUE_SHEET:
    case SEND_DVD_STRUCTURE:
    case PERSISTENT_RESERVE_OUT:
    case MAINTENANCE_OUT:
    case SET_WINDOW:
    case SCAN:
        /* SCAN conflicts with START_STOP.  START_STOP has cmd->xfer set to 0 for
         * non-scanner devices, so we only get here for SCAN and not for START_STOP.
         */
        cmd->mode = SCSI_XFER_TO_DEV;
        break;
    case ATA_PASSTHROUGH_12:
    case ATA_PASSTHROUGH_16:
        /* T_DIR */
        cmd->mode = (cmd->buf[2] & 0x8) ?
                    SCSI_XFER_FROM_DEV : SCSI_XFER_TO_DEV;
        break;
    default:
        cmd->mode = SCSI_XFER_FROM_DEV;
        break;
    }
}

int scsi_req_parse_cdb(SCSIDevice *dev, SCSICommand *cmd, uint8_t *buf)
{
    int rc;
    int len;

    cmd->lba = -1;
    len = scsi_cdb_length(buf);
    if (len < 0) {
        return -1;
    }

    cmd->len = len;
    switch (dev->type) {
    case TYPE_TAPE:
        rc = scsi_req_stream_xfer(cmd, dev, buf);
        break;
    case TYPE_MEDIUM_CHANGER:
        rc = scsi_req_medium_changer_xfer(cmd, dev, buf);
        break;
    case TYPE_SCANNER:
        rc = scsi_req_scanner_length(cmd, dev, buf);
        break;
    default:
        rc = scsi_req_xfer(cmd, dev, buf);
        break;
    }

    if (rc != 0)
        return rc;

    memcpy(cmd->buf, buf, cmd->len);
    scsi_cmd_xfer_mode(cmd);
    cmd->lba = scsi_cmd_lba(cmd);
    return 0;
}

void scsi_device_report_change(SCSIDevice *dev, SCSISense sense)
{
    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, dev->qdev.parent_bus);

    scsi_device_set_ua(dev, sense);
    if (bus->info->change) {
        bus->info->change(bus, dev, sense);
    }
}

SCSIRequest *scsi_req_ref(SCSIRequest *req)
{
    assert(req->refcount > 0);
    req->refcount++;
    return req;
}

void scsi_req_unref(SCSIRequest *req)
{
    assert(req->refcount > 0);
    if (--req->refcount == 0) {
        BusState *qbus = req->dev->qdev.parent_bus;
        SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, qbus);

        if (bus->info->free_request && req->hba_private) {
            bus->info->free_request(bus, req->hba_private);
        }
        if (req->ops->free_req) {
            req->ops->free_req(req);
        }
        object_unref(OBJECT(req->dev));
        object_unref(OBJECT(qbus->parent));
        g_free(req);
    }
}

/* Tell the device that we finished processing this chunk of I/O.  It
   will start the next chunk or complete the command.  */
void scsi_req_continue(SCSIRequest *req)
{
    if (req->io_canceled) {
        trace_scsi_req_continue_canceled(req->dev->id, req->lun, req->tag);
        return;
    }
    trace_scsi_req_continue(req->dev->id, req->lun, req->tag);
    if (req->cmd.mode == SCSI_XFER_TO_DEV) {
        req->ops->write_data(req);
    } else {
        req->ops->read_data(req);
    }
}

/* Called by the devices when data is ready for the HBA.  The HBA should
   start a DMA operation to read or fill the device's data buffer.
   Once it completes, calling scsi_req_continue will restart I/O.  */
void scsi_req_data(SCSIRequest *req, int len)
{
    uint8_t *buf;
    if (req->io_canceled) {
        trace_scsi_req_data_canceled(req->dev->id, req->lun, req->tag, len);
        return;
    }
    trace_scsi_req_data(req->dev->id, req->lun, req->tag, len);
    assert(req->cmd.mode != SCSI_XFER_NONE);
    if (!req->sg) {
        req->resid -= len;
        req->bus->info->transfer_data(req, len);
        return;
    }

    /* If the device calls scsi_req_data and the HBA specified a
     * scatter/gather list, the transfer has to happen in a single
     * step.  */
    assert(!req->dma_started);
    req->dma_started = true;

    buf = scsi_req_get_buf(req);
    if (req->cmd.mode == SCSI_XFER_FROM_DEV) {
        req->resid = dma_buf_read(buf, len, req->sg);
    } else {
        req->resid = dma_buf_write(buf, len, req->sg);
    }
    scsi_req_continue(req);
}

void scsi_req_print(SCSIRequest *req)
{
    FILE *fp = stderr;
    int i;

    fprintf(fp, "[%s id=%d] %s",
            req->dev->qdev.parent_bus->name,
            req->dev->id,
            scsi_command_name(req->cmd.buf[0]));
    for (i = 1; i < req->cmd.len; i++) {
        fprintf(fp, " 0x%02x", req->cmd.buf[i]);
    }
    switch (req->cmd.mode) {
    case SCSI_XFER_NONE:
        fprintf(fp, " - none\n");
        break;
    case SCSI_XFER_FROM_DEV:
        fprintf(fp, " - from-dev len=%zd\n", req->cmd.xfer);
        break;
    case SCSI_XFER_TO_DEV:
        fprintf(fp, " - to-dev len=%zd\n", req->cmd.xfer);
        break;
    default:
        fprintf(fp, " - Oops\n");
        break;
    }
}

void scsi_req_complete(SCSIRequest *req, int status)
{
    assert(req->status == -1);
    req->status = status;

    assert(req->sense_len <= sizeof(req->sense));
    if (status == GOOD) {
        req->sense_len = 0;
    }

    if (req->sense_len) {
        memcpy(req->dev->sense, req->sense, req->sense_len);
        req->dev->sense_len = req->sense_len;
        req->dev->sense_is_ua = (req->ops == &reqops_unit_attention);
    } else {
        req->dev->sense_len = 0;
        req->dev->sense_is_ua = false;
    }

    /*
     * Unit attention state is now stored in the device's sense buffer
     * if the HBA didn't do autosense.  Clear the pending unit attention
     * flags.
     */
    scsi_clear_unit_attention(req);

    scsi_req_ref(req);
    scsi_req_dequeue(req);
    req->bus->info->complete(req, req->status, req->resid);

    /* Cancelled requests might end up being completed instead of cancelled */
    notifier_list_notify(&req->cancel_notifiers, req);
    scsi_req_unref(req);
}

/* Called by the devices when the request is canceled.  */
void scsi_req_cancel_complete(SCSIRequest *req)
{
    assert(req->io_canceled);
    if (req->bus->info->cancel) {
        req->bus->info->cancel(req);
    }
    notifier_list_notify(&req->cancel_notifiers, req);
    scsi_req_unref(req);
}

/* Cancel @req asynchronously.  @notifier is added to @req's cancellation
 * notifier list; the bus will be notified once the request's cancellation is
 * completed.
 */
void scsi_req_cancel_async(SCSIRequest *req, Notifier *notifier)
{
    trace_scsi_req_cancel(req->dev->id, req->lun, req->tag);
    if (notifier) {
        notifier_list_add(&req->cancel_notifiers, notifier);
    }
    if (req->io_canceled) {
        /* A blk_aio_cancel_async is pending; when it finishes,
         * scsi_req_cancel_complete will be called and will
         * call the notifier we just added.  Just wait for that.
         */
        assert(req->aiocb);
        return;
    }
    /* Dropped in scsi_req_cancel_complete.  */
    scsi_req_ref(req);
    scsi_req_dequeue(req);
    req->io_canceled = true;
    if (req->aiocb) {
        blk_aio_cancel_async(req->aiocb);
    } else {
        scsi_req_cancel_complete(req);
    }
}

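/*
 * Synchronous counterpart of scsi_req_cancel_async(): blk_aio_cancel() below
 * waits for any in-flight AIO to finish before this function returns.
 */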
void scsi_req_cancel(SCSIRequest *req)
{
    trace_scsi_req_cancel(req->dev->id, req->lun, req->tag);
    if (!req->enqueued) {
        return;
    }
    assert(!req->io_canceled);
    /* Dropped in scsi_req_cancel_complete.  */
    scsi_req_ref(req);
    scsi_req_dequeue(req);
    req->io_canceled = true;
    if (req->aiocb) {
        blk_aio_cancel(req->aiocb);
    } else {
        scsi_req_cancel_complete(req);
    }
}

static int scsi_ua_precedence(SCSISense sense)
{
    if (sense.key != UNIT_ATTENTION) {
        return INT_MAX;
    }
    if (sense.asc == 0x29 && sense.ascq == 0x04) {
        /* DEVICE INTERNAL RESET goes with POWER ON OCCURRED */
        return 1;
    } else if (sense.asc == 0x3F && sense.ascq == 0x01) {
        /* MICROCODE HAS BEEN CHANGED goes with SCSI BUS RESET OCCURRED */
        return 2;
    } else if (sense.asc == 0x29 && (sense.ascq == 0x05 || sense.ascq == 0x06)) {
        /* These two go with "all others". */
        ;
    } else if (sense.asc == 0x29 && sense.ascq <= 0x07) {
        /* POWER ON, RESET OR BUS DEVICE RESET OCCURRED = 0
         * POWER ON OCCURRED = 1
         * SCSI BUS RESET OCCURRED = 2
         * BUS DEVICE RESET FUNCTION OCCURRED = 3
         * I_T NEXUS LOSS OCCURRED = 7
         */
        return sense.ascq;
    } else if (sense.asc == 0x2F && sense.ascq == 0x01) {
        /* COMMANDS CLEARED BY POWER LOSS NOTIFICATION */
        return 8;
    }
    return (sense.asc << 8) | sense.ascq;
}

void scsi_device_set_ua(SCSIDevice *sdev, SCSISense sense)
{
    int prec1, prec2;
    if (sense.key != UNIT_ATTENTION) {
        return;
    }
    trace_scsi_device_set_ua(sdev->id, sdev->lun, sense.key,
                             sense.asc, sense.ascq);

    /*
     * Override a pre-existing unit attention condition, except for a more
     * important reset condition.
     */
    prec1 = scsi_ua_precedence(sdev->unit_attention);
    prec2 = scsi_ua_precedence(sense);
    if (prec2 < prec1) {
        sdev->unit_attention = sense;
    }
}

void scsi_device_purge_requests(SCSIDevice *sdev, SCSISense sense)
{
    SCSIRequest *req;

    aio_context_acquire(blk_get_aio_context(sdev->conf.blk));
    while (!QTAILQ_EMPTY(&sdev->requests)) {
        req = QTAILQ_FIRST(&sdev->requests);
        scsi_req_cancel_async(req, NULL);
    }
    blk_drain(sdev->conf.blk);
    aio_context_release(blk_get_aio_context(sdev->conf.blk));
    scsi_device_set_ua(sdev, sense);
}

static char *scsibus_get_dev_path(DeviceState *dev)
{
    SCSIDevice *d = SCSI_DEVICE(dev);
    DeviceState *hba = dev->parent_bus->parent;
    char *id;
    char *path;

    id = qdev_get_dev_path(hba);
    if (id) {
        path = g_strdup_printf("%s/%d:%d:%d", id, d->channel, d->id, d->lun);
    } else {
        path = g_strdup_printf("%d:%d:%d", d->channel, d->id, d->lun);
    }
    g_free(id);
    return path;
}

static char *scsibus_get_fw_dev_path(DeviceState *dev)
{
    SCSIDevice *d = SCSI_DEVICE(dev);
    return g_strdup_printf("channel@%x/%s@%x,%x", d->channel,
                           qdev_fw_name(dev), d->id, d->lun);
}

/* SCSI request list.  For simplicity, pv points to the whole device */

static int put_scsi_requests(QEMUFile *f, void *pv, size_t size,
                             const VMStateField *field, QJSON *vmdesc)
{
    SCSIDevice *s = pv;
    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, s->qdev.parent_bus);
    SCSIRequest *req;

    QTAILQ_FOREACH(req, &s->requests, next) {
        assert(!req->io_canceled);
        assert(req->status == -1);
        assert(req->enqueued);

        qemu_put_sbyte(f, req->retry ? 1 : 2);
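        /*
         * 1 flags a request to be retried after load, 2 an in-flight request
         * saved without the retry flag; a 0 byte terminates the list (see the
         * qemu_get_sbyte() loop in get_scsi_requests() below).
         */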
        qemu_put_buffer(f, req->cmd.buf, sizeof(req->cmd.buf));
        qemu_put_be32s(f, &req->tag);
        qemu_put_be32s(f, &req->lun);
        if (bus->info->save_request) {
            bus->info->save_request(f, req);
        }
        if (req->ops->save_request) {
            req->ops->save_request(f, req);
        }
    }
    qemu_put_sbyte(f, 0);

    return 0;
}

static int get_scsi_requests(QEMUFile *f, void *pv, size_t size,
                             const VMStateField *field)
{
    SCSIDevice *s = pv;
    SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, s->qdev.parent_bus);
    int8_t sbyte;

    while ((sbyte = qemu_get_sbyte(f)) > 0) {
        uint8_t buf[SCSI_CMD_BUF_SIZE];
        uint32_t tag;
        uint32_t lun;
        SCSIRequest *req;

        qemu_get_buffer(f, buf, sizeof(buf));
        qemu_get_be32s(f, &tag);
        qemu_get_be32s(f, &lun);
        req = scsi_req_new(s, tag, lun, buf, NULL);
        req->retry = (sbyte == 1);
        if (bus->info->load_request) {
            req->hba_private = bus->info->load_request(f, req);
        }
        if (req->ops->load_request) {
            req->ops->load_request(f, req);
        }

        /* Just restart it later.  */
        scsi_req_enqueue_internal(req);

        /* At this point, the request will be kept alive by the reference
         * added by scsi_req_enqueue_internal, so we can release our reference.
         * The HBA of course will add its own reference in the load_request
         * callback if it needs to hold on the SCSIRequest.
         */
        scsi_req_unref(req);
    }

    return 0;
}

static const VMStateInfo vmstate_info_scsi_requests = {
    .name = "scsi-requests",
    .get = get_scsi_requests,
    .put = put_scsi_requests,
};

static bool scsi_sense_state_needed(void *opaque)
{
    SCSIDevice *s = opaque;

    return s->sense_len > SCSI_SENSE_BUF_SIZE_OLD;
}

static const VMStateDescription vmstate_scsi_sense_state = {
    .name = "SCSIDevice/sense",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = scsi_sense_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8_SUB_ARRAY(sense, SCSIDevice,
                                SCSI_SENSE_BUF_SIZE_OLD,
                                SCSI_SENSE_BUF_SIZE - SCSI_SENSE_BUF_SIZE_OLD),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_scsi_device = {
    .name = "SCSIDevice",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(unit_attention.key, SCSIDevice),
        VMSTATE_UINT8(unit_attention.asc, SCSIDevice),
        VMSTATE_UINT8(unit_attention.ascq, SCSIDevice),
        VMSTATE_BOOL(sense_is_ua, SCSIDevice),
        VMSTATE_UINT8_SUB_ARRAY(sense, SCSIDevice, 0, SCSI_SENSE_BUF_SIZE_OLD),
        VMSTATE_UINT32(sense_len, SCSIDevice),
        {
            .name = "requests",
            .version_id = 0,
            .field_exists = NULL,
            .size = 0,   /* ouch */
            .info = &vmstate_info_scsi_requests,
            .flags = VMS_SINGLE,
            .offset = 0,
        },
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_scsi_sense_state,
        NULL
    }
};

static Property scsi_props[] = {
    DEFINE_PROP_UINT32("channel", SCSIDevice, channel, 0),
    DEFINE_PROP_UINT32("scsi-id", SCSIDevice, id, -1),
    DEFINE_PROP_UINT32("lun", SCSIDevice, lun, -1),
    DEFINE_PROP_END_OF_LIST(),
};

static void scsi_device_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *k = DEVICE_CLASS(klass);
    set_bit(DEVICE_CATEGORY_STORAGE, k->categories);
    k->bus_type = TYPE_SCSI_BUS;
    k->realize = scsi_qdev_realize;
    k->unrealize = scsi_qdev_unrealize;
    device_class_set_props(k, scsi_props);
}

static void scsi_dev_instance_init(Object *obj)
{
    DeviceState *dev = DEVICE(obj);
    SCSIDevice *s = SCSI_DEVICE(dev);

    device_add_bootindex_property(obj, &s->conf.bootindex,
                                  "bootindex", NULL,
                                  &s->qdev);
}

static const TypeInfo scsi_device_type_info = {
    .name = TYPE_SCSI_DEVICE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(SCSIDevice),
    .abstract = true,
    .class_size = sizeof(SCSIDeviceClass),
    .class_init = scsi_device_class_init,
    .instance_init = scsi_dev_instance_init,
};

static void scsi_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *k = BUS_CLASS(klass);
    HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass);

    k->get_dev_path = scsibus_get_dev_path;
    k->get_fw_dev_path = scsibus_get_fw_dev_path;
    k->check_address = scsi_bus_check_address;
    hc->unplug = qdev_simple_device_unplug_cb;
}

static const TypeInfo scsi_bus_info = {
    .name = TYPE_SCSI_BUS,
    .parent = TYPE_BUS,
    .instance_size = sizeof(SCSIBus),
    .class_init = scsi_bus_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_HOTPLUG_HANDLER },
        { }
    }
};

static void scsi_register_types(void)
{
    type_register_static(&scsi_bus_info);
    type_register_static(&scsi_device_type_info);
}

type_init(scsi_register_types)