/*
 * Generic SCSI Device support
 *
 * Copyright (c) 2007 Bull S.A.S.
 * Based on code by Paul Brook
 * Based on code by Fabrice Bellard
 *
 * Written by Laurent Vivier <Laurent.Vivier@bull.net>
 *
 * This code is licensed under the LGPL.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/ctype.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "hw/scsi/scsi.h"
#include "hw/scsi/emulation.h"
#include "sysemu/block-backend.h"
#include "trace.h"

#ifdef __linux__

#include <scsi/sg.h>
#include "scsi/constants.h"

#ifndef MAX_UINT
#define MAX_UINT ((unsigned int)-1)
#endif

/*
 * State for one pass-through request: wraps the generic SCSIRequest
 * together with the data buffer and the sg_io_hdr_t handed to the
 * Linux SG_IO ioctl.
 */
typedef struct SCSIGenericReq {
    SCSIRequest req;
    uint8_t *buf;           /* data buffer, passed as io_header.dxferp */
    int buflen;             /* allocated size of buf, in bytes */
    int len;                /* bytes still expected; -1 once data was read */
    sg_io_hdr_t io_header;  /* request header for the SG_IO ioctl */
} SCSIGenericReq;

/*
 * Save the request state (used via vmstate_scsi_device).  Only buffers
 * for guest-to-device transfers are saved; data for reads is fetched
 * from the device again after load.
 */
static void scsi_generic_save_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);

    qemu_put_sbe32s(f, &r->buflen);
    if (r->buflen && r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        assert(!r->req.sg);
        qemu_put_buffer(f, r->buf, r->req.cmd.xfer);
    }
}

/* Counterpart of scsi_generic_save_request: restore buflen and data. */
static void scsi_generic_load_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);

    qemu_get_sbe32s(f, &r->buflen);
    if (r->buflen && r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        assert(!r->req.sg);
        qemu_get_buffer(f, r->buf, r->req.cmd.xfer);
    }
}

/* Release the per-request data buffer (g_free(NULL) is a no-op). */
static void scsi_free_request(SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);

    g_free(r->buf);
}

/* Helper function for command completion.
*/ 71 static void scsi_command_complete_noio(SCSIGenericReq *r, int ret) 72 { 73 int status; 74 SCSISense sense; 75 76 assert(r->req.aiocb == NULL); 77 78 if (r->req.io_canceled) { 79 scsi_req_cancel_complete(&r->req); 80 goto done; 81 } 82 status = sg_io_sense_from_errno(-ret, &r->io_header, &sense); 83 if (status == CHECK_CONDITION) { 84 if (r->io_header.driver_status & SG_ERR_DRIVER_SENSE) { 85 r->req.sense_len = r->io_header.sb_len_wr; 86 } else { 87 scsi_req_build_sense(&r->req, sense); 88 } 89 } 90 91 trace_scsi_generic_command_complete_noio(r, r->req.tag, status); 92 93 scsi_req_complete(&r->req, status); 94 done: 95 scsi_req_unref(&r->req); 96 } 97 98 static void scsi_command_complete(void *opaque, int ret) 99 { 100 SCSIGenericReq *r = (SCSIGenericReq *)opaque; 101 SCSIDevice *s = r->req.dev; 102 103 assert(r->req.aiocb != NULL); 104 r->req.aiocb = NULL; 105 106 aio_context_acquire(blk_get_aio_context(s->conf.blk)); 107 scsi_command_complete_noio(r, ret); 108 aio_context_release(blk_get_aio_context(s->conf.blk)); 109 } 110 111 static int execute_command(BlockBackend *blk, 112 SCSIGenericReq *r, int direction, 113 BlockCompletionFunc *complete) 114 { 115 r->io_header.interface_id = 'S'; 116 r->io_header.dxfer_direction = direction; 117 r->io_header.dxferp = r->buf; 118 r->io_header.dxfer_len = r->buflen; 119 r->io_header.cmdp = r->req.cmd.buf; 120 r->io_header.cmd_len = r->req.cmd.len; 121 r->io_header.mx_sb_len = sizeof(r->req.sense); 122 r->io_header.sbp = r->req.sense; 123 r->io_header.timeout = MAX_UINT; 124 r->io_header.usr_ptr = r; 125 r->io_header.flags |= SG_FLAG_DIRECT_IO; 126 127 r->req.aiocb = blk_aio_ioctl(blk, SG_IO, &r->io_header, complete, r); 128 if (r->req.aiocb == NULL) { 129 return -EIO; 130 } 131 132 return 0; 133 } 134 135 static void scsi_handle_inquiry_reply(SCSIGenericReq *r, SCSIDevice *s) 136 { 137 uint8_t page, page_idx; 138 139 /* 140 * EVPD set to zero returns the standard INQUIRY data. 
141 * 142 * Check if scsi_version is unset (-1) to avoid re-defining it 143 * each time an INQUIRY with standard data is received. 144 * scsi_version is initialized with -1 in scsi_generic_reset 145 * and scsi_disk_reset, making sure that we'll set the 146 * scsi_version after a reset. If the version field of the 147 * INQUIRY response somehow changes after a guest reboot, 148 * we'll be able to keep track of it. 149 * 150 * On SCSI-2 and older, first 3 bits of byte 2 is the 151 * ANSI-approved version, while on later versions the 152 * whole byte 2 contains the version. Check if we're dealing 153 * with a newer version and, in that case, assign the 154 * whole byte. 155 */ 156 if (s->scsi_version == -1 && !(r->req.cmd.buf[1] & 0x01)) { 157 s->scsi_version = r->buf[2] & 0x07; 158 if (s->scsi_version > 2) { 159 s->scsi_version = r->buf[2]; 160 } 161 } 162 163 if (s->type == TYPE_DISK && (r->req.cmd.buf[1] & 0x01)) { 164 page = r->req.cmd.buf[2]; 165 if (page == 0xb0) { 166 uint32_t max_transfer = 167 blk_get_max_transfer(s->conf.blk) / s->blocksize; 168 169 assert(max_transfer); 170 stl_be_p(&r->buf[8], max_transfer); 171 /* Also take care of the opt xfer len. */ 172 stl_be_p(&r->buf[12], 173 MIN_NON_ZERO(max_transfer, ldl_be_p(&r->buf[12]))); 174 } else if (s->needs_vpd_bl_emulation && page == 0x00 && r->buflen >= 4) { 175 /* 176 * Now we're capable of supplying the VPD Block Limits 177 * response if the hardware can't. Add it in the INQUIRY 178 * Supported VPD pages response in case we are using the 179 * emulation for this device. 180 * 181 * This way, the guest kernel will be aware of the support 182 * and will use it to proper setup the SCSI device. 183 * 184 * VPD page numbers must be sorted, so insert 0xb0 at the 185 * right place with an in-place insert. When the while loop 186 * begins the device response is at r[0] to r[page_idx - 1]. 
187 */ 188 page_idx = lduw_be_p(r->buf + 2) + 4; 189 page_idx = MIN(page_idx, r->buflen); 190 while (page_idx > 4 && r->buf[page_idx - 1] >= 0xb0) { 191 if (page_idx < r->buflen) { 192 r->buf[page_idx] = r->buf[page_idx - 1]; 193 } 194 page_idx--; 195 } 196 if (page_idx < r->buflen) { 197 r->buf[page_idx] = 0xb0; 198 } 199 stw_be_p(r->buf + 2, lduw_be_p(r->buf + 2) + 1); 200 } 201 } 202 } 203 204 static int scsi_generic_emulate_block_limits(SCSIGenericReq *r, SCSIDevice *s) 205 { 206 int len; 207 uint8_t buf[64]; 208 209 SCSIBlockLimits bl = { 210 .max_io_sectors = blk_get_max_transfer(s->conf.blk) / s->blocksize 211 }; 212 213 memset(r->buf, 0, r->buflen); 214 stb_p(buf, s->type); 215 stb_p(buf + 1, 0xb0); 216 len = scsi_emulate_block_limits(buf + 4, &bl); 217 assert(len <= sizeof(buf) - 4); 218 stw_be_p(buf + 2, len); 219 220 memcpy(r->buf, buf, MIN(r->buflen, len + 4)); 221 222 r->io_header.sb_len_wr = 0; 223 224 /* 225 * We have valid contents in the reply buffer but the 226 * io_header can report a sense error coming from 227 * the hardware in scsi_command_complete_noio. Clean 228 * up the io_header to avoid reporting it. 
229 */ 230 r->io_header.driver_status = 0; 231 r->io_header.status = 0; 232 233 return r->buflen; 234 } 235 236 static void scsi_read_complete(void * opaque, int ret) 237 { 238 SCSIGenericReq *r = (SCSIGenericReq *)opaque; 239 SCSIDevice *s = r->req.dev; 240 int len; 241 242 assert(r->req.aiocb != NULL); 243 r->req.aiocb = NULL; 244 245 aio_context_acquire(blk_get_aio_context(s->conf.blk)); 246 247 if (ret || r->req.io_canceled) { 248 scsi_command_complete_noio(r, ret); 249 goto done; 250 } 251 252 len = r->io_header.dxfer_len - r->io_header.resid; 253 trace_scsi_generic_read_complete(r->req.tag, len); 254 255 r->len = -1; 256 257 if (r->io_header.driver_status & SG_ERR_DRIVER_SENSE) { 258 SCSISense sense = 259 scsi_parse_sense_buf(r->req.sense, r->io_header.sb_len_wr); 260 261 /* 262 * Check if this is a VPD Block Limits request that 263 * resulted in sense error but would need emulation. 264 * In this case, emulate a valid VPD response. 265 */ 266 if (sense.key == ILLEGAL_REQUEST && 267 s->needs_vpd_bl_emulation && 268 r->req.cmd.buf[0] == INQUIRY && 269 (r->req.cmd.buf[1] & 0x01) && 270 r->req.cmd.buf[2] == 0xb0) { 271 len = scsi_generic_emulate_block_limits(r, s); 272 /* 273 * It's okay to jup to req_complete: no need to 274 * let scsi_handle_inquiry_reply handle an 275 * INQUIRY VPD BL request we created manually. 276 */ 277 } 278 if (sense.key) { 279 goto req_complete; 280 } 281 } 282 283 if (len == 0) { 284 scsi_command_complete_noio(r, 0); 285 goto done; 286 } 287 288 /* Snoop READ CAPACITY output to set the blocksize. 
*/ 289 if (r->req.cmd.buf[0] == READ_CAPACITY_10 && 290 (ldl_be_p(&r->buf[0]) != 0xffffffffU || s->max_lba == 0)) { 291 s->blocksize = ldl_be_p(&r->buf[4]); 292 s->max_lba = ldl_be_p(&r->buf[0]) & 0xffffffffULL; 293 } else if (r->req.cmd.buf[0] == SERVICE_ACTION_IN_16 && 294 (r->req.cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) { 295 s->blocksize = ldl_be_p(&r->buf[8]); 296 s->max_lba = ldq_be_p(&r->buf[0]); 297 } 298 blk_set_guest_block_size(s->conf.blk, s->blocksize); 299 300 /* Patch MODE SENSE device specific parameters if the BDS is opened 301 * readonly. 302 */ 303 if ((s->type == TYPE_DISK || s->type == TYPE_TAPE) && 304 blk_is_read_only(s->conf.blk) && 305 (r->req.cmd.buf[0] == MODE_SENSE || 306 r->req.cmd.buf[0] == MODE_SENSE_10) && 307 (r->req.cmd.buf[1] & 0x8) == 0) { 308 if (r->req.cmd.buf[0] == MODE_SENSE) { 309 r->buf[2] |= 0x80; 310 } else { 311 r->buf[3] |= 0x80; 312 } 313 } 314 if (r->req.cmd.buf[0] == INQUIRY) { 315 scsi_handle_inquiry_reply(r, s); 316 } 317 318 req_complete: 319 scsi_req_data(&r->req, len); 320 scsi_req_unref(&r->req); 321 322 done: 323 aio_context_release(blk_get_aio_context(s->conf.blk)); 324 } 325 326 /* Read more data from scsi device into buffer. */ 327 static void scsi_read_data(SCSIRequest *req) 328 { 329 SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req); 330 SCSIDevice *s = r->req.dev; 331 int ret; 332 333 trace_scsi_generic_read_data(req->tag); 334 335 /* The request is used as the AIO opaque value, so add a ref. 
     */
    scsi_req_ref(&r->req);
    if (r->len == -1) {
        /* All data was already transferred by scsi_read_complete. */
        scsi_command_complete_noio(r, 0);
        return;
    }

    ret = execute_command(s->conf.blk, r, SG_DXFER_FROM_DEV,
                          scsi_read_complete);
    if (ret < 0) {
        scsi_command_complete_noio(r, ret);
    }
}

/*
 * AIO completion callback for data-out commands: snoop MODE SELECT on
 * tape devices to keep s->blocksize in sync, then complete the request.
 */
static void scsi_write_complete(void * opaque, int ret)
{
    SCSIGenericReq *r = (SCSIGenericReq *)opaque;
    SCSIDevice *s = r->req.dev;

    trace_scsi_generic_write_complete(ret);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->conf.blk));

    if (ret || r->req.io_canceled) {
        scsi_command_complete_noio(r, ret);
        goto done;
    }

    if (r->req.cmd.buf[0] == MODE_SELECT && r->req.cmd.buf[4] == 12 &&
        s->type == TYPE_TAPE) {
        /* Derive the new block size from the mode parameter data. */
        s->blocksize = (r->buf[9] << 16) | (r->buf[10] << 8) | r->buf[11];
        trace_scsi_generic_write_complete_blocksize(s->blocksize);
    }

    scsi_command_complete_noio(r, ret);

done:
    aio_context_release(blk_get_aio_context(s->conf.blk));
}

/* Write data to a scsi device.  Returns nonzero on failure.
   The transfer may complete asynchronously.  */
static void scsi_write_data(SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
    SCSIDevice *s = r->req.dev;
    int ret;

    trace_scsi_generic_write_data(req->tag);
    if (r->len == 0) {
        /* First call: ask the HBA to fill the buffer before submitting. */
        r->len = r->buflen;
        scsi_req_data(&r->req, r->len);
        return;
    }

    /* The request is used as the AIO opaque value, so add a ref.  */
    scsi_req_ref(&r->req);
    ret = execute_command(s->conf.blk, r, SG_DXFER_TO_DEV, scsi_write_complete);
    if (ret < 0) {
        scsi_command_complete_noio(r, ret);
    }
}

/* Return a pointer to the data buffer.  */
static uint8_t *scsi_get_buf(SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);

    return r->buf;
}

/* Trace helper: render a CDB as a list of hex bytes. */
static void scsi_generic_command_dump(uint8_t *cmd, int len)
{
    int i;
    char *line_buffer, *p;

    /* 5 chars per byte (" 0xNN") plus the terminating NUL. */
    line_buffer = g_malloc(len * 5 + 1);

    for (i = 0, p = line_buffer; i < len; i++) {
        p += sprintf(p, " 0x%02x", cmd[i]);
    }
    trace_scsi_generic_send_command(line_buffer);

    g_free(line_buffer);
}

/* Execute a scsi command.  Returns the length of the data expected by the
   command.  This will be positive for data transfers from the device
   (eg. disk reads), negative for transfers to the device (eg. disk writes),
   and zero if the command does not transfer any data.  */

static int32_t scsi_send_command(SCSIRequest *req, uint8_t *cmd)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
    SCSIDevice *s = r->req.dev;
    int ret;

    if (trace_event_get_state_backends(TRACE_SCSI_GENERIC_SEND_COMMAND)) {
        scsi_generic_command_dump(cmd, r->req.cmd.len);
    }

    if (r->req.cmd.xfer == 0) {
        /* No data phase: submit immediately with no buffer. */
        g_free(r->buf);
        r->buflen = 0;
        r->buf = NULL;
        /* The request is used as the AIO opaque value, so add a ref.
         */
        scsi_req_ref(&r->req);
        ret = execute_command(s->conf.blk, r, SG_DXFER_NONE,
                              scsi_command_complete);
        if (ret < 0) {
            scsi_command_complete_noio(r, ret);
            return 0;
        }
        return 0;
    }

    /* (Re)allocate the data buffer if the transfer length changed. */
    if (r->buflen != r->req.cmd.xfer) {
        g_free(r->buf);
        r->buf = g_malloc(r->req.cmd.xfer);
        r->buflen = r->req.cmd.xfer;
    }

    memset(r->buf, 0, r->buflen);
    r->len = r->req.cmd.xfer;
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        r->len = 0;
        return -r->req.cmd.xfer;
    } else {
        return r->req.cmd.xfer;
    }
}

/*
 * Extract a 64-bit NAA world-wide name from one designation descriptor
 * of a Device Identification VPD page.  Handles the binary NAA
 * designator (type 3) and an ASCII "naa." SCSI name string (type 8).
 * On success stores the WWN in *p_wwn and returns 0; returns -EINVAL
 * if the descriptor is not a usable NAA identifier.
 */
static int read_naa_id(const uint8_t *p, uint64_t *p_wwn)
{
    int i;

    if ((p[1] & 0xF) == 3) {
        /* NAA designator type */
        if (p[3] != 8) {
            return -EINVAL;
        }
        *p_wwn = ldq_be_p(p + 4);
        return 0;
    }

    if ((p[1] & 0xF) == 8) {
        /* SCSI name string designator type */
        if (p[3] < 20 || memcmp(&p[4], "naa.", 4)) {
            return -EINVAL;
        }
        if (p[3] > 20 && p[24] != ',') {
            return -EINVAL;
        }
        *p_wwn = 0;
        /* Parse the 16 hex digits that follow the "naa." prefix. */
        for (i = 8; i < 24; i++) {
            char c = qemu_toupper(p[i]);
            c -= (c >= '0' && c <= '9' ? '0' : 'A' - 10);
            *p_wwn = (*p_wwn << 4) | c;
        }
        return 0;
    }

    return -EINVAL;
}

/*
 * Synchronously issue a data-in command through the SG_IO ioctl.
 * Returns 0 on success, -1 if the ioctl failed or the driver/host
 * reported an error.
 */
int scsi_SG_IO_FROM_DEV(BlockBackend *blk, uint8_t *cmd, uint8_t cmd_size,
                        uint8_t *buf, uint8_t buf_size)
{
    sg_io_hdr_t io_header;
    uint8_t sensebuf[8];
    int ret;

    memset(&io_header, 0, sizeof(io_header));
    io_header.interface_id = 'S';
    io_header.dxfer_direction = SG_DXFER_FROM_DEV;
    io_header.dxfer_len = buf_size;
    io_header.dxferp = buf;
    io_header.cmdp = cmd;
    io_header.cmd_len = cmd_size;
    io_header.mx_sb_len = sizeof(sensebuf);
    io_header.sbp = sensebuf;
    io_header.timeout = 6000; /* XXX */

    ret = blk_ioctl(blk, SG_IO, &io_header);
    if (ret < 0 || io_header.driver_status || io_header.host_status) {
        return -1;
    }
    return 0;
}

/*
 * Executes an INQUIRY request with EVPD set to retrieve the
 * available VPD pages of the device. If the device does
 * not support the Block Limits page (page 0xb0), set
 * the needs_vpd_bl_emulation flag for future use.
 */
static void scsi_generic_set_vpd_bl_emulation(SCSIDevice *s)
{
    uint8_t cmd[6];
    uint8_t buf[250];
    uint8_t page_len;
    int ret, i;

    memset(cmd, 0, sizeof(cmd));
    memset(buf, 0, sizeof(buf));
    cmd[0] = INQUIRY;
    cmd[1] = 1;            /* EVPD */
    cmd[2] = 0x00;         /* Supported VPD Pages */
    cmd[4] = sizeof(buf);

    ret = scsi_SG_IO_FROM_DEV(s->conf.blk, cmd, sizeof(cmd),
                              buf, sizeof(buf));
    if (ret < 0) {
        /*
         * Do not assume anything if we can't retrieve the
         * INQUIRY response to assert the VPD Block Limits
         * support.
555 */ 556 s->needs_vpd_bl_emulation = false; 557 return; 558 } 559 560 page_len = buf[3]; 561 for (i = 4; i < MIN(sizeof(buf), page_len + 4); i++) { 562 if (buf[i] == 0xb0) { 563 s->needs_vpd_bl_emulation = false; 564 return; 565 } 566 } 567 s->needs_vpd_bl_emulation = true; 568 } 569 570 static void scsi_generic_read_device_identification(SCSIDevice *s) 571 { 572 uint8_t cmd[6]; 573 uint8_t buf[250]; 574 int ret; 575 int i, len; 576 577 memset(cmd, 0, sizeof(cmd)); 578 memset(buf, 0, sizeof(buf)); 579 cmd[0] = INQUIRY; 580 cmd[1] = 1; 581 cmd[2] = 0x83; 582 cmd[4] = sizeof(buf); 583 584 ret = scsi_SG_IO_FROM_DEV(s->conf.blk, cmd, sizeof(cmd), 585 buf, sizeof(buf)); 586 if (ret < 0) { 587 return; 588 } 589 590 len = MIN((buf[2] << 8) | buf[3], sizeof(buf) - 4); 591 for (i = 0; i + 3 <= len; ) { 592 const uint8_t *p = &buf[i + 4]; 593 uint64_t wwn; 594 595 if (i + (p[3] + 4) > len) { 596 break; 597 } 598 599 if ((p[1] & 0x10) == 0) { 600 /* Associated with the logical unit */ 601 if (read_naa_id(p, &wwn) == 0) { 602 s->wwn = wwn; 603 } 604 } else if ((p[1] & 0x10) == 0x10) { 605 /* Associated with the target port */ 606 if (read_naa_id(p, &wwn) == 0) { 607 s->port_wwn = wwn; 608 } 609 } 610 611 i += p[3] + 4; 612 } 613 } 614 615 void scsi_generic_read_device_inquiry(SCSIDevice *s) 616 { 617 scsi_generic_read_device_identification(s); 618 if (s->type == TYPE_DISK) { 619 scsi_generic_set_vpd_bl_emulation(s); 620 } else { 621 s->needs_vpd_bl_emulation = false; 622 } 623 } 624 625 static int get_stream_blocksize(BlockBackend *blk) 626 { 627 uint8_t cmd[6]; 628 uint8_t buf[12]; 629 int ret; 630 631 memset(cmd, 0, sizeof(cmd)); 632 memset(buf, 0, sizeof(buf)); 633 cmd[0] = MODE_SENSE; 634 cmd[4] = sizeof(buf); 635 636 ret = scsi_SG_IO_FROM_DEV(blk, cmd, sizeof(cmd), buf, sizeof(buf)); 637 if (ret < 0) { 638 return -1; 639 } 640 641 return (buf[9] << 16) | (buf[10] << 8) | buf[11]; 642 } 643 644 static void scsi_generic_reset(DeviceState *dev) 645 { 646 SCSIDevice *s = 
SCSI_DEVICE(dev); 647 648 s->scsi_version = s->default_scsi_version; 649 scsi_device_purge_requests(s, SENSE_CODE(RESET)); 650 } 651 652 static void scsi_generic_realize(SCSIDevice *s, Error **errp) 653 { 654 int rc; 655 int sg_version; 656 struct sg_scsi_id scsiid; 657 658 if (!s->conf.blk) { 659 error_setg(errp, "drive property not set"); 660 return; 661 } 662 663 if (blk_get_on_error(s->conf.blk, 0) != BLOCKDEV_ON_ERROR_ENOSPC) { 664 error_setg(errp, "Device doesn't support drive option werror"); 665 return; 666 } 667 if (blk_get_on_error(s->conf.blk, 1) != BLOCKDEV_ON_ERROR_REPORT) { 668 error_setg(errp, "Device doesn't support drive option rerror"); 669 return; 670 } 671 672 /* check we are using a driver managing SG_IO (version 3 and after */ 673 rc = blk_ioctl(s->conf.blk, SG_GET_VERSION_NUM, &sg_version); 674 if (rc < 0) { 675 error_setg_errno(errp, -rc, "cannot get SG_IO version number"); 676 if (rc != -EPERM) { 677 error_append_hint(errp, "Is this a SCSI device?\n"); 678 } 679 return; 680 } 681 if (sg_version < 30000) { 682 error_setg(errp, "scsi generic interface too old"); 683 return; 684 } 685 686 /* get LUN of the /dev/sg? */ 687 if (blk_ioctl(s->conf.blk, SG_GET_SCSI_ID, &scsiid)) { 688 error_setg(errp, "SG_GET_SCSI_ID ioctl failed"); 689 return; 690 } 691 if (!blkconf_apply_backend_options(&s->conf, 692 blk_is_read_only(s->conf.blk), 693 true, errp)) { 694 return; 695 } 696 697 /* define device state */ 698 s->type = scsiid.scsi_type; 699 trace_scsi_generic_realize_type(s->type); 700 701 switch (s->type) { 702 case TYPE_TAPE: 703 s->blocksize = get_stream_blocksize(s->conf.blk); 704 if (s->blocksize == -1) { 705 s->blocksize = 0; 706 } 707 break; 708 709 /* Make a guess for block devices, we'll fix it when the guest sends. 710 * READ CAPACITY. If they don't, they likely would assume these sizes 711 * anyway. (TODO: they could also send MODE SENSE). 
712 */ 713 case TYPE_ROM: 714 case TYPE_WORM: 715 s->blocksize = 2048; 716 break; 717 default: 718 s->blocksize = 512; 719 break; 720 } 721 722 trace_scsi_generic_realize_blocksize(s->blocksize); 723 724 /* Only used by scsi-block, but initialize it nevertheless to be clean. */ 725 s->default_scsi_version = -1; 726 scsi_generic_read_device_inquiry(s); 727 } 728 729 const SCSIReqOps scsi_generic_req_ops = { 730 .size = sizeof(SCSIGenericReq), 731 .free_req = scsi_free_request, 732 .send_command = scsi_send_command, 733 .read_data = scsi_read_data, 734 .write_data = scsi_write_data, 735 .get_buf = scsi_get_buf, 736 .load_request = scsi_generic_load_request, 737 .save_request = scsi_generic_save_request, 738 }; 739 740 static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun, 741 uint8_t *buf, void *hba_private) 742 { 743 return scsi_req_alloc(&scsi_generic_req_ops, d, tag, lun, hba_private); 744 } 745 746 static Property scsi_generic_properties[] = { 747 DEFINE_PROP_DRIVE("drive", SCSIDevice, conf.blk), 748 DEFINE_PROP_BOOL("share-rw", SCSIDevice, conf.share_rw, false), 749 DEFINE_PROP_END_OF_LIST(), 750 }; 751 752 static int scsi_generic_parse_cdb(SCSIDevice *dev, SCSICommand *cmd, 753 uint8_t *buf, void *hba_private) 754 { 755 return scsi_bus_parse_cdb(dev, cmd, buf, hba_private); 756 } 757 758 static void scsi_generic_class_initfn(ObjectClass *klass, void *data) 759 { 760 DeviceClass *dc = DEVICE_CLASS(klass); 761 SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass); 762 763 sc->realize = scsi_generic_realize; 764 sc->alloc_req = scsi_new_request; 765 sc->parse_cdb = scsi_generic_parse_cdb; 766 dc->fw_name = "disk"; 767 dc->desc = "pass through generic scsi device (/dev/sg*)"; 768 dc->reset = scsi_generic_reset; 769 dc->props = scsi_generic_properties; 770 dc->vmsd = &vmstate_scsi_device; 771 } 772 773 static const TypeInfo scsi_generic_info = { 774 .name = "scsi-generic", 775 .parent = TYPE_SCSI_DEVICE, 776 .instance_size = sizeof(SCSIDevice), 777 
.class_init = scsi_generic_class_initfn, 778 }; 779 780 static void scsi_generic_register_types(void) 781 { 782 type_register_static(&scsi_generic_info); 783 } 784 785 type_init(scsi_generic_register_types) 786 787 #endif /* __linux__ */ 788