/*
 * Generic SCSI Device support
 *
 * Copyright (c) 2007 Bull S.A.S.
 * Based on code by Paul Brook
 * Based on code by Fabrice Bellard
 *
 * Written by Laurent Vivier <Laurent.Vivier@bull.net>
 *
 * This code is licensed under the LGPL.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/ctype.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "hw/scsi/scsi.h"
#include "migration/qemu-file-types.h"
#include "hw/scsi/emulation.h"
#include "sysemu/block-backend.h"
#include "trace.h"

#ifdef __linux__

#include <scsi/sg.h>
#include "scsi/constants.h"

#ifndef MAX_UINT
#define MAX_UINT ((unsigned int)-1)
#endif

/* Per-request state for a SCSI command passed through to the host via SG_IO. */
typedef struct SCSIGenericReq {
    SCSIRequest req;
    uint8_t *buf;           /* data buffer for the transfer (g_malloc'd, owned here) */
    int buflen;             /* allocated size of buf in bytes */
    int len;                /* transfer length still expected; set to -1 by the read
                             * completion path to signal "data already delivered" */
    sg_io_hdr_t io_header;  /* Linux sg(4) v3 request header handed to SG_IO */
} SCSIGenericReq;

/*
 * Migration: save the request's buffer length and, for host-bound (write)
 * transfers, the not-yet-submitted data.  Reads need no data saved: they
 * will be re-issued on the destination.
 */
static void scsi_generic_save_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);

    qemu_put_sbe32s(f, &r->buflen);
    if (r->buflen && r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        assert(!r->req.sg);
        qemu_put_buffer(f, r->buf, r->req.cmd.xfer);
    }
}

/*
 * Migration: restore state in exactly the order written by
 * scsi_generic_save_request (wire format must not change).
 */
static void scsi_generic_load_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);

    qemu_get_sbe32s(f, &r->buflen);
    if (r->buflen && r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        assert(!r->req.sg);
        qemu_get_buffer(f, r->buf, r->req.cmd.xfer);
    }
}

/* Release the data buffer owned by the request (g_free(NULL) is a no-op). */
static void scsi_free_request(SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);

    g_free(r->buf);
}

/* Helper function for command completion.
 */
static void scsi_command_complete_noio(SCSIGenericReq *r, int ret)
{
    int status;
    SCSISense sense;

    assert(r->req.aiocb == NULL);

    if (r->req.io_canceled) {
        scsi_req_cancel_complete(&r->req);
        goto done;
    }
    /* Map errno plus the sg header status fields into a SCSI status byte. */
    status = sg_io_sense_from_errno(-ret, &r->io_header, &sense);
    if (status == CHECK_CONDITION) {
        if (r->io_header.driver_status & SG_ERR_DRIVER_SENSE) {
            /* The device already supplied sense bytes in req.sense; keep them. */
            r->req.sense_len = r->io_header.sb_len_wr;
        } else {
            /* Otherwise synthesize sense data from the errno-derived sense. */
            scsi_req_build_sense(&r->req, sense);
        }
    }

    trace_scsi_generic_command_complete_noio(r, r->req.tag, status);

    scsi_req_complete(&r->req, status);
done:
    /* Drop the reference taken when the command was submitted. */
    scsi_req_unref(&r->req);
}

/* AIO completion callback for commands with no data transfer. */
static void scsi_command_complete(void *opaque, int ret)
{
    SCSIGenericReq *r = (SCSIGenericReq *)opaque;
    SCSIDevice *s = r->req.dev;

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->conf.blk));
    scsi_command_complete_noio(r, ret);
    aio_context_release(blk_get_aio_context(s->conf.blk));
}

/*
 * Fill in the sg(4) v3 header for the request and submit it asynchronously
 * via the SG_IO ioctl.  Returns 0 on success, -EIO if the AIO request could
 * not be issued; on success @complete is invoked when the command finishes.
 */
static int execute_command(BlockBackend *blk,
                           SCSIGenericReq *r, int direction,
                           BlockCompletionFunc *complete)
{
    r->io_header.interface_id = 'S';
    r->io_header.dxfer_direction = direction;
    r->io_header.dxferp = r->buf;
    r->io_header.dxfer_len = r->buflen;
    r->io_header.cmdp = r->req.cmd.buf;
    r->io_header.cmd_len = r->req.cmd.len;
    r->io_header.mx_sb_len = sizeof(r->req.sense);
    r->io_header.sbp = r->req.sense;
    /* Effectively "no timeout"; the guest decides when to give up. */
    r->io_header.timeout = MAX_UINT;
    r->io_header.usr_ptr = r;
    r->io_header.flags |= SG_FLAG_DIRECT_IO;

    r->req.aiocb = blk_aio_ioctl(blk, SG_IO, &r->io_header, complete, r);
    if (r->req.aiocb == NULL) {
        return -EIO;
    }

    return 0;
}

/* Snoop and patch an INQUIRY response before it is handed to the guest. */
static void scsi_handle_inquiry_reply(SCSIGenericReq *r, SCSIDevice *s)
{
    uint8_t page, page_idx;

    /*
     * EVPD set to zero returns the standard INQUIRY data.
     *
     * Check if scsi_version is unset (-1) to avoid re-defining it
     * each time an INQUIRY with standard data is received.
     * scsi_version is initialized with -1 in scsi_generic_reset
     * and scsi_disk_reset, making sure that we'll set the
     * scsi_version after a reset. If the version field of the
     * INQUIRY response somehow changes after a guest reboot,
     * we'll be able to keep track of it.
     *
     * On SCSI-2 and older, first 3 bits of byte 2 is the
     * ANSI-approved version, while on later versions the
     * whole byte 2 contains the version. Check if we're dealing
     * with a newer version and, in that case, assign the
     * whole byte.
     */
    if (s->scsi_version == -1 && !(r->req.cmd.buf[1] & 0x01)) {
        s->scsi_version = r->buf[2] & 0x07;
        if (s->scsi_version > 2) {
            s->scsi_version = r->buf[2];
        }
    }

    if (s->type == TYPE_DISK && (r->req.cmd.buf[1] & 0x01)) {
        page = r->req.cmd.buf[2];
        if (page == 0xb0) {
            /* VPD Block Limits: clamp the reported maximum transfer length
             * to what the host block backend can actually handle. */
            uint32_t max_transfer =
                blk_get_max_transfer(s->conf.blk) / s->blocksize;

            assert(max_transfer);
            stl_be_p(&r->buf[8], max_transfer);
            /* Also take care of the opt xfer len. */
            stl_be_p(&r->buf[12],
                     MIN_NON_ZERO(max_transfer, ldl_be_p(&r->buf[12])));
        } else if (s->needs_vpd_bl_emulation && page == 0x00 && r->buflen >= 4) {
            /*
             * Now we're capable of supplying the VPD Block Limits
             * response if the hardware can't. Add it in the INQUIRY
             * Supported VPD pages response in case we are using the
             * emulation for this device.
             *
             * This way, the guest kernel will be aware of the support
             * and will use it to proper setup the SCSI device.
             *
             * VPD page numbers must be sorted, so insert 0xb0 at the
             * right place with an in-place insert. When the while loop
             * begins the device response is at r[0] to r[page_idx - 1].
             */
            page_idx = lduw_be_p(r->buf + 2) + 4;
            page_idx = MIN(page_idx, r->buflen);
            while (page_idx > 4 && r->buf[page_idx - 1] >= 0xb0) {
                if (page_idx < r->buflen) {
                    r->buf[page_idx] = r->buf[page_idx - 1];
                }
                page_idx--;
            }
            if (page_idx < r->buflen) {
                r->buf[page_idx] = 0xb0;
            }
            stw_be_p(r->buf + 2, lduw_be_p(r->buf + 2) + 1);
        }
    }
}

/*
 * Build an emulated VPD Block Limits (0xb0) response in r->buf, for devices
 * whose hardware rejects the page.  Returns the number of valid bytes.
 */
static int scsi_generic_emulate_block_limits(SCSIGenericReq *r, SCSIDevice *s)
{
    int len;
    uint8_t buf[64];

    SCSIBlockLimits bl = {
        .max_io_sectors = blk_get_max_transfer(s->conf.blk) / s->blocksize
    };

    memset(r->buf, 0, r->buflen);
    stb_p(buf, s->type);
    stb_p(buf + 1, 0xb0);
    len = scsi_emulate_block_limits(buf + 4, &bl);
    assert(len <= sizeof(buf) - 4);
    stw_be_p(buf + 2, len);

    memcpy(r->buf, buf, MIN(r->buflen, len + 4));

    r->io_header.sb_len_wr = 0;

    /*
     * We have valid contents in the reply buffer but the
     * io_header can report a sense error coming from
     * the hardware in scsi_command_complete_noio. Clean
     * up the io_header to avoid reporting it.
     */
    r->io_header.driver_status = 0;
    r->io_header.status = 0;

    return r->buflen;
}

/* AIO completion callback for device-to-guest transfers. */
static void scsi_read_complete(void *opaque, int ret)
{
    SCSIGenericReq *r = (SCSIGenericReq *)opaque;
    SCSIDevice *s = r->req.dev;
    int len;

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->conf.blk));

    if (ret || r->req.io_canceled) {
        scsi_command_complete_noio(r, ret);
        goto done;
    }

    /* Actual bytes transferred = requested length minus the residual. */
    len = r->io_header.dxfer_len - r->io_header.resid;
    trace_scsi_generic_read_complete(r->req.tag, len);

    /* Mark the data as delivered so the next read_data completes the req. */
    r->len = -1;

    if (r->io_header.driver_status & SG_ERR_DRIVER_SENSE) {
        SCSISense sense =
            scsi_parse_sense_buf(r->req.sense, r->io_header.sb_len_wr);

        /*
         * Check if this is a VPD Block Limits request that
         * resulted in sense error but would need emulation.
         * In this case, emulate a valid VPD response.
         */
        if (sense.key == ILLEGAL_REQUEST &&
            s->needs_vpd_bl_emulation &&
            r->req.cmd.buf[0] == INQUIRY &&
            (r->req.cmd.buf[1] & 0x01) &&
            r->req.cmd.buf[2] == 0xb0) {
            len = scsi_generic_emulate_block_limits(r, s);
            /*
             * It's okay to jump to req_complete: no need to
             * let scsi_handle_inquiry_reply handle an
             * INQUIRY VPD BL request we created manually.
             */
        }
        if (sense.key) {
            goto req_complete;
        }
    }

    if (len == 0) {
        scsi_command_complete_noio(r, 0);
        goto done;
    }

    /* Snoop READ CAPACITY output to set the blocksize. */
    if (r->req.cmd.buf[0] == READ_CAPACITY_10 &&
        (ldl_be_p(&r->buf[0]) != 0xffffffffU || s->max_lba == 0)) {
        s->blocksize = ldl_be_p(&r->buf[4]);
        s->max_lba = ldl_be_p(&r->buf[0]) & 0xffffffffULL;
    } else if (r->req.cmd.buf[0] == SERVICE_ACTION_IN_16 &&
               (r->req.cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) {
        s->blocksize = ldl_be_p(&r->buf[8]);
        s->max_lba = ldq_be_p(&r->buf[0]);
    }
    blk_set_guest_block_size(s->conf.blk, s->blocksize);

    /* Patch MODE SENSE device specific parameters if the BDS is opened
     * readonly.
     */
    if ((s->type == TYPE_DISK || s->type == TYPE_TAPE) &&
        blk_is_read_only(s->conf.blk) &&
        (r->req.cmd.buf[0] == MODE_SENSE ||
         r->req.cmd.buf[0] == MODE_SENSE_10) &&
        (r->req.cmd.buf[1] & 0x8) == 0) {
        if (r->req.cmd.buf[0] == MODE_SENSE) {
            r->buf[2] |= 0x80;   /* set the WP (write protect) bit */
        } else {
            r->buf[3] |= 0x80;
        }
    }
    if (r->req.cmd.buf[0] == INQUIRY) {
        scsi_handle_inquiry_reply(r, s);
    }

req_complete:
    scsi_req_data(&r->req, len);
    scsi_req_unref(&r->req);

done:
    aio_context_release(blk_get_aio_context(s->conf.blk));
}

/* Read more data from scsi device into buffer. */
static void scsi_read_data(SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
    SCSIDevice *s = r->req.dev;
    int ret;

    trace_scsi_generic_read_data(req->tag);

    /* The request is used as the AIO opaque value, so add a ref. */
    scsi_req_ref(&r->req);
    if (r->len == -1) {
        /* Data was already delivered by scsi_read_complete; finish up. */
        scsi_command_complete_noio(r, 0);
        return;
    }

    ret = execute_command(s->conf.blk, r, SG_DXFER_FROM_DEV,
                          scsi_read_complete);
    if (ret < 0) {
        scsi_command_complete_noio(r, ret);
    }
}

/* AIO completion callback for guest-to-device transfers. */
static void scsi_write_complete(void *opaque, int ret)
{
    SCSIGenericReq *r = (SCSIGenericReq *)opaque;
    SCSIDevice *s = r->req.dev;

    trace_scsi_generic_write_complete(ret);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->conf.blk));

    if (ret || r->req.io_canceled) {
        scsi_command_complete_noio(r, ret);
        goto done;
    }

    /* Snoop tape MODE SELECT with a 12-byte parameter list for the new
     * block size (bytes 9..11 of the block descriptor). */
    if (r->req.cmd.buf[0] == MODE_SELECT && r->req.cmd.buf[4] == 12 &&
        s->type == TYPE_TAPE) {
        s->blocksize = (r->buf[9] << 16) | (r->buf[10] << 8) | r->buf[11];
        trace_scsi_generic_write_complete_blocksize(s->blocksize);
    }

    scsi_command_complete_noio(r, ret);

done:
    aio_context_release(blk_get_aio_context(s->conf.blk));
}

/* Write data to a scsi device. Returns nonzero on failure.
   The transfer may complete asynchronously. */
static void scsi_write_data(SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
    SCSIDevice *s = r->req.dev;
    int ret;

    trace_scsi_generic_write_data(req->tag);
    if (r->len == 0) {
        /* First call: ask the HBA to fill the buffer before submitting. */
        r->len = r->buflen;
        scsi_req_data(&r->req, r->len);
        return;
    }

    /* The request is used as the AIO opaque value, so add a ref. */
    scsi_req_ref(&r->req);
    ret = execute_command(s->conf.blk, r, SG_DXFER_TO_DEV, scsi_write_complete);
    if (ret < 0) {
        scsi_command_complete_noio(r, ret);
    }
}

/* Return a pointer to the data buffer.
 */
static uint8_t *scsi_get_buf(SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);

    return r->buf;
}

/* Trace helper: format a CDB as " 0xNN" hex bytes and emit it. */
static void scsi_generic_command_dump(uint8_t *cmd, int len)
{
    int i;
    char *line_buffer, *p;

    /* 5 chars per byte (" 0xNN") plus the trailing NUL. */
    line_buffer = g_malloc(len * 5 + 1);

    for (i = 0, p = line_buffer; i < len; i++) {
        p += sprintf(p, " 0x%02x", cmd[i]);
    }
    trace_scsi_generic_send_command(line_buffer);

    g_free(line_buffer);
}

/* Execute a scsi command. Returns the length of the data expected by the
   command. This will be Positive for data transfers from the device
   (eg. disk reads), negative for transfers to the device (eg. disk writes),
   and zero if the command does not transfer any data. */

static int32_t scsi_send_command(SCSIRequest *req, uint8_t *cmd)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
    SCSIDevice *s = r->req.dev;
    int ret;

    if (trace_event_get_state_backends(TRACE_SCSI_GENERIC_SEND_COMMAND)) {
        scsi_generic_command_dump(cmd, r->req.cmd.len);
    }

    if (r->req.cmd.xfer == 0) {
        /* No data phase: submit immediately with a NULL buffer. */
        g_free(r->buf);
        r->buflen = 0;
        r->buf = NULL;
        /* The request is used as the AIO opaque value, so add a ref. */
        scsi_req_ref(&r->req);
        ret = execute_command(s->conf.blk, r, SG_DXFER_NONE,
                              scsi_command_complete);
        if (ret < 0) {
            scsi_command_complete_noio(r, ret);
            return 0;
        }
        return 0;
    }

    /* (Re)allocate the data buffer only when the size actually changed. */
    if (r->buflen != r->req.cmd.xfer) {
        g_free(r->buf);
        r->buf = g_malloc(r->req.cmd.xfer);
        r->buflen = r->req.cmd.xfer;
    }

    memset(r->buf, 0, r->buflen);
    r->len = r->req.cmd.xfer;
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        r->len = 0;
        return -r->req.cmd.xfer;
    } else {
        return r->req.cmd.xfer;
    }
}

/*
 * Extract a 64-bit WWN from one VPD page 0x83 designation descriptor @p.
 * Handles NAA binary designators and "naa."-prefixed SCSI name strings.
 * Returns 0 and stores the WWN in *p_wwn on success, -EINVAL otherwise.
 */
static int read_naa_id(const uint8_t *p, uint64_t *p_wwn)
{
    int i;

    if ((p[1] & 0xF) == 3) {
        /* NAA designator type */
        if (p[3] != 8) {
            return -EINVAL;
        }
        *p_wwn = ldq_be_p(p + 4);
        return 0;
    }

    if ((p[1] & 0xF) == 8) {
        /* SCSI name string designator type */
        if (p[3] < 20 || memcmp(&p[4], "naa.", 4)) {
            return -EINVAL;
        }
        if (p[3] > 20 && p[24] != ',') {
            return -EINVAL;
        }
        *p_wwn = 0;
        /* Parse 16 hex digits following the "naa." prefix. */
        for (i = 8; i < 24; i++) {
            char c = qemu_toupper(p[i]);
            c -= (c >= '0' && c <= '9' ? '0' : 'A' - 10);
            *p_wwn = (*p_wwn << 4) | c;
        }
        return 0;
    }

    return -EINVAL;
}

/*
 * Synchronously issue @cmd via SG_IO and read up to @buf_size reply bytes
 * into @buf.  Returns 0 on success, -1 on any ioctl, driver or host error.
 */
int scsi_SG_IO_FROM_DEV(BlockBackend *blk, uint8_t *cmd, uint8_t cmd_size,
                        uint8_t *buf, uint8_t buf_size)
{
    sg_io_hdr_t io_header;
    uint8_t sensebuf[8];
    int ret;

    memset(&io_header, 0, sizeof(io_header));
    io_header.interface_id = 'S';
    io_header.dxfer_direction = SG_DXFER_FROM_DEV;
    io_header.dxfer_len = buf_size;
    io_header.dxferp = buf;
    io_header.cmdp = cmd;
    io_header.cmd_len = cmd_size;
    io_header.mx_sb_len = sizeof(sensebuf);
    io_header.sbp = sensebuf;
    io_header.timeout = 6000; /* XXX */

    ret = blk_ioctl(blk, SG_IO, &io_header);
    if (ret < 0 || io_header.driver_status || io_header.host_status) {
        return -1;
    }
    return 0;
}

/*
 * Executes an INQUIRY request with EVPD set to retrieve the
 * available VPD pages of the device. If the device does
 * not support the Block Limits page (page 0xb0), set
 * the needs_vpd_bl_emulation flag for future use.
 */
static void scsi_generic_set_vpd_bl_emulation(SCSIDevice *s)
{
    uint8_t cmd[6];
    uint8_t buf[250];
    uint8_t page_len;
    int ret, i;

    memset(cmd, 0, sizeof(cmd));
    memset(buf, 0, sizeof(buf));
    cmd[0] = INQUIRY;
    cmd[1] = 1;      /* EVPD */
    cmd[2] = 0x00;   /* Supported VPD Pages */
    cmd[4] = sizeof(buf);

    ret = scsi_SG_IO_FROM_DEV(s->conf.blk, cmd, sizeof(cmd),
                              buf, sizeof(buf));
    if (ret < 0) {
        /*
         * Do not assume anything if we can't retrieve the
         * INQUIRY response to assert the VPD Block Limits
         * support.
         */
        s->needs_vpd_bl_emulation = false;
        return;
    }

    page_len = buf[3];
    for (i = 4; i < MIN(sizeof(buf), page_len + 4); i++) {
        if (buf[i] == 0xb0) {
            s->needs_vpd_bl_emulation = false;
            return;
        }
    }
    s->needs_vpd_bl_emulation = true;
}

/*
 * Fetch VPD page 0x83 (Device Identification) and record any NAA WWNs
 * found for the logical unit (s->wwn) and the target port (s->port_wwn).
 */
static void scsi_generic_read_device_identification(SCSIDevice *s)
{
    uint8_t cmd[6];
    uint8_t buf[250];
    int ret;
    int i, len;

    memset(cmd, 0, sizeof(cmd));
    memset(buf, 0, sizeof(buf));
    cmd[0] = INQUIRY;
    cmd[1] = 1;      /* EVPD */
    cmd[2] = 0x83;   /* Device Identification */
    cmd[4] = sizeof(buf);

    ret = scsi_SG_IO_FROM_DEV(s->conf.blk, cmd, sizeof(cmd),
                              buf, sizeof(buf));
    if (ret < 0) {
        return;
    }

    /* Walk the designation descriptor list, clamped to what we read. */
    len = MIN((buf[2] << 8) | buf[3], sizeof(buf) - 4);
    for (i = 0; i + 3 <= len; ) {
        const uint8_t *p = &buf[i + 4];
        uint64_t wwn;

        if (i + (p[3] + 4) > len) {
            break;
        }

        if ((p[1] & 0x10) == 0) {
            /* Associated with the logical unit */
            if (read_naa_id(p, &wwn) == 0) {
                s->wwn = wwn;
            }
        } else if ((p[1] & 0x10) == 0x10) {
            /* Associated with the target port */
            if (read_naa_id(p, &wwn) == 0) {
                s->port_wwn = wwn;
            }
        }

        i += p[3] + 4;
    }
}

/* Probe device identity and, for disks, the need for VPD BL emulation. */
void scsi_generic_read_device_inquiry(SCSIDevice *s)
{
    scsi_generic_read_device_identification(s);
    if (s->type == TYPE_DISK) {
        scsi_generic_set_vpd_bl_emulation(s);
    } else {
        s->needs_vpd_bl_emulation = false;
    }
}

/*
 * Issue MODE SENSE(6) and return the stream block size from the block
 * descriptor (bytes 9..11), or -1 on failure.
 */
static int get_stream_blocksize(BlockBackend *blk)
{
    uint8_t cmd[6];
    uint8_t buf[12];
    int ret;

    memset(cmd, 0, sizeof(cmd));
    memset(buf, 0, sizeof(buf));
    cmd[0] = MODE_SENSE;
    cmd[4] = sizeof(buf);

    ret = scsi_SG_IO_FROM_DEV(blk, cmd, sizeof(cmd), buf, sizeof(buf));
    if (ret < 0) {
        return -1;
    }

    return (buf[9] << 16) | (buf[10] << 8) | buf[11];
}

/* Device reset: forget the sniffed SCSI version and fail pending requests. */
static void scsi_generic_reset(DeviceState *dev)
{
    SCSIDevice *s = SCSI_DEVICE(dev);

    s->scsi_version = s->default_scsi_version;
    scsi_device_purge_requests(s, SENSE_CODE(RESET));
}

/* Realize: validate the backing drive, probe the host device and set up state. */
static void scsi_generic_realize(SCSIDevice *s, Error **errp)
{
    int rc;
    int sg_version;
    struct sg_scsi_id scsiid;

    if (!s->conf.blk) {
        error_setg(errp, "drive property not set");
        return;
    }

    if (blk_get_on_error(s->conf.blk, 0) != BLOCKDEV_ON_ERROR_ENOSPC) {
        error_setg(errp, "Device doesn't support drive option werror");
        return;
    }
    if (blk_get_on_error(s->conf.blk, 1) != BLOCKDEV_ON_ERROR_REPORT) {
        error_setg(errp, "Device doesn't support drive option rerror");
        return;
    }

    /* check we are using a driver managing SG_IO (version 3 and after) */
    rc = blk_ioctl(s->conf.blk, SG_GET_VERSION_NUM, &sg_version);
    if (rc < 0) {
        error_setg_errno(errp, -rc, "cannot get SG_IO version number");
        if (rc != -EPERM) {
            error_append_hint(errp, "Is this a SCSI device?\n");
        }
        return;
    }
    if (sg_version < 30000) {
        error_setg(errp, "scsi generic interface too old");
        return;
    }

    /* get LUN of the /dev/sg? */
    if (blk_ioctl(s->conf.blk, SG_GET_SCSI_ID, &scsiid)) {
        error_setg(errp, "SG_GET_SCSI_ID ioctl failed");
        return;
    }
    if (!blkconf_apply_backend_options(&s->conf,
                                       blk_is_read_only(s->conf.blk),
                                       true, errp)) {
        return;
    }

    /* define device state */
    s->type = scsiid.scsi_type;
    trace_scsi_generic_realize_type(s->type);

    switch (s->type) {
    case TYPE_TAPE:
        s->blocksize = get_stream_blocksize(s->conf.blk);
        if (s->blocksize == -1) {
            s->blocksize = 0;
        }
        break;

        /* Make a guess for block devices, we'll fix it when the guest sends
         * READ CAPACITY. If they don't, they likely would assume these sizes
         * anyway. (TODO: they could also send MODE SENSE).
         */
    case TYPE_ROM:
    case TYPE_WORM:
        s->blocksize = 2048;
        break;
    default:
        s->blocksize = 512;
        break;
    }

    trace_scsi_generic_realize_blocksize(s->blocksize);

    /* Only used by scsi-block, but initialize it nevertheless to be clean. */
    s->default_scsi_version = -1;
    scsi_generic_read_device_inquiry(s);
}

/* Request-level callbacks for the passthrough device. */
const SCSIReqOps scsi_generic_req_ops = {
    .size         = sizeof(SCSIGenericReq),
    .free_req     = scsi_free_request,
    .send_command = scsi_send_command,
    .read_data    = scsi_read_data,
    .write_data   = scsi_write_data,
    .get_buf      = scsi_get_buf,
    .load_request = scsi_generic_load_request,
    .save_request = scsi_generic_save_request,
};

/* Allocate a new passthrough request bound to scsi_generic_req_ops. */
static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun,
                                     uint8_t *buf, void *hba_private)
{
    return scsi_req_alloc(&scsi_generic_req_ops, d, tag, lun, hba_private);
}

static Property scsi_generic_properties[] = {
    DEFINE_PROP_DRIVE("drive", SCSIDevice, conf.blk),
    DEFINE_PROP_BOOL("share-rw", SCSIDevice, conf.share_rw, false),
    DEFINE_PROP_END_OF_LIST(),
};

/* CDB parsing is delegated entirely to the generic SCSI bus helper. */
static int scsi_generic_parse_cdb(SCSIDevice *dev, SCSICommand *cmd,
                                  uint8_t *buf, void *hba_private)
{
    return scsi_bus_parse_cdb(dev, cmd, buf, hba_private);
}

static void scsi_generic_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);

    sc->realize = scsi_generic_realize;
    sc->alloc_req = scsi_new_request;
    sc->parse_cdb = scsi_generic_parse_cdb;
    dc->fw_name = "disk";
    dc->desc = "pass through generic scsi device (/dev/sg*)";
    dc->reset = scsi_generic_reset;
    dc->props = scsi_generic_properties;
    dc->vmsd = &vmstate_scsi_device;
}

static const TypeInfo scsi_generic_info = {
    .name          = "scsi-generic",
    .parent        = TYPE_SCSI_DEVICE,
    .instance_size = sizeof(SCSIDevice),
    .class_init    = scsi_generic_class_initfn,
};

static void scsi_generic_register_types(void)
{
    type_register_static(&scsi_generic_info);
}

type_init(scsi_generic_register_types)

#endif /* __linux__ */