/*
 * Generic SCSI Device support
 *
 * Copyright (c) 2007 Bull S.A.S.
 * Based on code by Paul Brook
 * Based on code by Fabrice Bellard
 *
 * Written by Laurent Vivier <Laurent.Vivier@bull.net>
 *
 * This code is licensed under the LGPL.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/ctype.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "hw/scsi/scsi.h"
#include "hw/scsi/emulation.h"
#include "sysemu/block-backend.h"
#include "trace.h"

#ifdef __linux__

#include <scsi/sg.h>
#include "scsi/constants.h"

#ifndef MAX_UINT
#define MAX_UINT ((unsigned int)-1)
#endif

/*
 * Per-request state for a command passed through to the host via the
 * Linux SG_IO ioctl.  The embedded SCSIRequest must be first so that
 * DO_UPCAST() from SCSIRequest works.
 */
typedef struct SCSIGenericReq {
    SCSIRequest req;
    uint8_t *buf;           /* data buffer, owned by this request (g_free'd
                             * in scsi_free_request) */
    int buflen;             /* allocated size of buf, in bytes */
    int len;                /* transfer state: set to cmd.xfer when the
                             * command is sent, 0 for writes, -1 once read
                             * data has been delivered */
    sg_io_hdr_t io_header;  /* header handed to the SG_IO ioctl */
} SCSIGenericReq;

/*
 * Migration: save the request.  Only a TO_DEV (write) payload needs to be
 * saved; read data will be fetched again from the device on resume.
 */
static void scsi_generic_save_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);

    qemu_put_sbe32s(f, &r->buflen);
    if (r->buflen && r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        assert(!r->req.sg);
        qemu_put_buffer(f, r->buf, r->req.cmd.xfer);
    }
}

/* Migration: restore state saved by scsi_generic_save_request(). */
static void scsi_generic_load_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);

    qemu_get_sbe32s(f, &r->buflen);
    if (r->buflen && r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        assert(!r->req.sg);
        qemu_get_buffer(f, r->buf, r->req.cmd.xfer);
    }
}

/* Free the data buffer owned by the request (buf may be NULL). */
static void scsi_free_request(SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);

    g_free(r->buf);
}

/* Helper function for command completion.
 */
static void scsi_command_complete_noio(SCSIGenericReq *r, int ret)
{
    int status;
    SCSISense sense;

    /* Must not be called while an AIO operation is still in flight. */
    assert(r->req.aiocb == NULL);

    if (r->req.io_canceled) {
        scsi_req_cancel_complete(&r->req);
        goto done;
    }
    /* Map errno / SG_IO result into a SCSI status and (maybe) sense data. */
    status = sg_io_sense_from_errno(-ret, &r->io_header, &sense);
    if (status == CHECK_CONDITION) {
        if (r->io_header.driver_status & SG_ERR_DRIVER_SENSE) {
            /* The device supplied real sense data; pass it through as-is. */
            r->req.sense_len = r->io_header.sb_len_wr;
        } else {
            /* Synthesize sense data from the errno-derived SCSISense. */
            scsi_req_build_sense(&r->req, sense);
        }
    }

    trace_scsi_generic_command_complete_noio(r, r->req.tag, status);

    scsi_req_complete(&r->req, status);
done:
    /* Drop the reference taken when the command was submitted. */
    scsi_req_unref(&r->req);
}

/*
 * AIO completion callback for commands with no data transfer.
 * Runs with the AioContext lock not yet held, hence the acquire/release
 * around the common completion path.
 */
static void scsi_command_complete(void *opaque, int ret)
{
    SCSIGenericReq *r = (SCSIGenericReq *)opaque;
    SCSIDevice *s = r->req.dev;

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->conf.blk));
    scsi_command_complete_noio(r, ret);
    aio_context_release(blk_get_aio_context(s->conf.blk));
}

/*
 * Fill in the sg_io_hdr_t for the request and submit it asynchronously
 * via the SG_IO ioctl.  Returns 0 on successful submission, -EIO if the
 * AIO request could not be created.  @complete is invoked on completion
 * with the request as opaque value.
 */
static int execute_command(BlockBackend *blk,
                           SCSIGenericReq *r, int direction,
                           BlockCompletionFunc *complete)
{
    r->io_header.interface_id = 'S';
    r->io_header.dxfer_direction = direction;
    r->io_header.dxferp = r->buf;
    r->io_header.dxfer_len = r->buflen;
    r->io_header.cmdp = r->req.cmd.buf;
    r->io_header.cmd_len = r->req.cmd.len;
    r->io_header.mx_sb_len = sizeof(r->req.sense);
    r->io_header.sbp = r->req.sense;
    /* MAX_UINT effectively disables the host-side command timeout. */
    r->io_header.timeout = MAX_UINT;
    r->io_header.usr_ptr = r;
    r->io_header.flags |= SG_FLAG_DIRECT_IO;

    r->req.aiocb = blk_aio_ioctl(blk, SG_IO, &r->io_header, complete, r);
    if (r->req.aiocb == NULL) {
        return -EIO;
    }

    return 0;
}

/*
 * Post-process an INQUIRY reply in r->buf: latch the device's SCSI
 * version on standard INQUIRY data, patch the Block Limits VPD page
 * (0xb0), and advertise page 0xb0 in the Supported VPD Pages list when
 * we emulate it.
 */
static void scsi_handle_inquiry_reply(SCSIGenericReq *r, SCSIDevice *s)
{
    uint8_t page, page_idx;

    /*
     * EVPD set to zero returns the standard INQUIRY data.
     *
     * Check if scsi_version is unset (-1) to avoid re-defining it
     * each time an INQUIRY with standard data is received.
     * scsi_version is initialized with -1 in scsi_generic_reset
     * and scsi_disk_reset, making sure that we'll set the
     * scsi_version after a reset. If the version field of the
     * INQUIRY response somehow changes after a guest reboot,
     * we'll be able to keep track of it.
     *
     * On SCSI-2 and older, first 3 bits of byte 2 is the
     * ANSI-approved version, while on later versions the
     * whole byte 2 contains the version. Check if we're dealing
     * with a newer version and, in that case, assign the
     * whole byte.
     */
    if (s->scsi_version == -1 && !(r->req.cmd.buf[1] & 0x01)) {
        s->scsi_version = r->buf[2] & 0x07;
        if (s->scsi_version > 2) {
            s->scsi_version = r->buf[2];
        }
    }

    if (s->type == TYPE_DISK && (r->req.cmd.buf[1] & 0x01)) {
        page = r->req.cmd.buf[2];
        if (page == 0xb0) {
            /*
             * Clamp the reported maximum transfer length to what the
             * host block layer will actually accept.
             */
            uint32_t max_transfer =
                blk_get_max_transfer(s->conf.blk) / s->blocksize;

            assert(max_transfer);
            /*
             * NOTE(review): these writes at buf[8..15] are not guarded
             * by an r->buflen check, unlike the page 0x00 path below —
             * verify the buffer is always large enough here.
             */
            stl_be_p(&r->buf[8], max_transfer);
            /* Also take care of the opt xfer len. */
            stl_be_p(&r->buf[12],
                     MIN_NON_ZERO(max_transfer, ldl_be_p(&r->buf[12])));
        } else if (s->needs_vpd_bl_emulation && page == 0x00 && r->buflen >= 4) {
            /*
             * Now we're capable of supplying the VPD Block Limits
             * response if the hardware can't. Add it in the INQUIRY
             * Supported VPD pages response in case we are using the
             * emulation for this device.
             *
             * This way, the guest kernel will be aware of the support
             * and will use it to proper setup the SCSI device.
             *
             * VPD page numbers must be sorted, so insert 0xb0 at the
             * right place with an in-place insert. When the while loop
             * begins the device response is at r[0] to r[page_idx - 1].
             */
            page_idx = lduw_be_p(r->buf + 2) + 4;
            page_idx = MIN(page_idx, r->buflen);
            while (page_idx > 4 && r->buf[page_idx - 1] >= 0xb0) {
                if (page_idx < r->buflen) {
                    r->buf[page_idx] = r->buf[page_idx - 1];
                }
                page_idx--;
            }
            if (page_idx < r->buflen) {
                r->buf[page_idx] = 0xb0;
            }
            /* Bump the page-list length even if the page byte itself
             * did not fit in the (truncated) buffer. */
            stw_be_p(r->buf + 2, lduw_be_p(r->buf + 2) + 1);
        }
    }
}

/*
 * Build an emulated VPD Block Limits (0xb0) response in r->buf, used
 * when the passthrough device rejected the page.  Returns the number
 * of valid bytes in r->buf (the full buffer; unused bytes are zero).
 */
static int scsi_generic_emulate_block_limits(SCSIGenericReq *r, SCSIDevice *s)
{
    int len;
    uint8_t buf[64];

    SCSIBlockLimits bl = {
        .max_io_sectors = blk_get_max_transfer(s->conf.blk) / s->blocksize
    };

    memset(r->buf, 0, r->buflen);
    stb_p(buf, s->type);
    stb_p(buf + 1, 0xb0);
    len = scsi_emulate_block_limits(buf + 4, &bl);
    assert(len <= sizeof(buf) - 4);
    stw_be_p(buf + 2, len);

    memcpy(r->buf, buf, MIN(r->buflen, len + 4));

    r->io_header.sb_len_wr = 0;

    /*
     * We have valid contents in the reply buffer but the
     * io_header can report a sense error coming from
     * the hardware in scsi_command_complete_noio. Clean
     * up the io_header to avoid reporting it.
     */
    r->io_header.driver_status = 0;
    r->io_header.status = 0;

    return r->buflen;
}

/*
 * AIO completion callback for read (FROM_DEV) transfers.  Delivers the
 * data to the guest, snooping certain replies (READ CAPACITY, MODE
 * SENSE, INQUIRY) to keep device state in sync, and emulating the VPD
 * Block Limits page when the hardware rejected it.
 */
static void scsi_read_complete(void * opaque, int ret)
{
    SCSIGenericReq *r = (SCSIGenericReq *)opaque;
    SCSIDevice *s = r->req.dev;
    int len;

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->conf.blk));

    if (ret || r->req.io_canceled) {
        scsi_command_complete_noio(r, ret);
        goto done;
    }

    /* Bytes actually transferred = requested length minus residual. */
    len = r->io_header.dxfer_len - r->io_header.resid;
    trace_scsi_generic_read_complete(r->req.tag, len);

    /* Mark the data as consumed; a further read_data completes the req. */
    r->len = -1;

    /*
     * Check if this is a VPD Block Limits request that
     * resulted in sense error but would need emulation.
     * In this case, emulate a valid VPD response.
     */
    if (s->needs_vpd_bl_emulation && ret == 0 &&
        (r->io_header.driver_status & SG_ERR_DRIVER_SENSE) &&
        r->req.cmd.buf[0] == INQUIRY &&
        (r->req.cmd.buf[1] & 0x01) &&
        r->req.cmd.buf[2] == 0xb0) {
        SCSISense sense =
            scsi_parse_sense_buf(r->req.sense, r->io_header.sb_len_wr);
        if (sense.key == ILLEGAL_REQUEST) {
            len = scsi_generic_emulate_block_limits(r, s);
            /*
             * No need to let scsi_read_complete go on and handle an
             * INQUIRY VPD BL request we created manually.
             */
            goto req_complete;
        }
    }

    if (len == 0) {
        scsi_command_complete_noio(r, 0);
        goto done;
    }

    /* Snoop READ CAPACITY output to set the blocksize. */
    if (r->req.cmd.buf[0] == READ_CAPACITY_10 &&
        (ldl_be_p(&r->buf[0]) != 0xffffffffU || s->max_lba == 0)) {
        s->blocksize = ldl_be_p(&r->buf[4]);
        s->max_lba = ldl_be_p(&r->buf[0]) & 0xffffffffULL;
    } else if (r->req.cmd.buf[0] == SERVICE_ACTION_IN_16 &&
               (r->req.cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) {
        s->blocksize = ldl_be_p(&r->buf[8]);
        s->max_lba = ldq_be_p(&r->buf[0]);
    }
    blk_set_guest_block_size(s->conf.blk, s->blocksize);

    /* Patch MODE SENSE device specific parameters if the BDS is opened
     * readonly.
     */
    if ((s->type == TYPE_DISK || s->type == TYPE_TAPE) &&
        blk_is_read_only(s->conf.blk) &&
        (r->req.cmd.buf[0] == MODE_SENSE ||
         r->req.cmd.buf[0] == MODE_SENSE_10) &&
        (r->req.cmd.buf[1] & 0x8) == 0) {
        /* Set the write-protect bit in the device-specific parameter. */
        if (r->req.cmd.buf[0] == MODE_SENSE) {
            r->buf[2] |= 0x80;
        } else {
            r->buf[3] |= 0x80;
        }
    }
    if (r->req.cmd.buf[0] == INQUIRY) {
        scsi_handle_inquiry_reply(r, s);
    }

req_complete:
    scsi_req_data(&r->req, len);
    scsi_req_unref(&r->req);

done:
    aio_context_release(blk_get_aio_context(s->conf.blk));
}

/* Read more data from scsi device into buffer.
 */
static void scsi_read_data(SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
    SCSIDevice *s = r->req.dev;
    int ret;

    trace_scsi_generic_read_data(req->tag);

    /* The request is used as the AIO opaque value, so add a ref.  */
    scsi_req_ref(&r->req);
    if (r->len == -1) {
        /* Data was already delivered by scsi_read_complete; finish now. */
        scsi_command_complete_noio(r, 0);
        return;
    }

    ret = execute_command(s->conf.blk, r, SG_DXFER_FROM_DEV,
                          scsi_read_complete);
    if (ret < 0) {
        scsi_command_complete_noio(r, ret);
    }
}

/*
 * AIO completion callback for write (TO_DEV) transfers.  Snoops
 * MODE SELECT on tapes to track the guest-configured block size.
 */
static void scsi_write_complete(void * opaque, int ret)
{
    SCSIGenericReq *r = (SCSIGenericReq *)opaque;
    SCSIDevice *s = r->req.dev;

    trace_scsi_generic_write_complete(ret);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->conf.blk));

    if (ret || r->req.io_canceled) {
        scsi_command_complete_noio(r, ret);
        goto done;
    }

    /* Block descriptor bytes 9-11 of a 12-byte MODE SELECT parameter
     * list hold the new tape block length. */
    if (r->req.cmd.buf[0] == MODE_SELECT && r->req.cmd.buf[4] == 12 &&
        s->type == TYPE_TAPE) {
        s->blocksize = (r->buf[9] << 16) | (r->buf[10] << 8) | r->buf[11];
        trace_scsi_generic_write_complete_blocksize(s->blocksize);
    }

    scsi_command_complete_noio(r, ret);

done:
    aio_context_release(blk_get_aio_context(s->conf.blk));
}

/* Write data to a scsi device.  Returns nonzero on failure.
   The transfer may complete asynchronously.  */
static void scsi_write_data(SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
    SCSIDevice *s = r->req.dev;
    int ret;

    trace_scsi_generic_write_data(req->tag);
    if (r->len == 0) {
        /* First call: ask the HBA to fill the buffer with cmd.xfer bytes. */
        r->len = r->buflen;
        scsi_req_data(&r->req, r->len);
        return;
    }

    /* The request is used as the AIO opaque value, so add a ref.  */
    scsi_req_ref(&r->req);
    ret = execute_command(s->conf.blk, r, SG_DXFER_TO_DEV, scsi_write_complete);
    if (ret < 0) {
        scsi_command_complete_noio(r, ret);
    }
}

/* Return a pointer to the data buffer.  */
static uint8_t *scsi_get_buf(SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);

    return r->buf;
}

/* Trace helper: dump a CDB as " 0xNN" hex bytes (5 chars per byte). */
static void scsi_generic_command_dump(uint8_t *cmd, int len)
{
    int i;
    char *line_buffer, *p;

    line_buffer = g_malloc(len * 5 + 1);

    for (i = 0, p = line_buffer; i < len; i++) {
        p += sprintf(p, " 0x%02x", cmd[i]);
    }
    trace_scsi_generic_send_command(line_buffer);

    g_free(line_buffer);
}

/* Execute a scsi command.  Returns the length of the data expected by the
   command.  This will be Positive for data transfers from the device
   (eg. disk reads), negative for transfers to the device (eg. disk writes),
   and zero if the command does not transfer any data.  */

static int32_t scsi_send_command(SCSIRequest *req, uint8_t *cmd)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
    SCSIDevice *s = r->req.dev;
    int ret;

    if (trace_event_get_state_backends(TRACE_SCSI_GENERIC_SEND_COMMAND)) {
        scsi_generic_command_dump(cmd, r->req.cmd.len);
    }

    if (r->req.cmd.xfer == 0) {
        /* No data phase: submit immediately with no buffer. */
        g_free(r->buf);
        r->buflen = 0;
        r->buf = NULL;
        /* The request is used as the AIO opaque value, so add a ref.  */
        scsi_req_ref(&r->req);
        ret = execute_command(s->conf.blk, r, SG_DXFER_NONE,
                              scsi_command_complete);
        if (ret < 0) {
            scsi_command_complete_noio(r, ret);
            return 0;
        }
        return 0;
    }

    /* (Re)allocate the data buffer to match the transfer length. */
    if (r->buflen != r->req.cmd.xfer) {
        g_free(r->buf);
        r->buf = g_malloc(r->req.cmd.xfer);
        r->buflen = r->req.cmd.xfer;
    }

    memset(r->buf, 0, r->buflen);
    r->len = r->req.cmd.xfer;
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        r->len = 0;
        return -r->req.cmd.xfer;
    } else {
        return r->req.cmd.xfer;
    }
}

/*
 * Extract a 64-bit NAA WWN from a VPD 0x83 designation descriptor @p.
 * Handles both the binary NAA designator (type 3) and the "naa.XXXX"
 * SCSI name string designator (type 8).  Returns 0 and stores the WWN
 * in *p_wwn on success, -EINVAL if the descriptor is not usable.
 */
static int read_naa_id(const uint8_t *p, uint64_t *p_wwn)
{
    int i;

    if ((p[1] & 0xF) == 3) {
        /* NAA designator type */
        if (p[3] != 8) {
            return -EINVAL;
        }
        *p_wwn = ldq_be_p(p + 4);
        return 0;
    }

    if ((p[1] & 0xF) == 8) {
        /* SCSI name string designator type */
        if (p[3] < 20 || memcmp(&p[4], "naa.", 4)) {
            return -EINVAL;
        }
        if (p[3] > 20 && p[24] != ',') {
            return -EINVAL;
        }
        *p_wwn = 0;
        /* Parse 16 hex digits following the "naa." prefix. */
        for (i = 8; i < 24; i++) {
            char c = qemu_toupper(p[i]);
            c -= (c >= '0' && c <= '9' ? '0' : 'A' - 10);
            *p_wwn = (*p_wwn << 4) | c;
        }
        return 0;
    }

    return -EINVAL;
}

/*
 * Synchronously execute @cmd on @blk with a FROM_DEV data phase into
 * @buf.  Returns 0 on success, -1 on ioctl failure or any driver/host
 * error reported in the io_header.  Sense data is discarded.
 */
int scsi_SG_IO_FROM_DEV(BlockBackend *blk, uint8_t *cmd, uint8_t cmd_size,
                        uint8_t *buf, uint8_t buf_size)
{
    sg_io_hdr_t io_header;
    uint8_t sensebuf[8];
    int ret;

    memset(&io_header, 0, sizeof(io_header));
    io_header.interface_id = 'S';
    io_header.dxfer_direction = SG_DXFER_FROM_DEV;
    io_header.dxfer_len = buf_size;
    io_header.dxferp = buf;
    io_header.cmdp = cmd;
    io_header.cmd_len = cmd_size;
    io_header.mx_sb_len = sizeof(sensebuf);
    io_header.sbp = sensebuf;
    io_header.timeout = 6000; /* XXX */

    ret = blk_ioctl(blk, SG_IO, &io_header);
    if (ret < 0 || io_header.driver_status || io_header.host_status) {
        return -1;
    }
    return 0;
}

/*
 * Executes an INQUIRY request with EVPD set to retrieve the
 * available VPD pages of the device. If the device does
 * not support the Block Limits page (page 0xb0), set
 * the needs_vpd_bl_emulation flag for future use.
 */
static void scsi_generic_set_vpd_bl_emulation(SCSIDevice *s)
{
    uint8_t cmd[6];
    uint8_t buf[250];
    uint8_t page_len;
    int ret, i;

    memset(cmd, 0, sizeof(cmd));
    memset(buf, 0, sizeof(buf));
    cmd[0] = INQUIRY;
    cmd[1] = 1;                 /* EVPD */
    cmd[2] = 0x00;              /* Supported VPD Pages */
    cmd[4] = sizeof(buf);

    ret = scsi_SG_IO_FROM_DEV(s->conf.blk, cmd, sizeof(cmd),
                              buf, sizeof(buf));
    if (ret < 0) {
        /*
         * Do not assume anything if we can't retrieve the
         * INQUIRY response to assert the VPD Block Limits
         * support.
         */
        s->needs_vpd_bl_emulation = false;
        return;
    }

    /* Scan the returned page list for page 0xb0. */
    page_len = buf[3];
    for (i = 4; i < MIN(sizeof(buf), page_len + 4); i++) {
        if (buf[i] == 0xb0) {
            s->needs_vpd_bl_emulation = false;
            return;
        }
    }
    s->needs_vpd_bl_emulation = true;
}

/*
 * Read the Device Identification VPD page (0x83) and record the first
 * NAA WWNs found for the logical unit (s->wwn) and for the target port
 * (s->port_wwn).  Failures are silently ignored.
 */
static void scsi_generic_read_device_identification(SCSIDevice *s)
{
    uint8_t cmd[6];
    uint8_t buf[250];
    int ret;
    int i, len;

    memset(cmd, 0, sizeof(cmd));
    memset(buf, 0, sizeof(buf));
    cmd[0] = INQUIRY;
    cmd[1] = 1;                 /* EVPD */
    cmd[2] = 0x83;              /* Device Identification */
    cmd[4] = sizeof(buf);

    ret = scsi_SG_IO_FROM_DEV(s->conf.blk, cmd, sizeof(cmd),
                              buf, sizeof(buf));
    if (ret < 0) {
        return;
    }

    /* Walk the designation descriptor list. */
    len = MIN((buf[2] << 8) | buf[3], sizeof(buf) - 4);
    for (i = 0; i + 3 <= len; ) {
        const uint8_t *p = &buf[i + 4];
        uint64_t wwn;

        if (i + (p[3] + 4) > len) {
            break;
        }

        if ((p[1] & 0x10) == 0) {
            /* Associated with the logical unit */
            if (read_naa_id(p, &wwn) == 0) {
                s->wwn = wwn;
            }
        } else if ((p[1] & 0x10) == 0x10) {
            /* Associated with the target port */
            if (read_naa_id(p, &wwn) == 0) {
                s->port_wwn = wwn;
            }
        }

        i += p[3] + 4;
    }
}

/*
 * Probe device identity at realize/reset time: WWNs for every device,
 * plus VPD Block Limits emulation detection for disks only.
 */
void scsi_generic_read_device_inquiry(SCSIDevice *s)
{
    scsi_generic_read_device_identification(s);
    if (s->type == TYPE_DISK) {
        scsi_generic_set_vpd_bl_emulation(s);
    } else {
        s->needs_vpd_bl_emulation = false;
    }
}

/*
 * Query a stream (tape) device's current block size via MODE SENSE.
 * Returns the block length from the block descriptor, or -1 on error.
 */
static int get_stream_blocksize(BlockBackend *blk)
{
    uint8_t cmd[6];
    uint8_t buf[12];
    int ret;

    memset(cmd, 0, sizeof(cmd));
    memset(buf, 0, sizeof(buf));
    cmd[0] = MODE_SENSE;
    cmd[4] = sizeof(buf);

    ret = scsi_SG_IO_FROM_DEV(blk, cmd, sizeof(cmd), buf, sizeof(buf));
    if (ret < 0) {
        return -1;
    }

    /* Bytes 9-11 of the reply are the block descriptor's block length. */
    return (buf[9] << 16) | (buf[10] << 8) | buf[11];
}

/* Device reset: forget the cached SCSI version and cancel all requests. */
static void scsi_generic_reset(DeviceState *dev)
{
    SCSIDevice *s = SCSI_DEVICE(dev);

    s->scsi_version = s->default_scsi_version;
    scsi_device_purge_requests(s, SENSE_CODE(RESET));
}

/*
 * Realize the scsi-generic device: validate the backend and its error
 * policy, check the host sg driver version, read the device type, and
 * initialize the block size and inquiry-derived state.
 */
static void scsi_generic_realize(SCSIDevice *s, Error **errp)
{
    int rc;
    int sg_version;
    struct sg_scsi_id scsiid;

    if (!s->conf.blk) {
        error_setg(errp, "drive property not set");
        return;
    }

    /* Passthrough cannot emulate werror/rerror policies; only the
     * default report/enospc behavior is supported. */
    if (blk_get_on_error(s->conf.blk, 0) != BLOCKDEV_ON_ERROR_ENOSPC) {
        error_setg(errp, "Device doesn't support drive option werror");
        return;
    }
    if (blk_get_on_error(s->conf.blk, 1) != BLOCKDEV_ON_ERROR_REPORT) {
        error_setg(errp, "Device doesn't support drive option rerror");
        return;
    }

    /* check we are using a driver managing SG_IO (version 3 and after */
    rc = blk_ioctl(s->conf.blk, SG_GET_VERSION_NUM, &sg_version);
    if (rc < 0) {
        error_setg_errno(errp, -rc, "cannot get SG_IO version number");
        if (rc != -EPERM) {
            error_append_hint(errp, "Is this a SCSI device?\n");
        }
        return;
    }
    if (sg_version < 30000) {
        error_setg(errp, "scsi generic interface too old");
        return;
    }

    /* get LUN of the /dev/sg? */
    if (blk_ioctl(s->conf.blk, SG_GET_SCSI_ID, &scsiid)) {
        error_setg(errp, "SG_GET_SCSI_ID ioctl failed");
        return;
    }
    if (!blkconf_apply_backend_options(&s->conf,
                                       blk_is_read_only(s->conf.blk),
                                       true, errp)) {
        return;
    }

    /* define device state */
    s->type = scsiid.scsi_type;
    trace_scsi_generic_realize_type(s->type);

    switch (s->type) {
    case TYPE_TAPE:
        s->blocksize = get_stream_blocksize(s->conf.blk);
        if (s->blocksize == -1) {
            s->blocksize = 0;
        }
        break;

        /* Make a guess for block devices, we'll fix it when the guest sends.
         * READ CAPACITY.  If they don't, they likely would assume these sizes
         * anyway. (TODO: they could also send MODE SENSE).
         */
    case TYPE_ROM:
    case TYPE_WORM:
        s->blocksize = 2048;
        break;
    default:
        s->blocksize = 512;
        break;
    }

    trace_scsi_generic_realize_blocksize(s->blocksize);

    /* Only used by scsi-block, but initialize it nevertheless to be clean.  */
    s->default_scsi_version = -1;
    scsi_generic_read_device_inquiry(s);
}

/* Request callbacks used by the SCSI bus layer for this device type. */
const SCSIReqOps scsi_generic_req_ops = {
    .size         = sizeof(SCSIGenericReq),
    .free_req     = scsi_free_request,
    .send_command = scsi_send_command,
    .read_data    = scsi_read_data,
    .write_data   = scsi_write_data,
    .get_buf      = scsi_get_buf,
    .load_request = scsi_generic_load_request,
    .save_request = scsi_generic_save_request,
};

/* Allocate a new passthrough request for this device. */
static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun,
                                     uint8_t *buf, void *hba_private)
{
    return scsi_req_alloc(&scsi_generic_req_ops, d, tag, lun, hba_private);
}

static Property scsi_generic_properties[] = {
    DEFINE_PROP_DRIVE("drive", SCSIDevice, conf.blk),
    DEFINE_PROP_BOOL("share-rw", SCSIDevice, conf.share_rw, false),
    DEFINE_PROP_END_OF_LIST(),
};

/* CDB parsing is delegated to the generic bus-level parser. */
static int scsi_generic_parse_cdb(SCSIDevice *dev, SCSICommand *cmd,
                                  uint8_t *buf, void *hba_private)
{
    return scsi_bus_parse_cdb(dev, cmd, buf, hba_private);
}

static void scsi_generic_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);

    sc->realize      = scsi_generic_realize;
    sc->alloc_req    = scsi_new_request;
    sc->parse_cdb    = scsi_generic_parse_cdb;
    dc->fw_name = "disk";
    dc->desc = "pass through generic scsi device (/dev/sg*)";
    dc->reset = scsi_generic_reset;
    dc->props = scsi_generic_properties;
    dc->vmsd  = &vmstate_scsi_device;
}

static const TypeInfo scsi_generic_info = {
    .name          = "scsi-generic",
    .parent        = TYPE_SCSI_DEVICE,
    .instance_size = sizeof(SCSIDevice),
    .class_init    = scsi_generic_class_initfn,
};

static void scsi_generic_register_types(void)
{
    type_register_static(&scsi_generic_info);
}

type_init(scsi_generic_register_types)

#endif /* __linux__ */