/*
 * Generic SCSI Device support
 *
 * Copyright (c) 2007 Bull S.A.S.
 * Based on code by Paul Brook
 * Based on code by Fabrice Bellard
 *
 * Written by Laurent Vivier <Laurent.Vivier@bull.net>
 *
 * This code is licensed under the LGPL.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "qemu/error-report.h"
#include "hw/scsi/scsi.h"
#include "hw/scsi/emulation.h"
#include "sysemu/block-backend.h"

#ifdef __linux__

//#define DEBUG_SCSI

#ifdef DEBUG_SCSI
#define DPRINTF(fmt, ...) \
do { printf("scsi-generic: " fmt , ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) do {} while(0)
#endif

#define BADF(fmt, ...) \
do { fprintf(stderr, "scsi-generic: " fmt , ## __VA_ARGS__); } while (0)

#include <scsi/sg.h>
#include "scsi/constants.h"

#ifndef MAX_UINT
#define MAX_UINT ((unsigned int)-1)
#endif

/*
 * Per-request state for the pass-through device.  The data phase goes
 * through a bounce buffer (buf/buflen) that is described to the kernel
 * sg driver by io_header and submitted with the SG_IO ioctl.
 */
typedef struct SCSIGenericReq {
    SCSIRequest req;
    uint8_t *buf;           /* bounce buffer for the data phase (g_malloc'd) */
    int buflen;             /* allocated size of buf, in bytes */
    int len;                /* transfer bookkeeping: set to cmd.xfer when the
                             * command is sent, 0 while waiting for write data,
                             * -1 once a read has been satisfied */
    sg_io_hdr_t io_header;  /* Linux sg driver request header */
} SCSIGenericReq;

/*
 * Migration: save the request's buffer length and, for guest-to-host
 * transfers only, the bounce buffer contents (reads will be re-issued).
 */
static void scsi_generic_save_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);

    qemu_put_sbe32s(f, &r->buflen);
    if (r->buflen && r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        assert(!r->req.sg);
        qemu_put_buffer(f, r->buf, r->req.cmd.xfer);
    }
}

/* Migration: counterpart of scsi_generic_save_request. */
static void scsi_generic_load_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);

    qemu_get_sbe32s(f, &r->buflen);
    if (r->buflen && r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        assert(!r->req.sg);
        qemu_get_buffer(f, r->buf, r->req.cmd.xfer);
    }
}

/* Release the bounce buffer when the request is destroyed. */
static void scsi_free_request(SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);

    g_free(r->buf);
}

/* Helper function for command completion.  Maps the ioctl result to a
 * SCSI status plus sense data and drops the submission-time reference
 * on the request.
 */
static void scsi_command_complete_noio(SCSIGenericReq *r, int ret)
{
    int status;
    SCSISense sense;

    assert(r->req.aiocb == NULL);

    if (r->req.io_canceled) {
        scsi_req_cancel_complete(&r->req);
        goto done;
    }
    /* Derive the SCSI status from the errno and the SG_IO header. */
    status = sg_io_sense_from_errno(-ret, &r->io_header, &sense);
    if (status == CHECK_CONDITION) {
        if (r->io_header.driver_status & SG_ERR_DRIVER_SENSE) {
            /* The device supplied sense data; pass it through as-is. */
            r->req.sense_len = r->io_header.sb_len_wr;
        } else {
            /* No device sense available: synthesize it from the errno. */
            scsi_req_build_sense(&r->req, sense);
        }
    }

    DPRINTF("Command complete 0x%p tag=0x%x status=%d\n",
            r, r->req.tag, status);

    scsi_req_complete(&r->req, status);
done:
    scsi_req_unref(&r->req);
}

/* AIO completion callback for commands with no data phase. */
static void scsi_command_complete(void *opaque, int ret)
{
    SCSIGenericReq *r = (SCSIGenericReq *)opaque;
    SCSIDevice *s = r->req.dev;

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->conf.blk));
    scsi_command_complete_noio(r, ret);
    aio_context_release(blk_get_aio_context(s->conf.blk));
}

/*
 * Submit the CDB in r->req.cmd to the host device as an asynchronous
 * SG_IO ioctl.  @direction is one of the SG_DXFER_* constants and
 * @complete runs when the ioctl finishes.  Returns 0 if the AIO request
 * was issued, -EIO if it could not be created.
 */
static int execute_command(BlockBackend *blk,
                           SCSIGenericReq *r, int direction,
                           BlockCompletionFunc *complete)
{
    r->io_header.interface_id = 'S';
    r->io_header.dxfer_direction = direction;
    r->io_header.dxferp = r->buf;
    r->io_header.dxfer_len = r->buflen;
    r->io_header.cmdp = r->req.cmd.buf;
    r->io_header.cmd_len = r->req.cmd.len;
    r->io_header.mx_sb_len = sizeof(r->req.sense);
    r->io_header.sbp = r->req.sense;
    r->io_header.timeout = MAX_UINT;    /* largest value: effectively no timeout */
    r->io_header.usr_ptr = r;
    r->io_header.flags |= SG_FLAG_DIRECT_IO;

    r->req.aiocb = blk_aio_ioctl(blk, SG_IO, &r->io_header, complete, r);
    if (r->req.aiocb == NULL) {
        return -EIO;
    }

    return 0;
}

/*
 * Post-process an INQUIRY reply before it reaches the guest: record the
 * device's SCSI version, clamp the Block Limits (0xb0) page transfer
 * lengths to the host backend's limits, and advertise page 0xb0 in the
 * Supported VPD Pages list when we emulate it.
 */
static void scsi_handle_inquiry_reply(SCSIGenericReq *r, SCSIDevice *s)
{
    uint8_t page, page_idx;

    /*
     * EVPD set to zero returns the standard INQUIRY data.
     *
     * Check if scsi_version is unset (-1) to avoid re-defining it
     * each time an INQUIRY with standard data is received.
     * scsi_version is initialized with -1 in scsi_generic_reset
     * and scsi_disk_reset, making sure that we'll set the
     * scsi_version after a reset. If the version field of the
     * INQUIRY response somehow changes after a guest reboot,
     * we'll be able to keep track of it.
     *
     * On SCSI-2 and older, first 3 bits of byte 2 is the
     * ANSI-approved version, while on later versions the
     * whole byte 2 contains the version. Check if we're dealing
     * with a newer version and, in that case, assign the
     * whole byte.
     */
    if (s->scsi_version == -1 && !(r->req.cmd.buf[1] & 0x01)) {
        s->scsi_version = r->buf[2] & 0x07;
        if (s->scsi_version > 2) {
            s->scsi_version = r->buf[2];
        }
    }

    if (s->type == TYPE_DISK && (r->req.cmd.buf[1] & 0x01)) {
        page = r->req.cmd.buf[2];
        if (page == 0xb0) {
            /* Block Limits page: cap the reported maximum transfer length
             * to what the host block backend can actually handle. */
            uint32_t max_transfer =
                blk_get_max_transfer(s->conf.blk) / s->blocksize;

            assert(max_transfer);
            stl_be_p(&r->buf[8], max_transfer);
            /* Also take care of the opt xfer len. */
            stl_be_p(&r->buf[12],
                     MIN_NON_ZERO(max_transfer, ldl_be_p(&r->buf[12])));
        } else if (s->needs_vpd_bl_emulation && page == 0x00 && r->buflen >= 4) {
            /*
             * Now we're capable of supplying the VPD Block Limits
             * response if the hardware can't. Add it in the INQUIRY
             * Supported VPD pages response in case we are using the
             * emulation for this device.
             *
             * This way, the guest kernel will be aware of the support
             * and will use it to properly set up the SCSI device.
             *
             * VPD page numbers must be sorted, so insert 0xb0 at the
             * right place with an in-place insert. When the while loop
             * begins the device response is at r[0] to r[page_idx - 1].
             */
            page_idx = lduw_be_p(r->buf + 2) + 4;
            page_idx = MIN(page_idx, r->buflen);
            while (page_idx > 4 && r->buf[page_idx - 1] >= 0xb0) {
                if (page_idx < r->buflen) {
                    r->buf[page_idx] = r->buf[page_idx - 1];
                }
                page_idx--;
            }
            if (page_idx < r->buflen) {
                r->buf[page_idx] = 0xb0;
            }
            /* Account for the newly advertised page in the page length. */
            stw_be_p(r->buf + 2, lduw_be_p(r->buf + 2) + 1);
        }
    }
}

/*
 * Build an emulated VPD Block Limits (0xb0) response in r->buf from the
 * host block backend's limits, clearing any hardware-reported sense so
 * the caller sees a clean reply.  Returns the reply length (r->buflen).
 */
static int scsi_generic_emulate_block_limits(SCSIGenericReq *r, SCSIDevice *s)
{
    int len;
    uint8_t buf[64];

    SCSIBlockLimits bl = {
        .max_io_sectors = blk_get_max_transfer(s->conf.blk) / s->blocksize
    };

    /* Build the page in a local buffer, then copy as much as fits. */
    memset(r->buf, 0, r->buflen);
    stb_p(buf, s->type);
    stb_p(buf + 1, 0xb0);
    len = scsi_emulate_block_limits(buf + 4, &bl);
    assert(len <= sizeof(buf) - 4);
    stw_be_p(buf + 2, len);

    memcpy(r->buf, buf, MIN(r->buflen, len + 4));

    r->io_header.sb_len_wr = 0;

    /*
     * We have valid contents in the reply buffer but the
     * io_header can report a sense error coming from
     * the hardware in scsi_command_complete_noio. Clean
     * up the io_header to avoid reporting it.
     */
    r->io_header.driver_status = 0;
    r->io_header.status = 0;

    return r->buflen;
}

/* AIO completion callback for the device-to-guest (read) data phase. */
static void scsi_read_complete(void * opaque, int ret)
{
    SCSIGenericReq *r = (SCSIGenericReq *)opaque;
    SCSIDevice *s = r->req.dev;
    int len;

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->conf.blk));

    if (ret || r->req.io_canceled) {
        scsi_command_complete_noio(r, ret);
        goto done;
    }

    /* Actual number of bytes the device returned. */
    len = r->io_header.dxfer_len - r->io_header.resid;
    DPRINTF("Data ready tag=0x%x len=%d\n", r->req.tag, len);

    /* Mark the transfer as satisfied; see scsi_read_data. */
    r->len = -1;

    /*
     * Check if this is a VPD Block Limits request that
     * resulted in sense error but would need emulation.
     * In this case, emulate a valid VPD response.
     */
    if (s->needs_vpd_bl_emulation && ret == 0 &&
        (r->io_header.driver_status & SG_ERR_DRIVER_SENSE) &&
        r->req.cmd.buf[0] == INQUIRY &&
        (r->req.cmd.buf[1] & 0x01) &&
        r->req.cmd.buf[2] == 0xb0) {
        SCSISense sense =
            scsi_parse_sense_buf(r->req.sense, r->io_header.sb_len_wr);
        if (sense.key == ILLEGAL_REQUEST) {
            len = scsi_generic_emulate_block_limits(r, s);
            /*
             * No need to let scsi_read_complete go on and handle an
             * INQUIRY VPD BL request we created manually.
             */
            goto req_complete;
        }
    }

    if (len == 0) {
        scsi_command_complete_noio(r, 0);
        goto done;
    }

    /* Snoop READ CAPACITY output to set the blocksize. */
    if (r->req.cmd.buf[0] == READ_CAPACITY_10 &&
        (ldl_be_p(&r->buf[0]) != 0xffffffffU || s->max_lba == 0)) {
        s->blocksize = ldl_be_p(&r->buf[4]);
        s->max_lba = ldl_be_p(&r->buf[0]) & 0xffffffffULL;
    } else if (r->req.cmd.buf[0] == SERVICE_ACTION_IN_16 &&
               (r->req.cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) {
        s->blocksize = ldl_be_p(&r->buf[8]);
        s->max_lba = ldq_be_p(&r->buf[0]);
    }
    blk_set_guest_block_size(s->conf.blk, s->blocksize);

    /* Patch MODE SENSE device specific parameters if the BDS is opened
     * readonly.
     */
    if ((s->type == TYPE_DISK || s->type == TYPE_TAPE) &&
        blk_is_read_only(s->conf.blk) &&
        (r->req.cmd.buf[0] == MODE_SENSE ||
         r->req.cmd.buf[0] == MODE_SENSE_10) &&
        (r->req.cmd.buf[1] & 0x8) == 0) {
        /* Set the write-protect bit in the device-specific parameter. */
        if (r->req.cmd.buf[0] == MODE_SENSE) {
            r->buf[2] |= 0x80;
        } else {
            r->buf[3] |= 0x80;
        }
    }
    if (r->req.cmd.buf[0] == INQUIRY) {
        scsi_handle_inquiry_reply(r, s);
    }

req_complete:
    scsi_req_data(&r->req, len);
    scsi_req_unref(&r->req);

done:
    aio_context_release(blk_get_aio_context(s->conf.blk));
}

/* Read more data from scsi device into buffer.
 */
static void scsi_read_data(SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
    SCSIDevice *s = r->req.dev;
    int ret;

    DPRINTF("scsi_read_data tag=0x%x\n", req->tag);

    /* The request is used as the AIO opaque value, so add a ref. */
    scsi_req_ref(&r->req);
    if (r->len == -1) {
        /* The previous SG_IO already delivered all data; just finish. */
        scsi_command_complete_noio(r, 0);
        return;
    }

    ret = execute_command(s->conf.blk, r, SG_DXFER_FROM_DEV,
                          scsi_read_complete);
    if (ret < 0) {
        scsi_command_complete_noio(r, ret);
    }
}

/* AIO completion callback for the guest-to-host (write) data phase. */
static void scsi_write_complete(void * opaque, int ret)
{
    SCSIGenericReq *r = (SCSIGenericReq *)opaque;
    SCSIDevice *s = r->req.dev;

    DPRINTF("scsi_write_complete() ret = %d\n", ret);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->conf.blk));

    if (ret || r->req.io_canceled) {
        scsi_command_complete_noio(r, ret);
        goto done;
    }

    /* Snoop the block size a MODE SELECT with a 12-byte parameter list
     * programs into a tape device. */
    if (r->req.cmd.buf[0] == MODE_SELECT && r->req.cmd.buf[4] == 12 &&
        s->type == TYPE_TAPE) {
        s->blocksize = (r->buf[9] << 16) | (r->buf[10] << 8) | r->buf[11];
        DPRINTF("block size %d\n", s->blocksize);
    }

    scsi_command_complete_noio(r, ret);

done:
    aio_context_release(blk_get_aio_context(s->conf.blk));
}

/* Write data to a scsi device. Returns nonzero on failure.
   The transfer may complete asynchronously. */
static void scsi_write_data(SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
    SCSIDevice *s = r->req.dev;
    int ret;

    DPRINTF("scsi_write_data tag=0x%x\n", req->tag);
    if (r->len == 0) {
        /* First call: request the data to write from the HBA. */
        r->len = r->buflen;
        scsi_req_data(&r->req, r->len);
        return;
    }

    /* The request is used as the AIO opaque value, so add a ref.
     */
    scsi_req_ref(&r->req);
    ret = execute_command(s->conf.blk, r, SG_DXFER_TO_DEV, scsi_write_complete);
    if (ret < 0) {
        scsi_command_complete_noio(r, ret);
    }
}

/* Return a pointer to the data buffer.  */
static uint8_t *scsi_get_buf(SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);

    return r->buf;
}

/* Execute a scsi command.  Returns the length of the data expected by the
   command.  This will be Positive for data transfers from the device
   (eg. disk reads), negative for transfers to the device (eg. disk writes),
   and zero if the command does not transfer any data.  */

static int32_t scsi_send_command(SCSIRequest *req, uint8_t *cmd)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
    SCSIDevice *s = r->req.dev;
    int ret;

#ifdef DEBUG_SCSI
    DPRINTF("Command: data=0x%02x", cmd[0]);
    {
        int i;
        for (i = 1; i < r->req.cmd.len; i++) {
            printf(" 0x%02x", cmd[i]);
        }
        printf("\n");
    }
#endif

    if (r->req.cmd.xfer == 0) {
        /* No data phase: submit the ioctl right away. */
        g_free(r->buf);
        r->buflen = 0;
        r->buf = NULL;
        /* The request is used as the AIO opaque value, so add a ref.
         */
        scsi_req_ref(&r->req);
        ret = execute_command(s->conf.blk, r, SG_DXFER_NONE,
                              scsi_command_complete);
        if (ret < 0) {
            scsi_command_complete_noio(r, ret);
            return 0;
        }
        return 0;
    }

    /* (Re)size the bounce buffer to match the expected transfer. */
    if (r->buflen != r->req.cmd.xfer) {
        g_free(r->buf);
        r->buf = g_malloc(r->req.cmd.xfer);
        r->buflen = r->req.cmd.xfer;
    }

    memset(r->buf, 0, r->buflen);
    r->len = r->req.cmd.xfer;
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        r->len = 0;
        return -r->req.cmd.xfer;
    } else {
        return r->req.cmd.xfer;
    }
}

/*
 * Extract a 64-bit NAA world wide name from a designation descriptor of
 * a Device Identification VPD page (0x83).  Handles the binary NAA
 * designator (type 3) and the "naa."-prefixed SCSI name string (type 8).
 * Returns 0 and stores the WWN in *p_wwn on success, -EINVAL otherwise.
 */
static int read_naa_id(const uint8_t *p, uint64_t *p_wwn)
{
    int i;

    if ((p[1] & 0xF) == 3) {
        /* NAA designator type */
        if (p[3] != 8) {
            return -EINVAL;
        }
        *p_wwn = ldq_be_p(p + 4);
        return 0;
    }

    if ((p[1] & 0xF) == 8) {
        /* SCSI name string designator type */
        if (p[3] < 20 || memcmp(&p[4], "naa.", 4)) {
            return -EINVAL;
        }
        if (p[3] > 20 && p[24] != ',') {
            return -EINVAL;
        }
        *p_wwn = 0;
        /* Parse the 16 hex digits following "naa.".
         * NOTE(review): assumes they are valid hex characters — garbage
         * input would yield a garbage WWN rather than an error; confirm
         * whether stricter validation is needed. */
        for (i = 8; i < 24; i++) {
            char c = qemu_toupper(p[i]);
            c -= (c >= '0' && c <= '9' ? '0' : 'A' - 10);
            *p_wwn = (*p_wwn << 4) | c;
        }
        return 0;
    }

    return -EINVAL;
}

/*
 * Synchronously execute @cmd on @blk with a device-to-host data phase
 * into @buf.  Returns 0 on success, -1 on any transport/driver error.
 */
int scsi_SG_IO_FROM_DEV(BlockBackend *blk, uint8_t *cmd, uint8_t cmd_size,
                        uint8_t *buf, uint8_t buf_size)
{
    sg_io_hdr_t io_header;
    uint8_t sensebuf[8];
    int ret;

    memset(&io_header, 0, sizeof(io_header));
    io_header.interface_id = 'S';
    io_header.dxfer_direction = SG_DXFER_FROM_DEV;
    io_header.dxfer_len = buf_size;
    io_header.dxferp = buf;
    io_header.cmdp = cmd;
    io_header.cmd_len = cmd_size;
    io_header.mx_sb_len = sizeof(sensebuf);
    io_header.sbp = sensebuf;
    io_header.timeout = 6000; /* XXX */

    ret = blk_ioctl(blk, SG_IO, &io_header);
    if (ret < 0 || io_header.driver_status || io_header.host_status) {
        return -1;
    }
    return 0;
}

/*
 * Executes an INQUIRY request with EVPD set to retrieve the
 * available VPD pages of the device. If the device does
 * not support the Block Limits page (page 0xb0), set
 * the needs_vpd_bl_emulation flag for future use.
 */
static void scsi_generic_set_vpd_bl_emulation(SCSIDevice *s)
{
    uint8_t cmd[6];
    uint8_t buf[250];
    uint8_t page_len;
    int ret, i;

    memset(cmd, 0, sizeof(cmd));
    memset(buf, 0, sizeof(buf));
    cmd[0] = INQUIRY;
    cmd[1] = 1;             /* EVPD */
    cmd[2] = 0x00;          /* Supported VPD Pages */
    cmd[4] = sizeof(buf);   /* allocation length */

    ret = scsi_SG_IO_FROM_DEV(s->conf.blk, cmd, sizeof(cmd),
                              buf, sizeof(buf));
    if (ret < 0) {
        /*
         * Do not assume anything if we can't retrieve the
         * INQUIRY response to assert the VPD Block Limits
         * support.
         */
        s->needs_vpd_bl_emulation = false;
        return;
    }

    /* Scan the supported page list for 0xb0. */
    page_len = buf[3];
    for (i = 4; i < MIN(sizeof(buf), page_len + 4); i++) {
        if (buf[i] == 0xb0) {
            s->needs_vpd_bl_emulation = false;
            return;
        }
    }
    s->needs_vpd_bl_emulation = true;
}

/*
 * Fetch the Device Identification VPD page (0x83) and record NAA world
 * wide names for the logical unit (s->wwn) and target port (s->port_wwn).
 */
static void scsi_generic_read_device_identification(SCSIDevice *s)
{
    uint8_t cmd[6];
    uint8_t buf[250];
    int ret;
    int i, len;

    memset(cmd, 0, sizeof(cmd));
    memset(buf, 0, sizeof(buf));
    cmd[0] = INQUIRY;
    cmd[1] = 1;             /* EVPD */
    cmd[2] = 0x83;          /* Device Identification VPD page */
    cmd[4] = sizeof(buf);   /* allocation length */

    ret = scsi_SG_IO_FROM_DEV(s->conf.blk, cmd, sizeof(cmd),
                              buf, sizeof(buf));
    if (ret < 0) {
        return;
    }

    /* Walk the designation descriptor list, clamped to what we read. */
    len = MIN((buf[2] << 8) | buf[3], sizeof(buf) - 4);
    for (i = 0; i + 3 <= len; ) {
        const uint8_t *p = &buf[i + 4];
        uint64_t wwn;

        /* Stop if the descriptor would run past the page length. */
        if (i + (p[3] + 4) > len) {
            break;
        }

        if ((p[1] & 0x10) == 0) {
            /* Associated with the logical unit */
            if (read_naa_id(p, &wwn) == 0) {
                s->wwn = wwn;
            }
        } else if ((p[1] & 0x10) == 0x10) {
            /* Associated with the target port */
            if (read_naa_id(p, &wwn) == 0) {
                s->port_wwn = wwn;
            }
        }

        i += p[3] + 4;
    }
}

/* Probe INQUIRY-derived device state (WWNs; VPD BL emulation for disks). */
void scsi_generic_read_device_inquiry(SCSIDevice *s)
{
    scsi_generic_read_device_identification(s);
    if (s->type == TYPE_DISK) {
        scsi_generic_set_vpd_bl_emulation(s);
    } else {
        s->needs_vpd_bl_emulation = false;
    }
}

/*
 * Return the current block size of a stream (tape) device via MODE SENSE,
 * or -1 on failure.  Bytes 9..11 of the reply hold the block length field
 * of the first mode parameter block descriptor.
 */
static int get_stream_blocksize(BlockBackend *blk)
{
    uint8_t cmd[6];
    uint8_t buf[12];
    int ret;

    memset(cmd, 0, sizeof(cmd));
    memset(buf, 0, sizeof(buf));
    cmd[0] = MODE_SENSE;
    cmd[4] = sizeof(buf);

    ret = scsi_SG_IO_FROM_DEV(blk, cmd, sizeof(cmd), buf, sizeof(buf));
    if (ret < 0) {
        return -1;
    }

    return (buf[9] << 16) | (buf[10] << 8) | buf[11];
}

/* Device reset: restore the default SCSI version (-1 for scsi-generic,
 * so it is re-probed from the next standard INQUIRY) and fail all
 * pending requests with RESET sense.
 */
static void scsi_generic_reset(DeviceState *dev)
{
    SCSIDevice *s = SCSI_DEVICE(dev);

    s->scsi_version = s->default_scsi_version;
    scsi_device_purge_requests(s, SENSE_CODE(RESET));
}

/*
 * Realize: validate the backing block device and its error-handling
 * options, verify the host driver supports SG_IO v3+, then probe the
 * device type, block size and INQUIRY-derived state.
 */
static void scsi_generic_realize(SCSIDevice *s, Error **errp)
{
    int rc;
    int sg_version;
    struct sg_scsi_id scsiid;

    if (!s->conf.blk) {
        error_setg(errp, "drive property not set");
        return;
    }

    /* Pass-through cannot intercept errors, so only the default
     * werror/rerror policies are supported. */
    if (blk_get_on_error(s->conf.blk, 0) != BLOCKDEV_ON_ERROR_ENOSPC) {
        error_setg(errp, "Device doesn't support drive option werror");
        return;
    }
    if (blk_get_on_error(s->conf.blk, 1) != BLOCKDEV_ON_ERROR_REPORT) {
        error_setg(errp, "Device doesn't support drive option rerror");
        return;
    }

    /* check we are using a driver managing SG_IO (version 3 and after) */
    rc = blk_ioctl(s->conf.blk, SG_GET_VERSION_NUM, &sg_version);
    if (rc < 0) {
        error_setg_errno(errp, -rc, "cannot get SG_IO version number");
        if (rc != -EPERM) {
            error_append_hint(errp, "Is this a SCSI device?\n");
        }
        return;
    }
    if (sg_version < 30000) {
        error_setg(errp, "scsi generic interface too old");
        return;
    }

    /* get LUN of the /dev/sg? */
    if (blk_ioctl(s->conf.blk, SG_GET_SCSI_ID, &scsiid)) {
        error_setg(errp, "SG_GET_SCSI_ID ioctl failed");
        return;
    }
    if (!blkconf_apply_backend_options(&s->conf,
                                       blk_is_read_only(s->conf.blk),
                                       true, errp)) {
        return;
    }

    /* define device state */
    s->type = scsiid.scsi_type;
    DPRINTF("device type %d\n", s->type);

    switch (s->type) {
    case TYPE_TAPE:
        s->blocksize = get_stream_blocksize(s->conf.blk);
        if (s->blocksize == -1) {
            s->blocksize = 0;
        }
        break;

        /* Make a guess for block devices; we'll fix it when the guest sends
         * READ CAPACITY. If they don't, they likely would assume these sizes
         * anyway. (TODO: they could also send MODE SENSE).
         */
    case TYPE_ROM:
    case TYPE_WORM:
        s->blocksize = 2048;
        break;
    default:
        s->blocksize = 512;
        break;
    }

    DPRINTF("block size %d\n", s->blocksize);

    /* Only used by scsi-block, but initialize it nevertheless to be clean.  */
    s->default_scsi_version = -1;
    scsi_generic_read_device_inquiry(s);
}

/* Request-level operations hooked into the SCSI bus layer. */
const SCSIReqOps scsi_generic_req_ops = {
    .size         = sizeof(SCSIGenericReq),
    .free_req     = scsi_free_request,
    .send_command = scsi_send_command,
    .read_data    = scsi_read_data,
    .write_data   = scsi_write_data,
    .get_buf      = scsi_get_buf,
    .load_request = scsi_generic_load_request,
    .save_request = scsi_generic_save_request,
};

/* Allocate a new pass-through request for this device. */
static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun,
                                     uint8_t *buf, void *hba_private)
{
    return scsi_req_alloc(&scsi_generic_req_ops, d, tag, lun, hba_private);
}

static Property scsi_generic_properties[] = {
    DEFINE_PROP_DRIVE("drive", SCSIDevice, conf.blk),
    DEFINE_PROP_BOOL("share-rw", SCSIDevice, conf.share_rw, false),
    DEFINE_PROP_END_OF_LIST(),
};

/* CDB parsing: delegate entirely to the generic SCSI bus parser. */
static int scsi_generic_parse_cdb(SCSIDevice *dev, SCSICommand *cmd,
                                  uint8_t *buf, void *hba_private)
{
    return scsi_bus_parse_cdb(dev, cmd, buf, hba_private);
}

/* QOM class initialization for the scsi-generic device type. */
static void scsi_generic_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);

    sc->realize      = scsi_generic_realize;
    sc->alloc_req    = scsi_new_request;
    sc->parse_cdb    = scsi_generic_parse_cdb;
    dc->fw_name = "disk";
    dc->desc = "pass through generic scsi device (/dev/sg*)";
    dc->reset = scsi_generic_reset;
    dc->props = scsi_generic_properties;
    dc->vmsd  = &vmstate_scsi_device;
}

static const TypeInfo scsi_generic_info = {
    .name          = "scsi-generic",
    .parent        = TYPE_SCSI_DEVICE,
    .instance_size = sizeof(SCSIDevice),
    .class_init    = scsi_generic_class_initfn,
};

static void scsi_generic_register_types(void)
{
    type_register_static(&scsi_generic_info);
}

type_init(scsi_generic_register_types)

#endif /* __linux__ */