/*
 * Generic SCSI Device support
 *
 * Copyright (c) 2007 Bull S.A.S.
 * Based on code by Paul Brook
 * Based on code by Fabrice Bellard
 *
 * Written by Laurent Vivier <Laurent.Vivier@bull.net>
 *
 * This code is licensed under the LGPL.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/ctype.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "hw/scsi/scsi.h"
#include "migration/qemu-file-types.h"
#include "hw/qdev-properties.h"
#include "hw/qdev-properties-system.h"
#include "hw/scsi/emulation.h"
#include "sysemu/block-backend.h"
#include "trace.h"

#ifdef __linux__

#include <scsi/sg.h>
#include "scsi/constants.h"

#ifndef MAX_UINT
#define MAX_UINT ((unsigned int)-1)
#endif

/*
 * Per-request state for a pass-through command: the guest-visible
 * SCSIRequest plus the data buffer and the SG_IO header handed to
 * the host kernel.
 */
typedef struct SCSIGenericReq {
    SCSIRequest req;
    uint8_t *buf;       /* data buffer shared with the HBA */
    int buflen;         /* allocated size of buf */
    int len;            /* transfer progress; -1 once read data delivered */
    sg_io_hdr_t io_header;
} SCSIGenericReq;

/* Migration: save the data buffer for host-bound (write) transfers. */
static void scsi_generic_save_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);

    qemu_put_sbe32s(f, &r->buflen);
    if (r->buflen && r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        assert(!r->req.sg);
        qemu_put_buffer(f, r->buf, r->req.cmd.xfer);
    }
}

/* Migration: restore the data buffer saved by scsi_generic_save_request. */
static void scsi_generic_load_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);

    qemu_get_sbe32s(f, &r->buflen);
    if (r->buflen && r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        assert(!r->req.sg);
        qemu_get_buffer(f, r->buf, r->req.cmd.xfer);
    }
}

/* Release the per-request data buffer. */
static void scsi_free_request(SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);

    g_free(r->buf);
}

/*
 * Helper function for command completion.  Maps the SG_IO result (errno
 * plus io_header status) to a SCSI status, attaches sense data on
 * CHECK CONDITION, and completes the request.  Drops the reference taken
 * when the AIO was issued.
 */
static void scsi_command_complete_noio(SCSIGenericReq *r, int ret)
{
    int status;
    SCSISense sense;

    assert(r->req.aiocb == NULL);

    if (r->req.io_canceled) {
        scsi_req_cancel_complete(&r->req);
        goto done;
    }
    status = sg_io_sense_from_errno(-ret, &r->io_header, &sense);
    if (status == CHECK_CONDITION) {
        if (r->io_header.driver_status & SG_ERR_DRIVER_SENSE) {
            /* The host adapter already filled req.sense via sbp. */
            r->req.sense_len = r->io_header.sb_len_wr;
        } else {
            scsi_req_build_sense(&r->req, sense);
        }
    }

    trace_scsi_generic_command_complete_noio(r, r->req.tag, status);

    scsi_req_complete(&r->req, status);
done:
    scsi_req_unref(&r->req);
}

/* AIO completion callback for commands with no data phase handling. */
static void scsi_command_complete(void *opaque, int ret)
{
    SCSIGenericReq *r = (SCSIGenericReq *)opaque;
    SCSIDevice *s = r->req.dev;

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->conf.blk));
    scsi_command_complete_noio(r, ret);
    aio_context_release(blk_get_aio_context(s->conf.blk));
}

/*
 * Fill the SG_IO header from the request and submit it asynchronously.
 * Returns 0 on successful submission, -EIO if the AIO could not be started.
 */
static int execute_command(BlockBackend *blk,
                           SCSIGenericReq *r, int direction,
                           BlockCompletionFunc *complete)
{
    r->io_header.interface_id = 'S';
    r->io_header.dxfer_direction = direction;
    r->io_header.dxferp = r->buf;
    r->io_header.dxfer_len = r->buflen;
    r->io_header.cmdp = r->req.cmd.buf;
    r->io_header.cmd_len = r->req.cmd.len;
    r->io_header.mx_sb_len = sizeof(r->req.sense);
    r->io_header.sbp = r->req.sense;
    /* Effectively no timeout; the guest is in charge of aborting. */
    r->io_header.timeout = MAX_UINT;
    r->io_header.usr_ptr = r;
    r->io_header.flags |= SG_FLAG_DIRECT_IO;

    r->req.aiocb = blk_aio_ioctl(blk, SG_IO, &r->io_header, complete, r);
    if (r->req.aiocb == NULL) {
        return -EIO;
    }

    return 0;
}

/*
 * Post-process an INQUIRY response from the host device: record the SCSI
 * version, clamp reported transfer limits to what the block backend can
 * do, and advertise the emulated Block Limits VPD page when needed.
 */
static void scsi_handle_inquiry_reply(SCSIGenericReq *r, SCSIDevice *s)
{
    uint8_t page, page_idx;

    /*
     * EVPD set to zero returns the standard INQUIRY data.
     *
     * Check if scsi_version is unset (-1) to avoid re-defining it
     * each time an INQUIRY with standard data is received.
     * scsi_version is initialized with -1 in scsi_generic_reset
     * and scsi_disk_reset, making sure that we'll set the
     * scsi_version after a reset. If the version field of the
     * INQUIRY response somehow changes after a guest reboot,
     * we'll be able to keep track of it.
     *
     * On SCSI-2 and older, first 3 bits of byte 2 is the
     * ANSI-approved version, while on later versions the
     * whole byte 2 contains the version. Check if we're dealing
     * with a newer version and, in that case, assign the
     * whole byte.
     */
    if (s->scsi_version == -1 && !(r->req.cmd.buf[1] & 0x01)) {
        s->scsi_version = r->buf[2] & 0x07;
        if (s->scsi_version > 2) {
            s->scsi_version = r->buf[2];
        }
    }

    if ((s->type == TYPE_DISK || s->type == TYPE_ZBC) &&
        (r->req.cmd.buf[1] & 0x01)) {
        page = r->req.cmd.buf[2];
        if (page == 0xb0) {
            /* Block Limits page: cap transfer lengths to the backend. */
            uint32_t max_transfer =
                blk_get_max_transfer(s->conf.blk) / s->blocksize;

            assert(max_transfer);
            stl_be_p(&r->buf[8], max_transfer);
            /* Also take care of the opt xfer len. */
            stl_be_p(&r->buf[12],
                     MIN_NON_ZERO(max_transfer, ldl_be_p(&r->buf[12])));
        } else if (s->needs_vpd_bl_emulation && page == 0x00 && r->buflen >= 4) {
            /*
             * Now we're capable of supplying the VPD Block Limits
             * response if the hardware can't. Add it in the INQUIRY
             * Supported VPD pages response in case we are using the
             * emulation for this device.
             *
             * This way, the guest kernel will be aware of the support
             * and will use it to proper setup the SCSI device.
             *
             * VPD page numbers must be sorted, so insert 0xb0 at the
             * right place with an in-place insert. When the while loop
             * begins the device response is at r[0] to r[page_idx - 1].
             */
            page_idx = lduw_be_p(r->buf + 2) + 4;
            page_idx = MIN(page_idx, r->buflen);
            while (page_idx > 4 && r->buf[page_idx - 1] >= 0xb0) {
                if (page_idx < r->buflen) {
                    r->buf[page_idx] = r->buf[page_idx - 1];
                }
                page_idx--;
            }
            if (page_idx < r->buflen) {
                r->buf[page_idx] = 0xb0;
            }
            /* Bump the page-list length to account for the insertion. */
            stw_be_p(r->buf + 2, lduw_be_p(r->buf + 2) + 1);
        }
    }
}

/*
 * Build an emulated VPD Block Limits (0xb0) response in r->buf for
 * devices whose hardware cannot supply one.  Returns the reply length
 * (the full buffer; unused bytes are zeroed).
 */
static int scsi_generic_emulate_block_limits(SCSIGenericReq *r, SCSIDevice *s)
{
    int len;
    uint8_t buf[64];

    SCSIBlockLimits bl = {
        .max_io_sectors = blk_get_max_transfer(s->conf.blk) / s->blocksize
    };

    memset(r->buf, 0, r->buflen);
    stb_p(buf, s->type);
    stb_p(buf + 1, 0xb0);
    len = scsi_emulate_block_limits(buf + 4, &bl);
    assert(len <= sizeof(buf) - 4);
    stw_be_p(buf + 2, len);

    memcpy(r->buf, buf, MIN(r->buflen, len + 4));

    r->io_header.sb_len_wr = 0;

    /*
     * We have valid contents in the reply buffer but the
     * io_header can report a sense error coming from
     * the hardware in scsi_command_complete_noio. Clean
     * up the io_header to avoid reporting it.
     */
    r->io_header.driver_status = 0;
    r->io_header.status = 0;

    return r->buflen;
}

/*
 * AIO completion for the data-in phase: snoop interesting replies
 * (READ CAPACITY, MODE SENSE, INQUIRY), optionally substitute the
 * emulated Block Limits page, then hand the data to the HBA.
 */
static void scsi_read_complete(void * opaque, int ret)
{
    SCSIGenericReq *r = (SCSIGenericReq *)opaque;
    SCSIDevice *s = r->req.dev;
    int len;

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->conf.blk));

    if (ret || r->req.io_canceled) {
        scsi_command_complete_noio(r, ret);
        goto done;
    }

    len = r->io_header.dxfer_len - r->io_header.resid;
    trace_scsi_generic_read_complete(r->req.tag, len);

    /* Mark the data phase done; see scsi_read_data. */
    r->len = -1;

    if (r->io_header.driver_status & SG_ERR_DRIVER_SENSE) {
        SCSISense sense =
            scsi_parse_sense_buf(r->req.sense, r->io_header.sb_len_wr);

        /*
         * Check if this is a VPD Block Limits request that
         * resulted in sense error but would need emulation.
         * In this case, emulate a valid VPD response.
         */
        if (sense.key == ILLEGAL_REQUEST &&
            s->needs_vpd_bl_emulation &&
            r->req.cmd.buf[0] == INQUIRY &&
            (r->req.cmd.buf[1] & 0x01) &&
            r->req.cmd.buf[2] == 0xb0) {
            len = scsi_generic_emulate_block_limits(r, s);
            /*
             * It's okay to jump to req_complete: no need to
             * let scsi_handle_inquiry_reply handle an
             * INQUIRY VPD BL request we created manually.
             */
        }
        if (sense.key) {
            goto req_complete;
        }
    }

    if (len == 0) {
        scsi_command_complete_noio(r, 0);
        goto done;
    }

    /* Snoop READ CAPACITY output to set the blocksize. */
    if (r->req.cmd.buf[0] == READ_CAPACITY_10 &&
        (ldl_be_p(&r->buf[0]) != 0xffffffffU || s->max_lba == 0)) {
        s->blocksize = ldl_be_p(&r->buf[4]);
        s->max_lba = ldl_be_p(&r->buf[0]) & 0xffffffffULL;
    } else if (r->req.cmd.buf[0] == SERVICE_ACTION_IN_16 &&
               (r->req.cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) {
        s->blocksize = ldl_be_p(&r->buf[8]);
        s->max_lba = ldq_be_p(&r->buf[0]);
    }
    blk_set_guest_block_size(s->conf.blk, s->blocksize);

    /*
     * Patch MODE SENSE device specific parameters if the BDS is opened
     * readonly.
     */
    if ((s->type == TYPE_DISK || s->type == TYPE_TAPE || s->type == TYPE_ZBC) &&
        blk_is_read_only(s->conf.blk) &&
        (r->req.cmd.buf[0] == MODE_SENSE ||
         r->req.cmd.buf[0] == MODE_SENSE_10) &&
        (r->req.cmd.buf[1] & 0x8) == 0) {
        /* Set the write-protect bit in the device-specific parameter. */
        if (r->req.cmd.buf[0] == MODE_SENSE) {
            r->buf[2] |= 0x80;
        } else  {
            r->buf[3] |= 0x80;
        }
    }
    if (r->req.cmd.buf[0] == INQUIRY) {
        scsi_handle_inquiry_reply(r, s);
    }

req_complete:
    scsi_req_data(&r->req, len);
    scsi_req_unref(&r->req);

done:
    aio_context_release(blk_get_aio_context(s->conf.blk));
}

/* Read more data from scsi device into buffer.  */
static void scsi_read_data(SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
    SCSIDevice *s = r->req.dev;
    int ret;

    trace_scsi_generic_read_data(req->tag);

    /* The request is used as the AIO opaque value, so add a ref.  */
    scsi_req_ref(&r->req);
    if (r->len == -1) {
        /* Data was already delivered by scsi_read_complete; finish up. */
        scsi_command_complete_noio(r, 0);
        return;
    }

    ret = execute_command(s->conf.blk, r, SG_DXFER_FROM_DEV,
                          scsi_read_complete);
    if (ret < 0) {
        scsi_command_complete_noio(r, ret);
    }
}

/*
 * AIO completion for the data-out phase: snoop MODE SELECT on tapes
 * to track the guest-configured block size, then complete the command.
 */
static void scsi_write_complete(void * opaque, int ret)
{
    SCSIGenericReq *r = (SCSIGenericReq *)opaque;
    SCSIDevice *s = r->req.dev;

    trace_scsi_generic_write_complete(ret);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->conf.blk));

    if (ret || r->req.io_canceled) {
        scsi_command_complete_noio(r, ret);
        goto done;
    }

    /*
     * NOTE(review): buf[4] == 12 presumably matches the parameter list
     * length of a mode page carrying a block descriptor; bytes 9..11 of
     * the payload are the descriptor's 24-bit block length — confirm
     * against the MODE SELECT(6) layout in SPC.
     */
    if (r->req.cmd.buf[0] == MODE_SELECT && r->req.cmd.buf[4] == 12 &&
        s->type == TYPE_TAPE) {
        s->blocksize = (r->buf[9] << 16) | (r->buf[10] << 8) | r->buf[11];
        trace_scsi_generic_write_complete_blocksize(s->blocksize);
    }

    scsi_command_complete_noio(r, ret);

done:
    aio_context_release(blk_get_aio_context(s->conf.blk));
}

/* Write data to a scsi device.  Returns nonzero on failure.
   The transfer may complete asynchronously.  */
static void scsi_write_data(SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
    SCSIDevice *s = r->req.dev;
    int ret;

    trace_scsi_generic_write_data(req->tag);
    if (r->len == 0) {
        /* First call: ask the HBA to fill the buffer (see scsi_send_command,
         * which zeroes r->len for TO_DEV transfers). */
        r->len = r->buflen;
        scsi_req_data(&r->req, r->len);
        return;
    }

    /* The request is used as the AIO opaque value, so add a ref.  */
    scsi_req_ref(&r->req);
    ret = execute_command(s->conf.blk, r, SG_DXFER_TO_DEV, scsi_write_complete);
    if (ret < 0) {
        scsi_command_complete_noio(r, ret);
    }
}

/* Return a pointer to the data buffer.  */
static uint8_t *scsi_get_buf(SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);

    return r->buf;
}

/* Trace helper: dump the CDB bytes as " 0xNN 0xNN ...". */
static void scsi_generic_command_dump(uint8_t *cmd, int len)
{
    int i;
    char *line_buffer, *p;

    /* 5 chars per byte (" 0xNN") plus the terminating NUL. */
    line_buffer = g_malloc(len * 5 + 1);

    for (i = 0, p = line_buffer; i < len; i++) {
        p += sprintf(p, " 0x%02x", cmd[i]);
    }
    trace_scsi_generic_send_command(line_buffer);

    g_free(line_buffer);
}

/* Execute a scsi command.  Returns the length of the data expected by the
   command.  This will be Positive for data transfers from the device
   (eg. disk reads), negative for transfers to the device (eg. disk writes),
   and zero if the command does not transfer any data.  */

static int32_t scsi_send_command(SCSIRequest *req, uint8_t *cmd)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
    SCSIDevice *s = r->req.dev;
    int ret;

    if (trace_event_get_state_backends(TRACE_SCSI_GENERIC_SEND_COMMAND)) {
        scsi_generic_command_dump(cmd, r->req.cmd.len);
    }

    if (r->req.cmd.xfer == 0) {
        /* No data phase: submit immediately with SG_DXFER_NONE. */
        g_free(r->buf);
        r->buflen = 0;
        r->buf = NULL;
        /* The request is used as the AIO opaque value, so add a ref.  */
        scsi_req_ref(&r->req);
        ret = execute_command(s->conf.blk, r, SG_DXFER_NONE,
                              scsi_command_complete);
        if (ret < 0) {
            scsi_command_complete_noio(r, ret);
            return 0;
        }
        return 0;
    }

    /* (Re)allocate the data buffer to the transfer size if needed. */
    if (r->buflen != r->req.cmd.xfer) {
        g_free(r->buf);
        r->buf = g_malloc(r->req.cmd.xfer);
        r->buflen = r->req.cmd.xfer;
    }

    memset(r->buf, 0, r->buflen);
    r->len = r->req.cmd.xfer;
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        r->len = 0;
        return -r->req.cmd.xfer;
    } else {
        return r->req.cmd.xfer;
    }
}

/*
 * Parse a single VPD 0x83 designation descriptor and extract an NAA
 * world-wide name into *p_wwn.  Handles the binary NAA designator
 * (type 3) and the "naa."-prefixed SCSI name string (type 8).
 * Returns 0 on success, -EINVAL if the descriptor is not an NAA id.
 */
static int read_naa_id(const uint8_t *p, uint64_t *p_wwn)
{
    int i;

    if ((p[1] & 0xF) == 3) {
        /* NAA designator type */
        if (p[3] != 8) {
            return -EINVAL;
        }
        *p_wwn = ldq_be_p(p + 4);
        return 0;
    }

    if ((p[1] & 0xF) == 8) {
        /* SCSI name string designator type */
        if (p[3] < 20 || memcmp(&p[4], "naa.", 4)) {
            return -EINVAL;
        }
        if (p[3] > 20 && p[24] != ',') {
            return -EINVAL;
        }
        *p_wwn = 0;
        /* Decode 16 hex digits following the "naa." prefix. */
        for (i = 8; i < 24; i++) {
            char c = qemu_toupper(p[i]);
            c -= (c >= '0' && c <= '9' ? '0' : 'A' - 10);
            *p_wwn = (*p_wwn << 4) | c;
        }
        return 0;
    }

    return -EINVAL;
}

/*
 * Synchronously issue a device-to-host command via SG_IO.
 * Returns 0 on success, -1 on ioctl failure or any driver/host error.
 */
int scsi_SG_IO_FROM_DEV(BlockBackend *blk, uint8_t *cmd, uint8_t cmd_size,
                        uint8_t *buf, uint8_t buf_size)
{
    sg_io_hdr_t io_header;
    uint8_t sensebuf[8];
    int ret;

    memset(&io_header, 0, sizeof(io_header));
    io_header.interface_id = 'S';
    io_header.dxfer_direction = SG_DXFER_FROM_DEV;
    io_header.dxfer_len = buf_size;
    io_header.dxferp = buf;
    io_header.cmdp = cmd;
    io_header.cmd_len = cmd_size;
    io_header.mx_sb_len = sizeof(sensebuf);
    io_header.sbp = sensebuf;
    io_header.timeout = 6000; /* XXX */

    ret = blk_ioctl(blk, SG_IO, &io_header);
    if (ret < 0 || io_header.driver_status || io_header.host_status) {
        return -1;
    }
    return 0;
}

/*
 * Executes an INQUIRY request with EVPD set to retrieve the
 * available VPD pages of the device. If the device does
 * not support the Block Limits page (page 0xb0), set
 * the needs_vpd_bl_emulation flag for future use.
 */
static void scsi_generic_set_vpd_bl_emulation(SCSIDevice *s)
{
    uint8_t cmd[6];
    uint8_t buf[250];
    uint8_t page_len;
    int ret, i;

    memset(cmd, 0, sizeof(cmd));
    memset(buf, 0, sizeof(buf));
    cmd[0] = INQUIRY;
    cmd[1] = 1;         /* EVPD bit set */
    cmd[2] = 0x00;      /* Supported VPD Pages */
    cmd[4] = sizeof(buf);

    ret = scsi_SG_IO_FROM_DEV(s->conf.blk, cmd, sizeof(cmd),
                              buf, sizeof(buf));
    if (ret < 0) {
        /*
         * Do not assume anything if we can't retrieve the
         * INQUIRY response to assert the VPD Block Limits
         * support.
         */
        s->needs_vpd_bl_emulation = false;
        return;
    }

    page_len = buf[3];
    for (i = 4; i < MIN(sizeof(buf), page_len + 4); i++) {
        if (buf[i] == 0xb0) {
            s->needs_vpd_bl_emulation = false;
            return;
        }
    }
    s->needs_vpd_bl_emulation = true;
}

/*
 * Read the VPD Device Identification page (0x83) and record the NAA
 * world-wide names for the logical unit (s->wwn) and the target port
 * (s->port_wwn), when present.  Failures are silently ignored.
 */
static void scsi_generic_read_device_identification(SCSIDevice *s)
{
    uint8_t cmd[6];
    uint8_t buf[250];
    int ret;
    int i, len;

    memset(cmd, 0, sizeof(cmd));
    memset(buf, 0, sizeof(buf));
    cmd[0] = INQUIRY;
    cmd[1] = 1;         /* EVPD bit set */
    cmd[2] = 0x83;      /* Device Identification page */
    cmd[4] = sizeof(buf);

    ret = scsi_SG_IO_FROM_DEV(s->conf.blk, cmd, sizeof(cmd),
                              buf, sizeof(buf));
    if (ret < 0) {
        return;
    }

    len = MIN((buf[2] << 8) | buf[3], sizeof(buf) - 4);
    /* Walk the designation descriptor list; each is p[3] + 4 bytes. */
    for (i = 0; i + 3 <= len; ) {
        const uint8_t *p = &buf[i + 4];
        uint64_t wwn;

        if (i + (p[3] + 4) > len) {
            break;
        }

        if ((p[1] & 0x10) == 0) {
            /* Associated with the logical unit */
            if (read_naa_id(p, &wwn) == 0) {
                s->wwn = wwn;
            }
        } else if ((p[1] & 0x10) == 0x10) {
            /* Associated with the target port */
            if (read_naa_id(p, &wwn) == 0) {
                s->port_wwn = wwn;
            }
        }

        i += p[3] + 4;
    }
}

/* Probe device identity and, for disks, Block Limits VPD support. */
void scsi_generic_read_device_inquiry(SCSIDevice *s)
{
    scsi_generic_read_device_identification(s);
    if (s->type == TYPE_DISK || s->type == TYPE_ZBC) {
        scsi_generic_set_vpd_bl_emulation(s);
    } else {
        s->needs_vpd_bl_emulation = false;
    }
}

/*
 * Issue MODE SENSE(6) on a stream (tape) device and return the block
 * length from the block descriptor (bytes 9..11), or -1 on failure.
 */
static int get_stream_blocksize(BlockBackend *blk)
{
    uint8_t cmd[6];
    uint8_t buf[12];
    int ret;

    memset(cmd, 0, sizeof(cmd));
    memset(buf, 0, sizeof(buf));
    cmd[0] = MODE_SENSE;
    cmd[4] = sizeof(buf);

    ret = scsi_SG_IO_FROM_DEV(blk, cmd, sizeof(cmd), buf, sizeof(buf));
    if (ret < 0) {
        return -1;
    }

    return (buf[9] << 16) | (buf[10] << 8) | buf[11];
}

/* Device reset: forget the sniffed SCSI version and cancel everything. */
static void scsi_generic_reset(DeviceState *dev)
{
    SCSIDevice *s = SCSI_DEVICE(dev);

    s->scsi_version = s->default_scsi_version;
    scsi_device_purge_requests(s, SENSE_CODE(RESET));
}

/*
 * Realize the pass-through device: validate the backend and its error
 * policy, check the host SG driver version, read the device type/LUN,
 * pick an initial block size, and probe the device's INQUIRY data.
 */
static void scsi_generic_realize(SCSIDevice *s, Error **errp)
{
    int rc;
    int sg_version;
    struct sg_scsi_id scsiid;

    if (!s->conf.blk) {
        error_setg(errp, "drive property not set");
        return;
    }

    /* Errors are passed through verbatim, so werror/rerror must be off. */
    if (blk_get_on_error(s->conf.blk, 0) != BLOCKDEV_ON_ERROR_ENOSPC) {
        error_setg(errp, "Device doesn't support drive option werror");
        return;
    }
    if (blk_get_on_error(s->conf.blk, 1) != BLOCKDEV_ON_ERROR_REPORT) {
        error_setg(errp, "Device doesn't support drive option rerror");
        return;
    }

    /* check we are using a driver managing SG_IO (version 3 and after) */
    rc = blk_ioctl(s->conf.blk, SG_GET_VERSION_NUM, &sg_version);
    if (rc < 0) {
        error_setg_errno(errp, -rc, "cannot get SG_IO version number");
        if (rc != -EPERM) {
            error_append_hint(errp, "Is this a SCSI device?\n");
        }
        return;
    }
    if (sg_version < 30000) {
        error_setg(errp, "scsi generic interface too old");
        return;
    }

    /* get LUN of the /dev/sg? */
    if (blk_ioctl(s->conf.blk, SG_GET_SCSI_ID, &scsiid)) {
        error_setg(errp, "SG_GET_SCSI_ID ioctl failed");
        return;
    }
    if (!blkconf_apply_backend_options(&s->conf,
                                       blk_is_read_only(s->conf.blk),
                                       true, errp)) {
        return;
    }

    /* define device state */
    s->type = scsiid.scsi_type;
    trace_scsi_generic_realize_type(s->type);

    switch (s->type) {
    case TYPE_TAPE:
        s->blocksize = get_stream_blocksize(s->conf.blk);
        if (s->blocksize == -1) {
            s->blocksize = 0;
        }
        break;

        /* Make a guess for block devices, we'll fix it when the guest sends
         * READ CAPACITY. If they don't, they likely would assume these sizes
         * anyway. (TODO: they could also send MODE SENSE).
         */
    case TYPE_ROM:
    case TYPE_WORM:
        s->blocksize = 2048;
        break;
    default:
        s->blocksize = 512;
        break;
    }

    trace_scsi_generic_realize_blocksize(s->blocksize);

    /* Only used by scsi-block, but initialize it nevertheless to be clean. */
    s->default_scsi_version = -1;
    scsi_generic_read_device_inquiry(s);
}

const SCSIReqOps scsi_generic_req_ops = {
    .size         = sizeof(SCSIGenericReq),
    .free_req     = scsi_free_request,
    .send_command = scsi_send_command,
    .read_data    = scsi_read_data,
    .write_data   = scsi_write_data,
    .get_buf      = scsi_get_buf,
    .load_request = scsi_generic_load_request,
    .save_request = scsi_generic_save_request,
};

/* Allocate a new request using the generic pass-through ops. */
static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun,
                                     uint8_t *buf, void *hba_private)
{
    return scsi_req_alloc(&scsi_generic_req_ops, d, tag, lun, hba_private);
}

static Property scsi_generic_properties[] = {
    DEFINE_PROP_DRIVE("drive", SCSIDevice, conf.blk),
    DEFINE_PROP_BOOL("share-rw", SCSIDevice, conf.share_rw, false),
    DEFINE_PROP_END_OF_LIST(),
};

/* CDB parsing is delegated entirely to the SCSI bus layer. */
static int scsi_generic_parse_cdb(SCSIDevice *dev, SCSICommand *cmd,
                                  uint8_t *buf, void *hba_private)
{
    return scsi_bus_parse_cdb(dev, cmd, buf, hba_private);
}

static void scsi_generic_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);

    sc->realize      = scsi_generic_realize;
    sc->alloc_req    = scsi_new_request;
    sc->parse_cdb    = scsi_generic_parse_cdb;
    dc->fw_name = "disk";
    dc->desc = "pass through generic scsi device (/dev/sg*)";
    dc->reset = scsi_generic_reset;
    device_class_set_props(dc, scsi_generic_properties);
    dc->vmsd  = &vmstate_scsi_device;
}

static const TypeInfo scsi_generic_info = {
    .name          = "scsi-generic",
    .parent        = TYPE_SCSI_DEVICE,
    .instance_size = sizeof(SCSIDevice),
    .class_init    = scsi_generic_class_initfn,
};

static void scsi_generic_register_types(void)
{
    type_register_static(&scsi_generic_info);
}

type_init(scsi_generic_register_types)

#endif /* __linux__ */