/*
 * Generic SCSI Device support
 *
 * Copyright (c) 2007 Bull S.A.S.
 * Based on code by Paul Brook
 * Based on code by Fabrice Bellard
 *
 * Written by Laurent Vivier <Laurent.Vivier@bull.net>
 *
 * This code is licensed under the LGPL.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/ctype.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "hw/scsi/scsi.h"
#include "migration/qemu-file-types.h"
#include "hw/qdev-properties.h"
#include "hw/qdev-properties-system.h"
#include "hw/scsi/emulation.h"
#include "sysemu/block-backend.h"
#include "trace.h"

#ifdef __linux__

#include <scsi/sg.h>
#include "scsi/constants.h"

#ifndef MAX_UINT
#define MAX_UINT ((unsigned int)-1)
#endif

/*
 * Per-request state for the pass-through device: the guest-visible
 * data buffer plus the sg_io_hdr_t handed to the kernel via SG_IO.
 */
typedef struct SCSIGenericReq {
    SCSIRequest req;
    uint8_t *buf;           /* data buffer exchanged with the guest */
    int buflen;             /* allocated size of buf */
    int len;                /* remaining transfer length; -1 = done */
    sg_io_hdr_t io_header;  /* header passed to the SG_IO ioctl */
} SCSIGenericReq;

/* Save in-flight request state for migration (buffer only for writes). */
static void scsi_generic_save_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);

    qemu_put_sbe32s(f, &r->buflen);
    if (r->buflen && r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        assert(!r->req.sg);
        qemu_put_buffer(f, r->buf, r->req.cmd.xfer);
    }
}

/* Counterpart of scsi_generic_save_request on the migration target. */
static void scsi_generic_load_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);

    qemu_get_sbe32s(f, &r->buflen);
    if (r->buflen && r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        assert(!r->req.sg);
        qemu_get_buffer(f, r->buf, r->req.cmd.xfer);
    }
}

static void scsi_free_request(SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);

    g_free(r->buf);
}

/*
 * Helper function for command completion.  Maps the SG_IO result
 * (errno, host_status, driver_status, SCSI status) onto a SCSI
 * status for the guest and completes the request.  Consumes the
 * reference that was taken when the AIO was submitted.
 */
static void scsi_command_complete_noio(SCSIGenericReq *r, int ret)
{
    int status;
    SCSISense sense;
    sg_io_hdr_t *io_hdr = &r->io_header;

    assert(r->req.aiocb == NULL);

    if (r->req.io_canceled) {
        scsi_req_cancel_complete(&r->req);
        goto done;
    }
    if (ret < 0) {
        /* Local (errno) failure: synthesize sense data if needed. */
        status = scsi_sense_from_errno(-ret, &sense);
        if (status == CHECK_CONDITION) {
            scsi_req_build_sense(&r->req, sense);
        }
    } else if (io_hdr->host_status != SCSI_HOST_OK) {
        /* Transport-level error reported by the host adapter. */
        scsi_req_complete_failed(&r->req, io_hdr->host_status);
        goto done;
    } else if (io_hdr->driver_status & SG_ERR_DRIVER_TIMEOUT) {
        status = BUSY;
    } else {
        status = io_hdr->status;
        if (io_hdr->driver_status & SG_ERR_DRIVER_SENSE) {
            /* Sense data was auto-fetched into r->req.sense by the kernel. */
            r->req.sense_len = io_hdr->sb_len_wr;
        }
    }
    trace_scsi_generic_command_complete_noio(r, r->req.tag, status);

    scsi_req_complete(&r->req, status);
done:
    scsi_req_unref(&r->req);
}

/* AIO completion callback for commands with no data transfer. */
static void scsi_command_complete(void *opaque, int ret)
{
    SCSIGenericReq *r = (SCSIGenericReq *)opaque;
    SCSIDevice *s = r->req.dev;

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->conf.blk));
    scsi_command_complete_noio(r, ret);
    aio_context_release(blk_get_aio_context(s->conf.blk));
}

/*
 * Fill in the sg_io_hdr_t for the request and submit it to the host
 * device as an asynchronous SG_IO ioctl.  Returns 0 on submission,
 * -EIO if the AIO request could not be created.
 */
static int execute_command(BlockBackend *blk,
                           SCSIGenericReq *r, int direction,
                           BlockCompletionFunc *complete)
{
    SCSIDevice *s = r->req.dev;

    r->io_header.interface_id = 'S';
    r->io_header.dxfer_direction = direction;
    r->io_header.dxferp = r->buf;
    r->io_header.dxfer_len = r->buflen;
    r->io_header.cmdp = r->req.cmd.buf;
    r->io_header.cmd_len = r->req.cmd.len;
    r->io_header.mx_sb_len = sizeof(r->req.sense);
    r->io_header.sbp = r->req.sense;
    r->io_header.timeout = s->io_timeout * 1000;  /* SG_IO wants ms */
    r->io_header.usr_ptr = r;
    r->io_header.flags |= SG_FLAG_DIRECT_IO;

    trace_scsi_generic_aio_sgio_command(r->req.tag, r->req.cmd.buf[0],
                                        r->io_header.timeout);
    r->req.aiocb = blk_aio_ioctl(blk, SG_IO, &r->io_header, complete, r);
    if (r->req.aiocb == NULL) {
        return -EIO;
    }

    return 0;
}

/* Maximum transfer size of the host device, in device blocks. */
static uint64_t calculate_max_transfer(SCSIDevice *s)
{
    uint64_t max_transfer = blk_get_max_hw_transfer(s->conf.blk);
    uint32_t max_iov = blk_get_max_hw_iov(s->conf.blk);

    assert(max_transfer);
    max_transfer = MIN_NON_ZERO(max_transfer,
                                max_iov * qemu_real_host_page_size());

    return max_transfer / s->blocksize;
}

/*
 * Post-process an INQUIRY reply before handing it to the guest:
 * track the device's SCSI version, clamp the reported maximum/optimal
 * transfer lengths (VPD page 0xb0), and advertise the Block Limits
 * page in the Supported VPD Pages list when we emulate it.
 * Returns the (possibly grown) reply length.
 */
static int scsi_handle_inquiry_reply(SCSIGenericReq *r, SCSIDevice *s, int len)
{
    uint8_t page, page_idx;

    /*
     * EVPD set to zero returns the standard INQUIRY data.
     *
     * Check if scsi_version is unset (-1) to avoid re-defining it
     * each time an INQUIRY with standard data is received.
     * scsi_version is initialized with -1 in scsi_generic_reset
     * and scsi_disk_reset, making sure that we'll set the
     * scsi_version after a reset. If the version field of the
     * INQUIRY response somehow changes after a guest reboot,
     * we'll be able to keep track of it.
     *
     * On SCSI-2 and older, first 3 bits of byte 2 is the
     * ANSI-approved version, while on later versions the
     * whole byte 2 contains the version. Check if we're dealing
     * with a newer version and, in that case, assign the
     * whole byte.
     */
    if (s->scsi_version == -1 && !(r->req.cmd.buf[1] & 0x01)) {
        s->scsi_version = r->buf[2] & 0x07;
        if (s->scsi_version > 2) {
            s->scsi_version = r->buf[2];
        }
    }

    if ((s->type == TYPE_DISK || s->type == TYPE_ZBC) &&
        (r->req.cmd.buf[1] & 0x01)) {
        page = r->req.cmd.buf[2];
        if (page == 0xb0) {
            uint64_t max_transfer = calculate_max_transfer(s);
            stl_be_p(&r->buf[8], max_transfer);
            /* Also take care of the opt xfer len. */
            stl_be_p(&r->buf[12],
                     MIN_NON_ZERO(max_transfer, ldl_be_p(&r->buf[12])));
        } else if (s->needs_vpd_bl_emulation && page == 0x00 && r->buflen >= 4) {
            /*
             * Now we're capable of supplying the VPD Block Limits
             * response if the hardware can't. Add it in the INQUIRY
             * Supported VPD pages response in case we are using the
             * emulation for this device.
             *
             * This way, the guest kernel will be aware of the support
             * and will use it to proper setup the SCSI device.
             *
             * VPD page numbers must be sorted, so insert 0xb0 at the
             * right place with an in-place insert. When the while loop
             * begins the device response is at r[0] to r[page_idx - 1].
             */
            page_idx = lduw_be_p(r->buf + 2) + 4;
            page_idx = MIN(page_idx, r->buflen);
            while (page_idx > 4 && r->buf[page_idx - 1] >= 0xb0) {
                if (page_idx < r->buflen) {
                    r->buf[page_idx] = r->buf[page_idx - 1];
                }
                page_idx--;
            }
            if (page_idx < r->buflen) {
                r->buf[page_idx] = 0xb0;
            }
            stw_be_p(r->buf + 2, lduw_be_p(r->buf + 2) + 1);

            if (len < r->buflen) {
                len++;
            }
        }
    }
    return len;
}

/*
 * Build an emulated VPD Block Limits (0xb0) INQUIRY response in
 * r->buf, for devices whose hardware rejects the page.  Returns the
 * number of valid bytes placed in the reply buffer.
 */
static int scsi_generic_emulate_block_limits(SCSIGenericReq *r, SCSIDevice *s)
{
    int len;
    uint8_t buf[64];

    SCSIBlockLimits bl = {
        .max_io_sectors = calculate_max_transfer(s),
    };

    memset(r->buf, 0, r->buflen);
    stb_p(buf, s->type);
    stb_p(buf + 1, 0xb0);
    len = scsi_emulate_block_limits(buf + 4, &bl);
    assert(len <= sizeof(buf) - 4);
    stw_be_p(buf + 2, len);

    memcpy(r->buf, buf, MIN(r->buflen, len + 4));

    r->io_header.sb_len_wr = 0;

    /*
     * We have valid contents in the reply buffer but the
     * io_header can report a sense error coming from
     * the hardware in scsi_command_complete_noio. Clean
     * up the io_header to avoid reporting it.
     */
    r->io_header.driver_status = 0;
    r->io_header.status = 0;

    return r->buflen;
}

/*
 * AIO completion callback for data-in transfers.  Besides delivering
 * the data, this snoops several replies (READ CAPACITY, MODE SENSE,
 * INQUIRY) to keep device state in sync and patch guest-visible data.
 */
static void scsi_read_complete(void *opaque, int ret)
{
    SCSIGenericReq *r = (SCSIGenericReq *)opaque;
    SCSIDevice *s = r->req.dev;
    int len;

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->conf.blk));

    if (ret || r->req.io_canceled) {
        scsi_command_complete_noio(r, ret);
        goto done;
    }

    len = r->io_header.dxfer_len - r->io_header.resid;
    trace_scsi_generic_read_complete(r->req.tag, len);

    r->len = -1;

    if (r->io_header.driver_status & SG_ERR_DRIVER_SENSE) {
        SCSISense sense =
            scsi_parse_sense_buf(r->req.sense, r->io_header.sb_len_wr);

        /*
         * Check if this is a VPD Block Limits request that
         * resulted in sense error but would need emulation.
         * In this case, emulate a valid VPD response.
         */
        if (sense.key == ILLEGAL_REQUEST &&
            s->needs_vpd_bl_emulation &&
            r->req.cmd.buf[0] == INQUIRY &&
            (r->req.cmd.buf[1] & 0x01) &&
            r->req.cmd.buf[2] == 0xb0) {
            len = scsi_generic_emulate_block_limits(r, s);
            /*
             * It's okay to jump to req_complete: no need to
             * let scsi_handle_inquiry_reply handle an
             * INQUIRY VPD BL request we created manually.
             */
        }
        if (sense.key) {
            goto req_complete;
        }
    }

    if (r->io_header.host_status != SCSI_HOST_OK ||
        (r->io_header.driver_status & SG_ERR_DRIVER_TIMEOUT) ||
        r->io_header.status != GOOD ||
        len == 0) {
        scsi_command_complete_noio(r, 0);
        goto done;
    }

    /* Snoop READ CAPACITY output to set the blocksize. */
    if (r->req.cmd.buf[0] == READ_CAPACITY_10 &&
        (ldl_be_p(&r->buf[0]) != 0xffffffffU || s->max_lba == 0)) {
        s->blocksize = ldl_be_p(&r->buf[4]);
        s->max_lba = ldl_be_p(&r->buf[0]) & 0xffffffffULL;
    } else if (r->req.cmd.buf[0] == SERVICE_ACTION_IN_16 &&
               (r->req.cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) {
        s->blocksize = ldl_be_p(&r->buf[8]);
        s->max_lba = ldq_be_p(&r->buf[0]);
    }

    /*
     * Patch MODE SENSE device specific parameters if the BDS is opened
     * readonly.
     */
    if ((s->type == TYPE_DISK || s->type == TYPE_TAPE || s->type == TYPE_ZBC) &&
        !blk_is_writable(s->conf.blk) &&
        (r->req.cmd.buf[0] == MODE_SENSE ||
         r->req.cmd.buf[0] == MODE_SENSE_10) &&
        (r->req.cmd.buf[1] & 0x8) == 0) {
        /* Set the write-protect (WP) bit in the mode parameter header. */
        if (r->req.cmd.buf[0] == MODE_SENSE) {
            r->buf[2] |= 0x80;
        } else {
            r->buf[3] |= 0x80;
        }
    }
    if (r->req.cmd.buf[0] == INQUIRY) {
        len = scsi_handle_inquiry_reply(r, s, len);
    }

req_complete:
    scsi_req_data(&r->req, len);
    scsi_req_unref(&r->req);

done:
    aio_context_release(blk_get_aio_context(s->conf.blk));
}

/* Read more data from scsi device into buffer. */
static void scsi_read_data(SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
    SCSIDevice *s = r->req.dev;
    int ret;

    trace_scsi_generic_read_data(req->tag);

    /* The request is used as the AIO opaque value, so add a ref. */
    scsi_req_ref(&r->req);
    if (r->len == -1) {
        /* Transfer already done; just finish the request. */
        scsi_command_complete_noio(r, 0);
        return;
    }

    ret = execute_command(s->conf.blk, r, SG_DXFER_FROM_DEV,
                          scsi_read_complete);
    if (ret < 0) {
        scsi_command_complete_noio(r, ret);
    }
}

/*
 * AIO completion callback for data-out transfers.  Snoops MODE SELECT
 * on tapes to pick up a changed block size before completing.
 */
static void scsi_write_complete(void *opaque, int ret)
{
    SCSIGenericReq *r = (SCSIGenericReq *)opaque;
    SCSIDevice *s = r->req.dev;

    trace_scsi_generic_write_complete(ret);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->conf.blk));

    if (ret || r->req.io_canceled) {
        scsi_command_complete_noio(r, ret);
        goto done;
    }

    if (r->req.cmd.buf[0] == MODE_SELECT && r->req.cmd.buf[4] == 12 &&
        s->type == TYPE_TAPE) {
        /* Block size is a 24-bit big-endian field in the block descriptor. */
        s->blocksize = (r->buf[9] << 16) | (r->buf[10] << 8) | r->buf[11];
        trace_scsi_generic_write_complete_blocksize(s->blocksize);
    }

    scsi_command_complete_noio(r, ret);

done:
    aio_context_release(blk_get_aio_context(s->conf.blk));
}

/* Write data to a scsi device.  Returns nonzero on failure.
   The transfer may complete asynchronously.  */
static void scsi_write_data(SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
    SCSIDevice *s = r->req.dev;
    int ret;

    trace_scsi_generic_write_data(req->tag);
    if (r->len == 0) {
        /* First call: ask the HBA to fill the buffer, then come back. */
        r->len = r->buflen;
        scsi_req_data(&r->req, r->len);
        return;
    }

    /* The request is used as the AIO opaque value, so add a ref. */
    scsi_req_ref(&r->req);
    ret = execute_command(s->conf.blk, r, SG_DXFER_TO_DEV, scsi_write_complete);
    if (ret < 0) {
        scsi_command_complete_noio(r, ret);
    }
}

/* Return a pointer to the data buffer. */
static uint8_t *scsi_get_buf(SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);

    return r->buf;
}

/* Trace helper: dump a CDB as " 0xNN 0xNN ..." through the trace backend. */
static void scsi_generic_command_dump(uint8_t *cmd, int len)
{
    int i;
    char *line_buffer, *p;

    /* 5 chars per byte (" 0xNN") plus the terminating NUL. */
    line_buffer = g_malloc(len * 5 + 1);

    for (i = 0, p = line_buffer; i < len; i++) {
        p += sprintf(p, " 0x%02x", cmd[i]);
    }
    trace_scsi_generic_send_command(line_buffer);

    g_free(line_buffer);
}

/* Execute a scsi command.  Returns the length of the data expected by the
   command.  This will be Positive for data transfers from the device
   (eg. disk reads), negative for transfers to the device (eg. disk writes),
   and zero if the command does not transfer any data.  */

static int32_t scsi_send_command(SCSIRequest *req, uint8_t *cmd)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
    SCSIDevice *s = r->req.dev;
    int ret;

    if (trace_event_get_state_backends(TRACE_SCSI_GENERIC_SEND_COMMAND)) {
        scsi_generic_command_dump(cmd, r->req.cmd.len);
    }

    if (r->req.cmd.xfer == 0) {
        /* No data phase: submit immediately with a NONE transfer. */
        g_free(r->buf);
        r->buflen = 0;
        r->buf = NULL;
        /* The request is used as the AIO opaque value, so add a ref. */
        scsi_req_ref(&r->req);
        ret = execute_command(s->conf.blk, r, SG_DXFER_NONE,
                              scsi_command_complete);
        if (ret < 0) {
            scsi_command_complete_noio(r, ret);
            return 0;
        }
        return 0;
    }

    /* (Re)allocate the data buffer to match the expected transfer size. */
    if (r->buflen != r->req.cmd.xfer) {
        g_free(r->buf);
        r->buf = g_malloc(r->req.cmd.xfer);
        r->buflen = r->req.cmd.xfer;
    }

    memset(r->buf, 0, r->buflen);
    r->len = r->req.cmd.xfer;
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        r->len = 0;
        return -r->req.cmd.xfer;
    } else {
        return r->req.cmd.xfer;
    }
}

/*
 * Extract a 64-bit WWN from a VPD 0x83 designation descriptor.
 * Handles the binary NAA designator (type 3) and the "naa."-prefixed
 * SCSI name string designator (type 8).  Returns 0 on success,
 * -EINVAL if the descriptor is not a usable NAA identifier.
 */
static int read_naa_id(const uint8_t *p, uint64_t *p_wwn)
{
    int i;

    if ((p[1] & 0xF) == 3) {
        /* NAA designator type */
        if (p[3] != 8) {
            return -EINVAL;
        }
        *p_wwn = ldq_be_p(p + 4);
        return 0;
    }

    if ((p[1] & 0xF) == 8) {
        /* SCSI name string designator type */
        if (p[3] < 20 || memcmp(&p[4], "naa.", 4)) {
            return -EINVAL;
        }
        if (p[3] > 20 && p[24] != ',') {
            return -EINVAL;
        }
        *p_wwn = 0;
        /* Parse the 16 hex digits following the "naa." prefix. */
        for (i = 8; i < 24; i++) {
            char c = qemu_toupper(p[i]);
            c -= (c >= '0' && c <= '9' ? '0' : 'A' - 10);
            *p_wwn = (*p_wwn << 4) | c;
        }
        return 0;
    }

    return -EINVAL;
}

/*
 * Synchronously issue a data-in command through SG_IO.
 * Returns 0 on clean completion, -1 on any ioctl, SCSI, driver,
 * or host error.  timeout is in seconds.
 */
int scsi_SG_IO_FROM_DEV(BlockBackend *blk, uint8_t *cmd, uint8_t cmd_size,
                        uint8_t *buf, uint8_t buf_size, uint32_t timeout)
{
    sg_io_hdr_t io_header;
    uint8_t sensebuf[8];
    int ret;

    memset(&io_header, 0, sizeof(io_header));
    io_header.interface_id = 'S';
    io_header.dxfer_direction = SG_DXFER_FROM_DEV;
    io_header.dxfer_len = buf_size;
    io_header.dxferp = buf;
    io_header.cmdp = cmd;
    io_header.cmd_len = cmd_size;
    io_header.mx_sb_len = sizeof(sensebuf);
    io_header.sbp = sensebuf;
    io_header.timeout = timeout * 1000;  /* SG_IO wants ms */

    trace_scsi_generic_ioctl_sgio_command(cmd[0], io_header.timeout);
    ret = blk_ioctl(blk, SG_IO, &io_header);
    if (ret < 0 || io_header.status ||
        io_header.driver_status || io_header.host_status) {
        trace_scsi_generic_ioctl_sgio_done(cmd[0], ret, io_header.status,
                                           io_header.host_status);
        return -1;
    }
    return 0;
}

/*
 * Executes an INQUIRY request with EVPD set to retrieve the
 * available VPD pages of the device. If the device does
 * not support the Block Limits page (page 0xb0), set
 * the needs_vpd_bl_emulation flag for future use.
 */
static void scsi_generic_set_vpd_bl_emulation(SCSIDevice *s)
{
    uint8_t cmd[6];
    uint8_t buf[250];
    uint8_t page_len;
    int ret, i;

    memset(cmd, 0, sizeof(cmd));
    memset(buf, 0, sizeof(buf));
    cmd[0] = INQUIRY;
    cmd[1] = 1;          /* EVPD */
    cmd[2] = 0x00;       /* Supported VPD Pages */
    cmd[4] = sizeof(buf);

    ret = scsi_SG_IO_FROM_DEV(s->conf.blk, cmd, sizeof(cmd),
                              buf, sizeof(buf), s->io_timeout);
    if (ret < 0) {
        /*
         * Do not assume anything if we can't retrieve the
         * INQUIRY response to assert the VPD Block Limits
         * support.
         */
        s->needs_vpd_bl_emulation = false;
        return;
    }

    /* Scan the supported-pages list for 0xb0. */
    page_len = buf[3];
    for (i = 4; i < MIN(sizeof(buf), page_len + 4); i++) {
        if (buf[i] == 0xb0) {
            s->needs_vpd_bl_emulation = false;
            return;
        }
    }
    s->needs_vpd_bl_emulation = true;
}

/*
 * Issue an INQUIRY for VPD page 0x83 (Device Identification) and
 * record the LUN and target-port NAA WWNs, if present.
 */
static void scsi_generic_read_device_identification(SCSIDevice *s)
{
    uint8_t cmd[6];
    uint8_t buf[250];
    int ret;
    int i, len;

    memset(cmd, 0, sizeof(cmd));
    memset(buf, 0, sizeof(buf));
    cmd[0] = INQUIRY;
    cmd[1] = 1;          /* EVPD */
    cmd[2] = 0x83;       /* Device Identification */
    cmd[4] = sizeof(buf);

    ret = scsi_SG_IO_FROM_DEV(s->conf.blk, cmd, sizeof(cmd),
                              buf, sizeof(buf), s->io_timeout);
    if (ret < 0) {
        return;
    }

    /* Walk the designation descriptor list. */
    len = MIN((buf[2] << 8) | buf[3], sizeof(buf) - 4);
    for (i = 0; i + 3 <= len; ) {
        const uint8_t *p = &buf[i + 4];
        uint64_t wwn;

        if (i + (p[3] + 4) > len) {
            break;
        }

        if ((p[1] & 0x10) == 0) {
            /* Associated with the logical unit */
            if (read_naa_id(p, &wwn) == 0) {
                s->wwn = wwn;
            }
        } else if ((p[1] & 0x10) == 0x10) {
            /* Associated with the target port */
            if (read_naa_id(p, &wwn) == 0) {
                s->port_wwn = wwn;
            }
        }

        i += p[3] + 4;
    }
}

/* Probe identification and, for disks, VPD Block Limits support. */
void scsi_generic_read_device_inquiry(SCSIDevice *s)
{
    scsi_generic_read_device_identification(s);
    if (s->type == TYPE_DISK || s->type == TYPE_ZBC) {
        scsi_generic_set_vpd_bl_emulation(s);
    } else {
        s->needs_vpd_bl_emulation = false;
    }
}

/*
 * Read the current block size of a stream (tape) device via MODE
 * SENSE.  Returns the block size, or -1 on failure.
 */
static int get_stream_blocksize(BlockBackend *blk)
{
    uint8_t cmd[6];
    uint8_t buf[12];
    int ret;

    memset(cmd, 0, sizeof(cmd));
    memset(buf, 0, sizeof(buf));
    cmd[0] = MODE_SENSE;
    cmd[4] = sizeof(buf);

    ret = scsi_SG_IO_FROM_DEV(blk, cmd, sizeof(cmd), buf, sizeof(buf), 6);
    if (ret < 0) {
        return -1;
    }

    /* Block size is bytes 9-11 of the block descriptor, big-endian. */
    return (buf[9] << 16) | (buf[10] << 8) | buf[11];
}

/* Device reset: forget the learned SCSI version and cancel all requests. */
static void scsi_generic_reset(DeviceState *dev)
{
    SCSIDevice *s = SCSI_DEVICE(dev);

    s->scsi_version = s->default_scsi_version;
    scsi_device_purge_requests(s, SENSE_CODE(RESET));
}

/*
 * Realize callback: validate the backing /dev/sg* device (SG_IO v3+,
 * supported error policies), query its type and LUN, pick an initial
 * block size, and probe the device with INQUIRY.
 */
static void scsi_generic_realize(SCSIDevice *s, Error **errp)
{
    int rc;
    int sg_version;
    struct sg_scsi_id scsiid;

    if (!s->conf.blk) {
        error_setg(errp, "drive property not set");
        return;
    }

    if (blk_get_on_error(s->conf.blk, 0) != BLOCKDEV_ON_ERROR_ENOSPC &&
        blk_get_on_error(s->conf.blk, 0) != BLOCKDEV_ON_ERROR_REPORT) {
        error_setg(errp, "Device doesn't support drive option werror");
        return;
    }
    if (blk_get_on_error(s->conf.blk, 1) != BLOCKDEV_ON_ERROR_REPORT) {
        error_setg(errp, "Device doesn't support drive option rerror");
        return;
    }

    /* check we are using a driver managing SG_IO (version 3 and after) */
    rc = blk_ioctl(s->conf.blk, SG_GET_VERSION_NUM, &sg_version);
    if (rc < 0) {
        error_setg_errno(errp, -rc, "cannot get SG_IO version number");
        if (rc != -EPERM) {
            error_append_hint(errp, "Is this a SCSI device?\n");
        }
        return;
    }
    if (sg_version < 30000) {
        error_setg(errp, "scsi generic interface too old");
        return;
    }

    /* get LUN of the /dev/sg? */
    if (blk_ioctl(s->conf.blk, SG_GET_SCSI_ID, &scsiid)) {
        error_setg(errp, "SG_GET_SCSI_ID ioctl failed");
        return;
    }
    if (!blkconf_apply_backend_options(&s->conf,
                                       !blk_supports_write_perm(s->conf.blk),
                                       true, errp)) {
        return;
    }

    /* define device state */
    s->type = scsiid.scsi_type;
    trace_scsi_generic_realize_type(s->type);

    switch (s->type) {
    case TYPE_TAPE:
        s->blocksize = get_stream_blocksize(s->conf.blk);
        if (s->blocksize == -1) {
            s->blocksize = 0;
        }
        break;

        /* Make a guess for block devices, we'll fix it when the guest sends.
         * READ CAPACITY. If they don't, they likely would assume these sizes
         * anyway. (TODO: they could also send MODE SENSE).
         */
    case TYPE_ROM:
    case TYPE_WORM:
        s->blocksize = 2048;
        break;
    default:
        s->blocksize = 512;
        break;
    }

    trace_scsi_generic_realize_blocksize(s->blocksize);

    /* Only used by scsi-block, but initialize it nevertheless to be clean. */
    s->default_scsi_version = -1;
    /*
     * NOTE(review): this assignment happens after property parsing, so it
     * appears to override any user-set "io_timeout" property value with the
     * default — confirm this is intended.
     */
    s->io_timeout = DEFAULT_IO_TIMEOUT;
    scsi_generic_read_device_inquiry(s);
}

const SCSIReqOps scsi_generic_req_ops = {
    .size         = sizeof(SCSIGenericReq),
    .free_req     = scsi_free_request,
    .send_command = scsi_send_command,
    .read_data    = scsi_read_data,
    .write_data   = scsi_write_data,
    .get_buf      = scsi_get_buf,
    .load_request = scsi_generic_load_request,
    .save_request = scsi_generic_save_request,
};

static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun,
                                     uint8_t *buf, void *hba_private)
{
    return scsi_req_alloc(&scsi_generic_req_ops, d, tag, lun, hba_private);
}

static Property scsi_generic_properties[] = {
    DEFINE_PROP_DRIVE("drive", SCSIDevice, conf.blk),
    DEFINE_PROP_BOOL("share-rw", SCSIDevice, conf.share_rw, false),
    DEFINE_PROP_UINT32("io_timeout", SCSIDevice, io_timeout,
                       DEFAULT_IO_TIMEOUT),
    DEFINE_PROP_END_OF_LIST(),
};

/* Pass CDB parsing through to the generic bus helper. */
static int scsi_generic_parse_cdb(SCSIDevice *dev, SCSICommand *cmd,
                                  uint8_t *buf, size_t buf_len,
                                  void *hba_private)
{
    return scsi_bus_parse_cdb(dev, cmd, buf, buf_len, hba_private);
}

static void scsi_generic_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);

    sc->realize   = scsi_generic_realize;
    sc->alloc_req = scsi_new_request;
    sc->parse_cdb = scsi_generic_parse_cdb;
    dc->fw_name = "disk";
    dc->desc = "pass through generic scsi device (/dev/sg*)";
    dc->reset = scsi_generic_reset;
    device_class_set_props(dc, scsi_generic_properties);
    dc->vmsd  = &vmstate_scsi_device;
}

static const TypeInfo scsi_generic_info = {
    .name          = "scsi-generic",
    .parent        = TYPE_SCSI_DEVICE,
    .instance_size = sizeof(SCSIDevice),
    .class_init    = scsi_generic_class_initfn,
};

static void scsi_generic_register_types(void)
{
    type_register_static(&scsi_generic_info);
}

type_init(scsi_generic_register_types)

#endif /* __linux__ */