/*
 * Generic SCSI Device support
 *
 * Copyright (c) 2007 Bull S.A.S.
 * Based on code by Paul Brook
 * Based on code by Fabrice Bellard
 *
 * Written by Laurent Vivier <Laurent.Vivier@bull.net>
 *
 * This code is licensed under the LGPL.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/ctype.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "hw/scsi/scsi.h"
#include "migration/qemu-file-types.h"
#include "hw/qdev-properties.h"
#include "hw/qdev-properties-system.h"
#include "hw/scsi/emulation.h"
#include "sysemu/block-backend.h"
#include "trace.h"

#ifdef __linux__

#include <scsi/sg.h>
#include "scsi/constants.h"

#ifndef MAX_UINT
#define MAX_UINT ((unsigned int)-1)
#endif

/*
 * Per-request state for the SG_IO passthrough path.  One of these wraps
 * every SCSIRequest handled by this device.
 */
typedef struct SCSIGenericReq {
    SCSIRequest req;
    uint8_t *buf;           /* data buffer, g_malloc'ed to cmd.xfer bytes */
    int buflen;             /* allocated size of buf */
    int len;                /* remaining transfer; -1 once data was returned */
    sg_io_hdr_t io_header;  /* Linux SG_IO control block for this command */
} SCSIGenericReq;

/*
 * Migration: save in-flight request state.  Only data headed to the
 * device needs to travel with the request; a read buffer is refilled
 * when the command is re-executed on the destination.
 */
static void scsi_generic_save_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);

    qemu_put_sbe32s(f, &r->buflen);
    if (r->buflen && r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        assert(!r->req.sg);
        /* buflen was allocated as cmd.xfer, so this covers the whole buffer */
        qemu_put_buffer(f, r->buf, r->req.cmd.xfer);
    }
}

/* Migration: counterpart of scsi_generic_save_request. */
static void scsi_generic_load_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);

    qemu_get_sbe32s(f, &r->buflen);
    if (r->buflen && r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        assert(!r->req.sg);
        qemu_get_buffer(f, r->buf, r->req.cmd.xfer);
    }
}

/* Release the per-request data buffer when the SCSIRequest is freed. */
static void scsi_free_request(SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);

    g_free(r->buf);
}

/*
 * Helper function for command completion.
 *
 * Translates the SG_IO outcome (errno, host_status, driver_status,
 * SCSI status byte) into a SCSI status for the guest and completes
 * the request.  Drops the reference the submit path took on r->req,
 * so the caller must not touch r afterwards.
 */
static void scsi_command_complete_noio(SCSIGenericReq *r, int ret)
{
    int status;
    SCSISense sense;
    sg_io_hdr_t *io_hdr = &r->io_header;

    /* The AIO callback (if any) must have cleared aiocb before calling us */
    assert(r->req.aiocb == NULL);

    if (r->req.io_canceled) {
        scsi_req_cancel_complete(&r->req);
        goto done;
    }
    if (ret < 0) {
        /* Local (host-side) failure: synthesize sense from errno */
        status = scsi_sense_from_errno(-ret, &sense);
        if (status == CHECK_CONDITION) {
            scsi_req_build_sense(&r->req, sense);
        }
    } else if (io_hdr->host_status != SCSI_HOST_OK) {
        scsi_req_complete_failed(&r->req, io_hdr->host_status);
        goto done;
    } else if (io_hdr->driver_status & SG_ERR_DRIVER_TIMEOUT) {
        /* Report a timed-out command as BUSY so the guest retries */
        status = BUSY;
    } else {
        status = io_hdr->status;
        if (io_hdr->driver_status & SG_ERR_DRIVER_SENSE) {
            /* Sense data was already written into r->req.sense by SG_IO */
            r->req.sense_len = io_hdr->sb_len_wr;
        }
    }
    trace_scsi_generic_command_complete_noio(r, r->req.tag, status);

    scsi_req_complete(&r->req, status);
done:
    scsi_req_unref(&r->req);
}

/*
 * AIO completion callback for commands with no data phase handled here.
 * Takes the BlockBackend's AioContext lock around the completion work.
 */
static void scsi_command_complete(void *opaque, int ret)
{
    SCSIGenericReq *r = (SCSIGenericReq *)opaque;
    SCSIDevice *s = r->req.dev;

    aio_context_acquire(blk_get_aio_context(s->conf.blk));

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    scsi_command_complete_noio(r, ret);
    aio_context_release(blk_get_aio_context(s->conf.blk));
}

/*
 * Fill in the sg_io_hdr for this request and submit it asynchronously
 * via the SG_IO ioctl.  Returns 0 on submission, -EIO if no aiocb could
 * be obtained.  The caller is responsible for holding a reference on
 * r->req for the duration of the I/O (it is passed as the AIO opaque).
 */
static int execute_command(BlockBackend *blk,
                           SCSIGenericReq *r, int direction,
                           BlockCompletionFunc *complete)
{
    SCSIDevice *s = r->req.dev;

    r->io_header.interface_id = 'S';
    r->io_header.dxfer_direction = direction;
    r->io_header.dxferp = r->buf;
    r->io_header.dxfer_len = r->buflen;
    r->io_header.cmdp = r->req.cmd.buf;
    r->io_header.cmd_len = r->req.cmd.len;
    r->io_header.mx_sb_len = sizeof(r->req.sense);
    r->io_header.sbp = r->req.sense;
    r->io_header.timeout = s->io_timeout * 1000;    /* SG_IO wants ms */
    r->io_header.usr_ptr = r;
    r->io_header.flags |= SG_FLAG_DIRECT_IO;

    trace_scsi_generic_aio_sgio_command(r->req.tag, r->req.cmd.buf[0],
                                        r->io_header.timeout);
    r->req.aiocb = blk_aio_ioctl(blk, SG_IO, &r->io_header, complete, r);
    if (r->req.aiocb == NULL) {
        return -EIO;
    }

    return 0;
}

/*
 * Maximum transfer length the host hardware supports, expressed in
 * units of the device's logical block size (as VPD page 0xb0 expects).
 */
static uint64_t calculate_max_transfer(SCSIDevice *s)
{
    uint64_t max_transfer = blk_get_max_hw_transfer(s->conf.blk);
    uint32_t max_iov = blk_get_max_hw_iov(s->conf.blk);

    assert(max_transfer);
    /* Also bounded by how many iovec entries the host can take per request */
    max_transfer = MIN_NON_ZERO(max_transfer,
                                max_iov * qemu_real_host_page_size());

    return max_transfer / s->blocksize;
}

/*
 * Post-process an INQUIRY reply before handing it to the guest:
 * snoop the SCSI version, clamp the reported transfer limits in VPD
 * page 0xb0, and advertise page 0xb0 in the supported-pages list when
 * we emulate it.  Returns the (possibly grown) reply length.
 */
static int scsi_handle_inquiry_reply(SCSIGenericReq *r, SCSIDevice *s, int len)
{
    uint8_t page, page_idx;

    /*
     * EVPD set to zero returns the standard INQUIRY data.
     *
     * Check if scsi_version is unset (-1) to avoid re-defining it
     * each time an INQUIRY with standard data is received.
     * scsi_version is initialized with -1 in scsi_generic_reset
     * and scsi_disk_reset, making sure that we'll set the
     * scsi_version after a reset. If the version field of the
     * INQUIRY response somehow changes after a guest reboot,
     * we'll be able to keep track of it.
     *
     * On SCSI-2 and older, first 3 bits of byte 2 is the
     * ANSI-approved version, while on later versions the
     * whole byte 2 contains the version. Check if we're dealing
     * with a newer version and, in that case, assign the
     * whole byte.
     */
    if (s->scsi_version == -1 && !(r->req.cmd.buf[1] & 0x01)) {
        s->scsi_version = r->buf[2] & 0x07;
        if (s->scsi_version > 2) {
            s->scsi_version = r->buf[2];
        }
    }

    if ((s->type == TYPE_DISK || s->type == TYPE_ZBC) &&
        (r->req.cmd.buf[1] & 0x01)) {
        page = r->req.cmd.buf[2];
        if (page == 0xb0) {
            /* Block Limits page: cap MAXIMUM TRANSFER LENGTH at bytes 8-11 */
            uint64_t max_transfer = calculate_max_transfer(s);
            stl_be_p(&r->buf[8], max_transfer);
            /* Also take care of the opt xfer len. */
            stl_be_p(&r->buf[12],
                     MIN_NON_ZERO(max_transfer, ldl_be_p(&r->buf[12])));
        } else if (s->needs_vpd_bl_emulation && page == 0x00 && r->buflen >= 4) {
            /*
             * Now we're capable of supplying the VPD Block Limits
             * response if the hardware can't. Add it in the INQUIRY
             * Supported VPD pages response in case we are using the
             * emulation for this device.
             *
             * This way, the guest kernel will be aware of the support
             * and will use it to proper setup the SCSI device.
             *
             * VPD page numbers must be sorted, so insert 0xb0 at the
             * right place with an in-place insert. When the while loop
             * begins the device response is at r[0] to r[page_idx - 1].
             */
            page_idx = lduw_be_p(r->buf + 2) + 4;
            page_idx = MIN(page_idx, r->buflen);
            while (page_idx > 4 && r->buf[page_idx - 1] >= 0xb0) {
                if (page_idx < r->buflen) {
                    r->buf[page_idx] = r->buf[page_idx - 1];
                }
                page_idx--;
            }
            if (page_idx < r->buflen) {
                r->buf[page_idx] = 0xb0;
            }
            /* One more page is now advertised in the page-length field */
            stw_be_p(r->buf + 2, lduw_be_p(r->buf + 2) + 1);

            if (len < r->buflen) {
                len++;
            }
        }
    }
    return len;
}

/*
 * Build an emulated VPD Block Limits (0xb0) reply in r->buf for devices
 * whose hardware rejects the page.  Clears the error state left in the
 * io_header so the reply is not discarded.  Returns the reply length.
 */
static int scsi_generic_emulate_block_limits(SCSIGenericReq *r, SCSIDevice *s)
{
    int len;
    uint8_t buf[64];

    SCSIBlockLimits bl = {
        .max_io_sectors = calculate_max_transfer(s),
    };

    /* Compose the page in a scratch buffer, then copy what fits */
    memset(r->buf, 0, r->buflen);
    stb_p(buf, s->type);
    stb_p(buf + 1, 0xb0);
    len = scsi_emulate_block_limits(buf + 4, &bl);
    assert(len <= sizeof(buf) - 4);
    stw_be_p(buf + 2, len);

    memcpy(r->buf, buf, MIN(r->buflen, len + 4));

    r->io_header.sb_len_wr = 0;

    /*
     * We have valid contents in the reply buffer but the
     * io_header can report a sense error coming from
     * the hardware in scsi_command_complete_noio. Clean
     * up the io_header to avoid reporting it.
     */
    r->io_header.driver_status = 0;
    r->io_header.status = 0;

    return r->buflen;
}

/*
 * AIO completion callback for the data-in phase.  Snoops several
 * replies (READ CAPACITY, MODE SENSE, INQUIRY) to keep device state in
 * sync and patch guest-visible data before delivering it.
 */
static void scsi_read_complete(void * opaque, int ret)
{
    SCSIGenericReq *r = (SCSIGenericReq *)opaque;
    SCSIDevice *s = r->req.dev;
    int len;

    aio_context_acquire(blk_get_aio_context(s->conf.blk));

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    if (ret || r->req.io_canceled) {
        scsi_command_complete_noio(r, ret);
        goto done;
    }

    /* Bytes actually transferred = requested length minus residual */
    len = r->io_header.dxfer_len - r->io_header.resid;
    trace_scsi_generic_read_complete(r->req.tag, len);

    /* Mark data as delivered; next scsi_read_data call completes the req */
    r->len = -1;

    if (r->io_header.driver_status & SG_ERR_DRIVER_SENSE) {
        SCSISense sense =
            scsi_parse_sense_buf(r->req.sense, r->io_header.sb_len_wr);

        /*
         * Check if this is a VPD Block Limits request that
         * resulted in sense error but would need emulation.
         * In this case, emulate a valid VPD response.
         */
        if (sense.key == ILLEGAL_REQUEST &&
            s->needs_vpd_bl_emulation &&
            r->req.cmd.buf[0] == INQUIRY &&
            (r->req.cmd.buf[1] & 0x01) &&
            r->req.cmd.buf[2] == 0xb0) {
            len = scsi_generic_emulate_block_limits(r, s);
            /*
             * It's okay to jump to req_complete: no need to
             * let scsi_handle_inquiry_reply handle an
             * INQUIRY VPD BL request we created manually.
             */
        }
        if (sense.key) {
            goto req_complete;
        }
    }

    if (r->io_header.host_status != SCSI_HOST_OK ||
        (r->io_header.driver_status & SG_ERR_DRIVER_TIMEOUT) ||
        r->io_header.status != GOOD ||
        len == 0) {
        scsi_command_complete_noio(r, 0);
        goto done;
    }

    /* Snoop READ CAPACITY output to set the blocksize. */
    if (r->req.cmd.buf[0] == READ_CAPACITY_10 &&
        (ldl_be_p(&r->buf[0]) != 0xffffffffU || s->max_lba == 0)) {
        s->blocksize = ldl_be_p(&r->buf[4]);
        s->max_lba = ldl_be_p(&r->buf[0]) & 0xffffffffULL;
    } else if (r->req.cmd.buf[0] == SERVICE_ACTION_IN_16 &&
               (r->req.cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) {
        s->blocksize = ldl_be_p(&r->buf[8]);
        s->max_lba = ldq_be_p(&r->buf[0]);
    }

    /*
     * Patch MODE SENSE device specific parameters if the BDS is opened
     * readonly.
     */
    if ((s->type == TYPE_DISK || s->type == TYPE_TAPE || s->type == TYPE_ZBC) &&
        !blk_is_writable(s->conf.blk) &&
        (r->req.cmd.buf[0] == MODE_SENSE ||
         r->req.cmd.buf[0] == MODE_SENSE_10) &&
        (r->req.cmd.buf[1] & 0x8) == 0) {
        /* Set the write-protect bit in the device-specific parameter byte */
        if (r->req.cmd.buf[0] == MODE_SENSE) {
            r->buf[2] |= 0x80;
        } else  {
            r->buf[3] |= 0x80;
        }
    }
    if (r->req.cmd.buf[0] == INQUIRY) {
        len = scsi_handle_inquiry_reply(r, s, len);
    }

req_complete:
    scsi_req_data(&r->req, len);
    scsi_req_unref(&r->req);

done:
    aio_context_release(blk_get_aio_context(s->conf.blk));
}

/* Read more data from scsi device into buffer.  */
static void scsi_read_data(SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
    SCSIDevice *s = r->req.dev;
    int ret;

    trace_scsi_generic_read_data(req->tag);

    /* The request is used as the AIO opaque value, so add a ref.  */
    scsi_req_ref(&r->req);
    if (r->len == -1) {
        /* Data was already delivered by scsi_read_complete: finish up */
        scsi_command_complete_noio(r, 0);
        return;
    }

    ret = execute_command(s->conf.blk, r, SG_DXFER_FROM_DEV,
                          scsi_read_complete);
    if (ret < 0) {
        scsi_command_complete_noio(r, ret);
    }
}

/*
 * AIO completion callback for the data-out phase.  Snoops MODE SELECT
 * on tapes to track the guest-programmed block size.
 */
static void scsi_write_complete(void * opaque, int ret)
{
    SCSIGenericReq *r = (SCSIGenericReq *)opaque;
    SCSIDevice *s = r->req.dev;

    trace_scsi_generic_write_complete(ret);

    aio_context_acquire(blk_get_aio_context(s->conf.blk));

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    if (ret || r->req.io_canceled) {
        scsi_command_complete_noio(r, ret);
        goto done;
    }

    if (r->req.cmd.buf[0] == MODE_SELECT && r->req.cmd.buf[4] == 12 &&
        s->type == TYPE_TAPE) {
        /* Block length is bytes 9-11 of the mode parameter block descriptor */
        s->blocksize = (r->buf[9] << 16) | (r->buf[10] << 8) | r->buf[11];
        trace_scsi_generic_write_complete_blocksize(s->blocksize);
    }

    scsi_command_complete_noio(r, ret);

done:
    aio_context_release(blk_get_aio_context(s->conf.blk));
}

/* Write data to a scsi device.  Returns nonzero on failure.
   The transfer may complete asynchronously.  */
static void scsi_write_data(SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
    SCSIDevice *s = r->req.dev;
    int ret;

    trace_scsi_generic_write_data(req->tag);
    if (r->len == 0) {
        /* First call: ask the HBA to fill the buffer before submitting */
        r->len = r->buflen;
        scsi_req_data(&r->req, r->len);
        return;
    }

    /* The request is used as the AIO opaque value, so add a ref.  */
    scsi_req_ref(&r->req);
    ret = execute_command(s->conf.blk, r, SG_DXFER_TO_DEV, scsi_write_complete);
    if (ret < 0) {
        scsi_command_complete_noio(r, ret);
    }
}

/* Return a pointer to the data buffer.  */
static uint8_t *scsi_get_buf(SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);

    return r->buf;
}

/* Dump a CDB as hex bytes through the trace backend (debug aid). */
static void scsi_generic_command_dump(uint8_t *cmd, int len)
{
    int i;
    char *line_buffer, *p;

    /* 5 chars per byte (" 0xNN") plus the terminating NUL */
    line_buffer = g_malloc(len * 5 + 1);

    for (i = 0, p = line_buffer; i < len; i++) {
        p += sprintf(p, " 0x%02x", cmd[i]);
    }
    trace_scsi_generic_send_command(line_buffer);

    g_free(line_buffer);
}

/* Execute a scsi command.  Returns the length of the data expected by the
   command.  This will be Positive for data transfers from the device
   (eg. disk reads), negative for transfers to the device (eg. disk writes),
   and zero if the command does not transfer any data.  */

static int32_t scsi_send_command(SCSIRequest *req, uint8_t *cmd)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
    SCSIDevice *s = r->req.dev;
    int ret;

    if (trace_event_get_state_backends(TRACE_SCSI_GENERIC_SEND_COMMAND)) {
        scsi_generic_command_dump(cmd, r->req.cmd.len);
    }

    if (r->req.cmd.xfer == 0) {
        /* No data phase: submit immediately with a NULL buffer */
        g_free(r->buf);
        r->buflen = 0;
        r->buf = NULL;
        /* The request is used as the AIO opaque value, so add a ref.  */
        scsi_req_ref(&r->req);
        ret = execute_command(s->conf.blk, r, SG_DXFER_NONE,
                              scsi_command_complete);
        if (ret < 0) {
            scsi_command_complete_noio(r, ret);
            return 0;
        }
        return 0;
    }

    /* (Re)allocate the data buffer to match this command's transfer size */
    if (r->buflen != r->req.cmd.xfer) {
        g_free(r->buf);
        r->buf = g_malloc(r->req.cmd.xfer);
        r->buflen = r->req.cmd.xfer;
    }

    memset(r->buf, 0, r->buflen);
    r->len = r->req.cmd.xfer;
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        r->len = 0;
        return -r->req.cmd.xfer;
    } else {
        return r->req.cmd.xfer;
    }
}

/*
 * Extract a 64-bit WWN from a VPD 0x83 designation descriptor.
 * Handles the binary NAA designator (type 3) and the "naa." SCSI name
 * string designator (type 8).  Returns 0 on success, -EINVAL if the
 * descriptor is not a usable NAA identifier.
 */
static int read_naa_id(const uint8_t *p, uint64_t *p_wwn)
{
    int i;

    if ((p[1] & 0xF) == 3) {
        /* NAA designator type */
        if (p[3] != 8) {
            return -EINVAL;
        }
        *p_wwn = ldq_be_p(p + 4);
        return 0;
    }

    if ((p[1] & 0xF) == 8) {
        /* SCSI name string designator type */
        if (p[3] < 20 || memcmp(&p[4], "naa.", 4)) {
            return -EINVAL;
        }
        if (p[3] > 20 && p[24] != ',') {
            return -EINVAL;
        }
        *p_wwn = 0;
        /* Parse the 16 hex digits following "naa." into the WWN */
        for (i = 8; i < 24; i++) {
            char c = qemu_toupper(p[i]);
            c -= (c >= '0' && c <= '9' ? '0' : 'A' - 10);
            *p_wwn = (*p_wwn << 4) | c;
        }
        return 0;
    }

    return -EINVAL;
}

/*
 * Synchronously execute a device-to-host command via the SG_IO ioctl.
 * Returns 0 on full success; -1 if the ioctl failed or any of the SCSI,
 * driver or host status bytes report an error.
 */
int scsi_SG_IO_FROM_DEV(BlockBackend *blk, uint8_t *cmd, uint8_t cmd_size,
                        uint8_t *buf, uint8_t buf_size, uint32_t timeout)
{
    sg_io_hdr_t io_header;
    uint8_t sensebuf[8];
    int ret;

    memset(&io_header, 0, sizeof(io_header));
    io_header.interface_id = 'S';
    io_header.dxfer_direction = SG_DXFER_FROM_DEV;
    io_header.dxfer_len = buf_size;
    io_header.dxferp = buf;
    io_header.cmdp = cmd;
    io_header.cmd_len = cmd_size;
    io_header.mx_sb_len = sizeof(sensebuf);
    io_header.sbp = sensebuf;
    io_header.timeout = timeout * 1000;     /* SG_IO wants ms */

    trace_scsi_generic_ioctl_sgio_command(cmd[0], io_header.timeout);
    ret = blk_ioctl(blk, SG_IO, &io_header);
    if (ret < 0 || io_header.status ||
        io_header.driver_status || io_header.host_status) {
        trace_scsi_generic_ioctl_sgio_done(cmd[0], ret, io_header.status,
                                           io_header.host_status);
        return -1;
    }
    return 0;
}

/*
 * Executes an INQUIRY request with EVPD set to retrieve the
 * available VPD pages of the device. If the device does
 * not support the Block Limits page (page 0xb0), set
 * the needs_vpd_bl_emulation flag for future use.
 */
static void scsi_generic_set_vpd_bl_emulation(SCSIDevice *s)
{
    uint8_t cmd[6];
    uint8_t buf[250];
    uint8_t page_len;
    int ret, i;

    memset(cmd, 0, sizeof(cmd));
    memset(buf, 0, sizeof(buf));
    cmd[0] = INQUIRY;
    cmd[1] = 1;             /* EVPD bit set */
    cmd[2] = 0x00;          /* Supported VPD Pages */
    cmd[4] = sizeof(buf);

    ret = scsi_SG_IO_FROM_DEV(s->conf.blk, cmd, sizeof(cmd),
                              buf, sizeof(buf), s->io_timeout);
    if (ret < 0) {
        /*
         * Do not assume anything if we can't retrieve the
         * INQUIRY response to assert the VPD Block Limits
         * support.
         */
        s->needs_vpd_bl_emulation = false;
        return;
    }

    /* Scan the sorted page list for 0xb0; emulate it only if absent */
    page_len = buf[3];
    for (i = 4; i < MIN(sizeof(buf), page_len + 4); i++) {
        if (buf[i] == 0xb0) {
            s->needs_vpd_bl_emulation = false;
            return;
        }
    }
    s->needs_vpd_bl_emulation = true;
}

/*
 * Fetch VPD page 0x83 (Device Identification) and record the NAA WWNs
 * for the logical unit and the target port, if present.
 */
static void scsi_generic_read_device_identification(SCSIDevice *s)
{
    uint8_t cmd[6];
    uint8_t buf[250];
    int ret;
    int i, len;

    memset(cmd, 0, sizeof(cmd));
    memset(buf, 0, sizeof(buf));
    cmd[0] = INQUIRY;
    cmd[1] = 1;             /* EVPD bit set */
    cmd[2] = 0x83;          /* Device Identification page */
    cmd[4] = sizeof(buf);

    ret = scsi_SG_IO_FROM_DEV(s->conf.blk, cmd, sizeof(cmd),
                              buf, sizeof(buf), s->io_timeout);
    if (ret < 0) {
        return;
    }

    /* Walk the designation descriptor list, clamped to what we read */
    len = MIN((buf[2] << 8) | buf[3], sizeof(buf) - 4);
    for (i = 0; i + 3 <= len; ) {
        const uint8_t *p = &buf[i + 4];
        uint64_t wwn;

        if (i + (p[3] + 4) > len) {
            break;
        }

        if ((p[1] & 0x10) == 0) {
            /* Associated with the logical unit */
            if (read_naa_id(p, &wwn) == 0) {
                s->wwn = wwn;
            }
        } else if ((p[1] & 0x10) == 0x10) {
            /* Associated with the target port */
            if (read_naa_id(p, &wwn) == 0) {
                s->port_wwn = wwn;
            }
        }

        i += p[3] + 4;
    }
}

/*
 * Gather INQUIRY-derived device state: WWNs, and (for disks/zoned
 * devices) whether VPD Block Limits emulation is needed.
 */
void scsi_generic_read_device_inquiry(SCSIDevice *s)
{
    scsi_generic_read_device_identification(s);
    if (s->type == TYPE_DISK || s->type == TYPE_ZBC) {
        scsi_generic_set_vpd_bl_emulation(s);
    } else {
        s->needs_vpd_bl_emulation = false;
    }
}

/*
 * Read a tape device's current block length with MODE SENSE.
 * Returns the block length in bytes, or -1 on failure.
 */
static int get_stream_blocksize(BlockBackend *blk)
{
    uint8_t cmd[6];
    uint8_t buf[12];
    int ret;

    memset(cmd, 0, sizeof(cmd));
    memset(buf, 0, sizeof(buf));
    cmd[0] = MODE_SENSE;
    cmd[4] = sizeof(buf);

    ret = scsi_SG_IO_FROM_DEV(blk, cmd, sizeof(cmd), buf, sizeof(buf), 6);
    if (ret < 0) {
        return -1;
    }

    /* Block length is bytes 9-11 of the mode parameter block descriptor */
    return (buf[9] << 16) | (buf[10] << 8) | buf[11];
}

/* Device reset: forget the snooped SCSI version and fail pending requests. */
static void scsi_generic_reset(DeviceState *dev)
{
    SCSIDevice *s = SCSI_DEVICE(dev);

    s->scsi_version = s->default_scsi_version;
    scsi_device_purge_requests(s, SENSE_CODE(RESET));
}

/*
 * Realize the scsi-generic device: validate the backing drive and its
 * error-handling options, verify the host node speaks SG_IO v3+, query
 * the device type, and initialize blocksize/timeout/INQUIRY-derived state.
 */
static void scsi_generic_realize(SCSIDevice *s, Error **errp)
{
    int rc;
    int sg_version;
    struct sg_scsi_id scsiid;

    if (!s->conf.blk) {
        error_setg(errp, "drive property not set");
        return;
    }

    /* Passthrough cannot retry I/O, so only enospc/report policies work */
    if (blk_get_on_error(s->conf.blk, 0) != BLOCKDEV_ON_ERROR_ENOSPC &&
        blk_get_on_error(s->conf.blk, 0) != BLOCKDEV_ON_ERROR_REPORT) {
        error_setg(errp, "Device doesn't support drive option werror");
        return;
    }
    if (blk_get_on_error(s->conf.blk, 1) != BLOCKDEV_ON_ERROR_REPORT) {
        error_setg(errp, "Device doesn't support drive option rerror");
        return;
    }

    /* check we are using a driver managing SG_IO (version 3 and after) */
    rc = blk_ioctl(s->conf.blk, SG_GET_VERSION_NUM, &sg_version);
    if (rc < 0) {
        error_setg_errno(errp, -rc, "cannot get SG_IO version number");
        if (rc != -EPERM) {
            error_append_hint(errp, "Is this a SCSI device?\n");
        }
        return;
    }
    if (sg_version < 30000) {
        error_setg(errp, "scsi generic interface too old");
        return;
    }

    /* get LUN of the /dev/sg? */
    if (blk_ioctl(s->conf.blk, SG_GET_SCSI_ID, &scsiid)) {
        error_setg(errp, "SG_GET_SCSI_ID ioctl failed");
        return;
    }
    if (!blkconf_apply_backend_options(&s->conf,
                                       !blk_supports_write_perm(s->conf.blk),
                                       true, errp)) {
        return;
    }

    /* define device state */
    s->type = scsiid.scsi_type;
    trace_scsi_generic_realize_type(s->type);

    switch (s->type) {
    case TYPE_TAPE:
        s->blocksize = get_stream_blocksize(s->conf.blk);
        if (s->blocksize == -1) {
            s->blocksize = 0;
        }
        break;

        /* Make a guess for block devices, we'll fix it when the guest sends
         * READ CAPACITY. If they don't, they likely would assume these sizes
         * anyway. (TODO: they could also send MODE SENSE).
         */
    case TYPE_ROM:
    case TYPE_WORM:
        s->blocksize = 2048;
        break;
    default:
        s->blocksize = 512;
        break;
    }

    trace_scsi_generic_realize_blocksize(s->blocksize);

    /* Only used by scsi-block, but initialize it nevertheless to be clean.  */
    s->default_scsi_version = -1;
    s->io_timeout = DEFAULT_IO_TIMEOUT;
    scsi_generic_read_device_inquiry(s);
}

const SCSIReqOps scsi_generic_req_ops = {
    .size         = sizeof(SCSIGenericReq),
    .free_req     = scsi_free_request,
    .send_command = scsi_send_command,
    .read_data    = scsi_read_data,
    .write_data   = scsi_write_data,
    .get_buf      = scsi_get_buf,
    .load_request = scsi_generic_load_request,
    .save_request = scsi_generic_save_request,
};

/* Allocate a passthrough request using the ops table above. */
static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun,
                                     uint8_t *buf, void *hba_private)
{
    return scsi_req_alloc(&scsi_generic_req_ops, d, tag, lun, hba_private);
}

static Property scsi_generic_properties[] = {
    DEFINE_PROP_DRIVE("drive", SCSIDevice, conf.blk),
    DEFINE_PROP_BOOL("share-rw", SCSIDevice, conf.share_rw, false),
    DEFINE_PROP_UINT32("io_timeout", SCSIDevice, io_timeout,
                       DEFAULT_IO_TIMEOUT),
    DEFINE_PROP_END_OF_LIST(),
};

/* CDB parsing is delegated to the bus-level generic parser. */
static int scsi_generic_parse_cdb(SCSIDevice *dev, SCSICommand *cmd,
                                  uint8_t *buf, size_t buf_len,
                                  void *hba_private)
{
    return scsi_bus_parse_cdb(dev, cmd, buf, buf_len, hba_private);
}

static void scsi_generic_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);

    sc->realize      = scsi_generic_realize;
    sc->alloc_req    = scsi_new_request;
    sc->parse_cdb    = scsi_generic_parse_cdb;
    dc->fw_name = "disk";
    dc->desc = "pass through generic scsi device (/dev/sg*)";
    dc->reset = scsi_generic_reset;
    device_class_set_props(dc, scsi_generic_properties);
    dc->vmsd  = &vmstate_scsi_device;
}

static const TypeInfo scsi_generic_info = {
    .name          = "scsi-generic",
    .parent        = TYPE_SCSI_DEVICE,
    .instance_size = sizeof(SCSIDevice),
    .class_init    = scsi_generic_class_initfn,
};

static void scsi_generic_register_types(void)
{
    type_register_static(&scsi_generic_info);
}

type_init(scsi_generic_register_types)

#endif /* __linux__ */