/*
 * Generic SCSI Device support
 *
 * Copyright (c) 2007 Bull S.A.S.
 * Based on code by Paul Brook
 * Based on code by Fabrice Bellard
 *
 * Written by Laurent Vivier <Laurent.Vivier@bull.net>
 *
 * This code is licensed under the LGPL.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/ctype.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "hw/scsi/scsi.h"
#include "migration/qemu-file-types.h"
#include "hw/qdev-properties.h"
#include "hw/qdev-properties-system.h"
#include "hw/scsi/emulation.h"
#include "sysemu/block-backend.h"
#include "trace.h"

#ifdef __linux__

#include <scsi/sg.h>
#include "scsi/constants.h"

#ifndef MAX_UINT
#define MAX_UINT ((unsigned int)-1)
#endif

typedef struct SCSIGenericReq {
    SCSIRequest req;
    uint8_t *buf;
    int buflen;
    int len;
    sg_io_hdr_t io_header;
} SCSIGenericReq;

static void scsi_generic_save_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);

    qemu_put_sbe32s(f, &r->buflen);
    if (r->buflen && r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        assert(!r->req.sg);
        qemu_put_buffer(f, r->buf, r->req.cmd.xfer);
    }
}

static void scsi_generic_load_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);

    qemu_get_sbe32s(f, &r->buflen);
    if (r->buflen && r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        assert(!r->req.sg);
        qemu_get_buffer(f, r->buf, r->req.cmd.xfer);
    }
}

static void scsi_free_request(SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);

    g_free(r->buf);
}
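
/*
 * Both completion paths below funnel into scsi_command_complete_noio(),
 * which maps a negative errno or the sg_io_hdr host/driver/SCSI status
 * fields onto a SCSI status (and sense data) visible to the guest.
 */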

/* Helper function for command completion. */
static void scsi_command_complete_noio(SCSIGenericReq *r, int ret)
{
    int status;
    SCSISense sense;
    sg_io_hdr_t *io_hdr = &r->io_header;

    assert(r->req.aiocb == NULL);

    if (r->req.io_canceled) {
        scsi_req_cancel_complete(&r->req);
        goto done;
    }
    if (ret < 0) {
        status = scsi_sense_from_errno(-ret, &sense);
        if (status == CHECK_CONDITION) {
            scsi_req_build_sense(&r->req, sense);
        }
    } else if (io_hdr->host_status != SCSI_HOST_OK) {
        scsi_req_complete_failed(&r->req, io_hdr->host_status);
        goto done;
    } else if (io_hdr->driver_status & SG_ERR_DRIVER_TIMEOUT) {
        status = BUSY;
    } else {
        status = io_hdr->status;
        if (io_hdr->driver_status & SG_ERR_DRIVER_SENSE) {
            r->req.sense_len = io_hdr->sb_len_wr;
        }
    }
    trace_scsi_generic_command_complete_noio(r, r->req.tag, status);

    scsi_req_complete(&r->req, status);
done:
    scsi_req_unref(&r->req);
}

static void scsi_command_complete(void *opaque, int ret)
{
    SCSIGenericReq *r = (SCSIGenericReq *)opaque;
    SCSIDevice *s = r->req.dev;

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->conf.blk));
    scsi_command_complete_noio(r, ret);
    aio_context_release(blk_get_aio_context(s->conf.blk));
}

static int execute_command(BlockBackend *blk,
                           SCSIGenericReq *r, int direction,
                           BlockCompletionFunc *complete)
{
    SCSIDevice *s = r->req.dev;

    r->io_header.interface_id = 'S';
    r->io_header.dxfer_direction = direction;
    r->io_header.dxferp = r->buf;
    r->io_header.dxfer_len = r->buflen;
    r->io_header.cmdp = r->req.cmd.buf;
    r->io_header.cmd_len = r->req.cmd.len;
    r->io_header.mx_sb_len = sizeof(r->req.sense);
    r->io_header.sbp = r->req.sense;
    r->io_header.timeout = s->io_timeout * 1000;
    r->io_header.usr_ptr = r;
    r->io_header.flags |= SG_FLAG_DIRECT_IO;

    trace_scsi_generic_aio_sgio_command(r->req.tag, r->req.cmd.buf[0],
                                        r->io_header.timeout);
    r->req.aiocb = blk_aio_ioctl(blk, SG_IO, &r->io_header, complete, r);
    if (r->req.aiocb == NULL) {
        return -EIO;
    }

    return 0;
}

static int scsi_handle_inquiry_reply(SCSIGenericReq *r, SCSIDevice *s, int len)
{
    uint8_t page, page_idx;

    /*
     * EVPD set to zero returns the standard INQUIRY data.
     *
     * Check if scsi_version is unset (-1) to avoid re-defining it
     * each time an INQUIRY with standard data is received.
     * scsi_version is initialized with -1 in scsi_generic_reset
     * and scsi_disk_reset, making sure that we'll set the
     * scsi_version after a reset. If the version field of the
     * INQUIRY response somehow changes after a guest reboot,
     * we'll be able to keep track of it.
     *
     * On SCSI-2 and older, the first 3 bits of byte 2 are the
     * ANSI-approved version, while on later versions the
     * whole byte 2 contains the version. Check if we're dealing
     * with a newer version and, in that case, assign the
     * whole byte.
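     *
     * For example, an SPC-3 device reports 0x05 in byte 2, so scsi_version
     * becomes 5, while a SCSI-2 device only sets the low 3 bits (0x02).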
     */
    if (s->scsi_version == -1 && !(r->req.cmd.buf[1] & 0x01)) {
        s->scsi_version = r->buf[2] & 0x07;
        if (s->scsi_version > 2) {
            s->scsi_version = r->buf[2];
        }
    }

    if ((s->type == TYPE_DISK || s->type == TYPE_ZBC) &&
        (r->req.cmd.buf[1] & 0x01)) {
        page = r->req.cmd.buf[2];
        if (page == 0xb0) {
            uint64_t max_transfer = blk_get_max_hw_transfer(s->conf.blk);
            uint32_t max_iov = blk_get_max_hw_iov(s->conf.blk);

            assert(max_transfer);
            max_transfer = MIN_NON_ZERO(max_transfer, max_iov * qemu_real_host_page_size())
                           / s->blocksize;
            stl_be_p(&r->buf[8], max_transfer);
            /* Also take care of the opt xfer len. */
            stl_be_p(&r->buf[12],
                     MIN_NON_ZERO(max_transfer, ldl_be_p(&r->buf[12])));
        } else if (s->needs_vpd_bl_emulation && page == 0x00 && r->buflen >= 4) {
            /*
             * Now we're capable of supplying the VPD Block Limits
             * response if the hardware can't. Add it in the INQUIRY
             * Supported VPD pages response in case we are using the
             * emulation for this device.
             *
             * This way, the guest kernel will be aware of the support
             * and will use it to properly set up the SCSI device.
             *
             * VPD page numbers must be sorted, so insert 0xb0 at the
             * right place with an in-place insert. When the while loop
             * begins, the device response is at r[0] to r[page_idx - 1].
             */
            page_idx = lduw_be_p(r->buf + 2) + 4;
            page_idx = MIN(page_idx, r->buflen);
            while (page_idx > 4 && r->buf[page_idx - 1] >= 0xb0) {
                if (page_idx < r->buflen) {
                    r->buf[page_idx] = r->buf[page_idx - 1];
                }
                page_idx--;
            }
            if (page_idx < r->buflen) {
                r->buf[page_idx] = 0xb0;
            }
            stw_be_p(r->buf + 2, lduw_be_p(r->buf + 2) + 1);

            if (len < r->buflen) {
                len++;
            }
        }
    }
    return len;
}

static int scsi_generic_emulate_block_limits(SCSIGenericReq *r, SCSIDevice *s)
{
    int len;
    uint8_t buf[64];

    SCSIBlockLimits bl = {
        .max_io_sectors = blk_get_max_transfer(s->conf.blk) / s->blocksize
    };

    memset(r->buf, 0, r->buflen);
    stb_p(buf, s->type);
    stb_p(buf + 1, 0xb0);
    len = scsi_emulate_block_limits(buf + 4, &bl);
    assert(len <= sizeof(buf) - 4);
    stw_be_p(buf + 2, len);

    memcpy(r->buf, buf, MIN(r->buflen, len + 4));

    r->io_header.sb_len_wr = 0;

    /*
     * We have valid contents in the reply buffer but the
     * io_header can report a sense error coming from
     * the hardware in scsi_command_complete_noio. Clean
     * up the io_header to avoid reporting it.
     */
    r->io_header.driver_status = 0;
    r->io_header.status = 0;

    return r->buflen;
}

static void scsi_read_complete(void * opaque, int ret)
{
    SCSIGenericReq *r = (SCSIGenericReq *)opaque;
    SCSIDevice *s = r->req.dev;
    int len;

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->conf.blk));

    if (ret || r->req.io_canceled) {
        scsi_command_complete_noio(r, ret);
        goto done;
    }

    len = r->io_header.dxfer_len - r->io_header.resid;
    trace_scsi_generic_read_complete(r->req.tag, len);

    r->len = -1;

    if (r->io_header.driver_status & SG_ERR_DRIVER_SENSE) {
        SCSISense sense =
            scsi_parse_sense_buf(r->req.sense, r->io_header.sb_len_wr);

        /*
         * Check if this is a VPD Block Limits request that
         * resulted in sense error but would need emulation.
         * In this case, emulate a valid VPD response.
         */
        if (sense.key == ILLEGAL_REQUEST &&
            s->needs_vpd_bl_emulation &&
            r->req.cmd.buf[0] == INQUIRY &&
            (r->req.cmd.buf[1] & 0x01) &&
            r->req.cmd.buf[2] == 0xb0) {
            len = scsi_generic_emulate_block_limits(r, s);
            /*
             * It's okay to jump to req_complete: no need to
             * let scsi_handle_inquiry_reply handle an
             * INQUIRY VPD BL request we created manually.
             */
        }
        if (sense.key) {
            goto req_complete;
        }
    }

    if (r->io_header.host_status != SCSI_HOST_OK ||
        (r->io_header.driver_status & SG_ERR_DRIVER_TIMEOUT) ||
        r->io_header.status != GOOD ||
        len == 0) {
        scsi_command_complete_noio(r, 0);
        goto done;
    }

    /* Snoop READ CAPACITY output to set the blocksize. */
    if (r->req.cmd.buf[0] == READ_CAPACITY_10 &&
        (ldl_be_p(&r->buf[0]) != 0xffffffffU || s->max_lba == 0)) {
        s->blocksize = ldl_be_p(&r->buf[4]);
        s->max_lba = ldl_be_p(&r->buf[0]) & 0xffffffffULL;
    } else if (r->req.cmd.buf[0] == SERVICE_ACTION_IN_16 &&
               (r->req.cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) {
        s->blocksize = ldl_be_p(&r->buf[8]);
        s->max_lba = ldq_be_p(&r->buf[0]);
    }

    /*
     * Patch MODE SENSE device specific parameters if the BDS is opened
     * readonly.
     */
    if ((s->type == TYPE_DISK || s->type == TYPE_TAPE || s->type == TYPE_ZBC) &&
        !blk_is_writable(s->conf.blk) &&
        (r->req.cmd.buf[0] == MODE_SENSE ||
         r->req.cmd.buf[0] == MODE_SENSE_10) &&
        (r->req.cmd.buf[1] & 0x8) == 0) {
        if (r->req.cmd.buf[0] == MODE_SENSE) {
            r->buf[2] |= 0x80;
        } else {
            r->buf[3] |= 0x80;
        }
    }
    if (r->req.cmd.buf[0] == INQUIRY) {
        len = scsi_handle_inquiry_reply(r, s, len);
    }

req_complete:
    scsi_req_data(&r->req, len);
    scsi_req_unref(&r->req);

done:
    aio_context_release(blk_get_aio_context(s->conf.blk));
}

/* Read more data from scsi device into buffer. */
static void scsi_read_data(SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
    SCSIDevice *s = r->req.dev;
    int ret;

    trace_scsi_generic_read_data(req->tag);

    /* The request is used as the AIO opaque value, so add a ref. */
    scsi_req_ref(&r->req);
    if (r->len == -1) {
        scsi_command_complete_noio(r, 0);
        return;
    }

    ret = execute_command(s->conf.blk, r, SG_DXFER_FROM_DEV,
                          scsi_read_complete);
    if (ret < 0) {
        scsi_command_complete_noio(r, ret);
    }
}

static void scsi_write_complete(void * opaque, int ret)
{
    SCSIGenericReq *r = (SCSIGenericReq *)opaque;
    SCSIDevice *s = r->req.dev;

    trace_scsi_generic_write_complete(ret);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->conf.blk));

    if (ret || r->req.io_canceled) {
        scsi_command_complete_noio(r, ret);
        goto done;
    }

    if (r->req.cmd.buf[0] == MODE_SELECT && r->req.cmd.buf[4] == 12 &&
        s->type == TYPE_TAPE) {
        s->blocksize = (r->buf[9] << 16) | (r->buf[10] << 8) | r->buf[11];
        trace_scsi_generic_write_complete_blocksize(s->blocksize);
    }

    scsi_command_complete_noio(r, ret);

done:
    aio_context_release(blk_get_aio_context(s->conf.blk));
}

/*
 * Write data to a scsi device. The transfer may complete asynchronously.
 */
static void scsi_write_data(SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
    SCSIDevice *s = r->req.dev;
    int ret;

    trace_scsi_generic_write_data(req->tag);
    if (r->len == 0) {
        r->len = r->buflen;
        scsi_req_data(&r->req, r->len);
        return;
    }

    /* The request is used as the AIO opaque value, so add a ref. */
    scsi_req_ref(&r->req);
    ret = execute_command(s->conf.blk, r, SG_DXFER_TO_DEV, scsi_write_complete);
    if (ret < 0) {
        scsi_command_complete_noio(r, ret);
    }
}

/* Return a pointer to the data buffer. */
static uint8_t *scsi_get_buf(SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);

    return r->buf;
}

static void scsi_generic_command_dump(uint8_t *cmd, int len)
{
    int i;
    char *line_buffer, *p;

    line_buffer = g_malloc(len * 5 + 1);

    for (i = 0, p = line_buffer; i < len; i++) {
        p += sprintf(p, " 0x%02x", cmd[i]);
    }
    trace_scsi_generic_send_command(line_buffer);

    g_free(line_buffer);
}

/* Execute a scsi command. Returns the length of the data expected by the
   command. This will be positive for data transfers from the device
   (eg. disk reads), negative for transfers to the device (eg. disk writes),
   and zero if the command does not transfer any data. */

static int32_t scsi_send_command(SCSIRequest *req, uint8_t *cmd)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
    SCSIDevice *s = r->req.dev;
    int ret;

    if (trace_event_get_state_backends(TRACE_SCSI_GENERIC_SEND_COMMAND)) {
        scsi_generic_command_dump(cmd, r->req.cmd.len);
    }

    if (r->req.cmd.xfer == 0) {
        g_free(r->buf);
        r->buflen = 0;
        r->buf = NULL;
        /* The request is used as the AIO opaque value, so add a ref. */
        scsi_req_ref(&r->req);
        ret = execute_command(s->conf.blk, r, SG_DXFER_NONE,
                              scsi_command_complete);
        if (ret < 0) {
            scsi_command_complete_noio(r, ret);
            return 0;
        }
        return 0;
    }

    if (r->buflen != r->req.cmd.xfer) {
        g_free(r->buf);
        r->buf = g_malloc(r->req.cmd.xfer);
        r->buflen = r->req.cmd.xfer;
    }

    memset(r->buf, 0, r->buflen);
    r->len = r->req.cmd.xfer;
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        r->len = 0;
        return -r->req.cmd.xfer;
    } else {
        return r->req.cmd.xfer;
    }
}

static int read_naa_id(const uint8_t *p, uint64_t *p_wwn)
{
    int i;

    if ((p[1] & 0xF) == 3) {
        /* NAA designator type */
        if (p[3] != 8) {
            return -EINVAL;
        }
        *p_wwn = ldq_be_p(p + 4);
        return 0;
    }

    if ((p[1] & 0xF) == 8) {
        /* SCSI name string designator type */
        if (p[3] < 20 || memcmp(&p[4], "naa.", 4)) {
            return -EINVAL;
        }
        if (p[3] > 20 && p[24] != ',') {
            return -EINVAL;
        }
        *p_wwn = 0;
        for (i = 8; i < 24; i++) {
            char c = qemu_toupper(p[i]);
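            /* Map the ASCII hex digit to its numeric value. */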
            c -= (c >= '0' && c <= '9' ? '0' : 'A' - 10);
            *p_wwn = (*p_wwn << 4) | c;
        }
        return 0;
    }

    return -EINVAL;
}

int scsi_SG_IO_FROM_DEV(BlockBackend *blk, uint8_t *cmd, uint8_t cmd_size,
                        uint8_t *buf, uint8_t buf_size, uint32_t timeout)
{
    sg_io_hdr_t io_header;
    uint8_t sensebuf[8];
    int ret;

    memset(&io_header, 0, sizeof(io_header));
    io_header.interface_id = 'S';
    io_header.dxfer_direction = SG_DXFER_FROM_DEV;
    io_header.dxfer_len = buf_size;
    io_header.dxferp = buf;
    io_header.cmdp = cmd;
    io_header.cmd_len = cmd_size;
    io_header.mx_sb_len = sizeof(sensebuf);
    io_header.sbp = sensebuf;
    io_header.timeout = timeout * 1000;

    trace_scsi_generic_ioctl_sgio_command(cmd[0], io_header.timeout);
    ret = blk_ioctl(blk, SG_IO, &io_header);
    if (ret < 0 || io_header.status ||
        io_header.driver_status || io_header.host_status) {
        trace_scsi_generic_ioctl_sgio_done(cmd[0], ret, io_header.status,
                                           io_header.host_status);
        return -1;
    }
    return 0;
}

/*
 * Executes an INQUIRY request with EVPD set to retrieve the
 * available VPD pages of the device. If the device does
 * not support the Block Limits page (page 0xb0), set
 * the needs_vpd_bl_emulation flag for future use.
 */
static void scsi_generic_set_vpd_bl_emulation(SCSIDevice *s)
{
    uint8_t cmd[6];
    uint8_t buf[250];
    uint8_t page_len;
    int ret, i;

    memset(cmd, 0, sizeof(cmd));
    memset(buf, 0, sizeof(buf));
    cmd[0] = INQUIRY;
    cmd[1] = 1;
    cmd[2] = 0x00;
    cmd[4] = sizeof(buf);

    ret = scsi_SG_IO_FROM_DEV(s->conf.blk, cmd, sizeof(cmd),
                              buf, sizeof(buf), s->io_timeout);
    if (ret < 0) {
        /*
         * Do not assume anything if we can't retrieve the
         * INQUIRY response to assert the VPD Block Limits
         * support.
         */
        s->needs_vpd_bl_emulation = false;
        return;
    }

    page_len = buf[3];
    for (i = 4; i < MIN(sizeof(buf), page_len + 4); i++) {
        if (buf[i] == 0xb0) {
            s->needs_vpd_bl_emulation = false;
            return;
        }
    }
    s->needs_vpd_bl_emulation = true;
}

static void scsi_generic_read_device_identification(SCSIDevice *s)
{
    uint8_t cmd[6];
    uint8_t buf[250];
    int ret;
    int i, len;

    memset(cmd, 0, sizeof(cmd));
    memset(buf, 0, sizeof(buf));
    cmd[0] = INQUIRY;
    cmd[1] = 1;
    cmd[2] = 0x83;
    cmd[4] = sizeof(buf);

    ret = scsi_SG_IO_FROM_DEV(s->conf.blk, cmd, sizeof(cmd),
                              buf, sizeof(buf), s->io_timeout);
    if (ret < 0) {
        return;
    }

    len = MIN((buf[2] << 8) | buf[3], sizeof(buf) - 4);
    for (i = 0; i + 3 <= len; ) {
        const uint8_t *p = &buf[i + 4];
        uint64_t wwn;

        if (i + (p[3] + 4) > len) {
            break;
        }

        if ((p[1] & 0x10) == 0) {
            /* Associated with the logical unit */
            if (read_naa_id(p, &wwn) == 0) {
                s->wwn = wwn;
            }
        } else if ((p[1] & 0x10) == 0x10) {
            /* Associated with the target port */
            if (read_naa_id(p, &wwn) == 0) {
                s->port_wwn = wwn;
            }
        }

        i += p[3] + 4;
    }
}

void scsi_generic_read_device_inquiry(SCSIDevice *s)
{
    scsi_generic_read_device_identification(s);
    if (s->type == TYPE_DISK || s->type == TYPE_ZBC) {
        scsi_generic_set_vpd_bl_emulation(s);
    } else {
        s->needs_vpd_bl_emulation = false;
    }
}

static int get_stream_blocksize(BlockBackend *blk)
{
    uint8_t cmd[6];
    uint8_t buf[12];
    int ret;

    memset(cmd, 0, sizeof(cmd));
    memset(buf, 0, sizeof(buf));
    cmd[0] = MODE_SENSE;
    cmd[4] = sizeof(buf);

    ret = scsi_SG_IO_FROM_DEV(blk, cmd, sizeof(cmd), buf, sizeof(buf), 6);
    if (ret < 0) {
        return -1;
    }

    return (buf[9] << 16) | (buf[10] << 8) | buf[11];
}

static void scsi_generic_reset(DeviceState *dev)
{
    SCSIDevice *s = SCSI_DEVICE(dev);

    s->scsi_version = s->default_scsi_version;
    scsi_device_purge_requests(s, SENSE_CODE(RESET));
}

static void scsi_generic_realize(SCSIDevice *s, Error **errp)
{
    int rc;
    int sg_version;
    struct sg_scsi_id scsiid;

    if (!s->conf.blk) {
        error_setg(errp, "drive property not set");
        return;
    }

    if (blk_get_on_error(s->conf.blk, 0) != BLOCKDEV_ON_ERROR_ENOSPC &&
        blk_get_on_error(s->conf.blk, 0) != BLOCKDEV_ON_ERROR_REPORT) {
        error_setg(errp, "Device doesn't support drive option werror");
        return;
    }
    if (blk_get_on_error(s->conf.blk, 1) != BLOCKDEV_ON_ERROR_REPORT) {
        error_setg(errp, "Device doesn't support drive option rerror");
        return;
    }

    /* check we are using a driver managing SG_IO (version 3 and after) */
    rc = blk_ioctl(s->conf.blk, SG_GET_VERSION_NUM, &sg_version);
    if (rc < 0) {
        error_setg_errno(errp, -rc, "cannot get SG_IO version number");
        if (rc != -EPERM) {
            error_append_hint(errp, "Is this a SCSI device?\n");
        }
        return;
    }
    if (sg_version < 30000) {
        error_setg(errp, "scsi generic interface too old");
        return;
    }

    /* get LUN of the /dev/sg? */
    if (blk_ioctl(s->conf.blk, SG_GET_SCSI_ID, &scsiid)) {
        error_setg(errp, "SG_GET_SCSI_ID ioctl failed");
        return;
    }
    if (!blkconf_apply_backend_options(&s->conf,
                                       !blk_supports_write_perm(s->conf.blk),
                                       true, errp)) {
        return;
    }

    /* define device state */
    s->type = scsiid.scsi_type;
    trace_scsi_generic_realize_type(s->type);

    switch (s->type) {
    case TYPE_TAPE:
        s->blocksize = get_stream_blocksize(s->conf.blk);
        if (s->blocksize == -1) {
            s->blocksize = 0;
        }
        break;

    /* Make a guess for block devices, we'll fix it when the guest sends
     * READ CAPACITY. If they don't, they likely would assume these sizes
     * anyway. (TODO: they could also send MODE SENSE).
     */
    case TYPE_ROM:
    case TYPE_WORM:
        s->blocksize = 2048;
        break;
    default:
        s->blocksize = 512;
        break;
    }

    trace_scsi_generic_realize_blocksize(s->blocksize);

    /* Only used by scsi-block, but initialize it nevertheless to be clean. */
    s->default_scsi_version = -1;
    s->io_timeout = DEFAULT_IO_TIMEOUT;
    scsi_generic_read_device_inquiry(s);
}

const SCSIReqOps scsi_generic_req_ops = {
    .size = sizeof(SCSIGenericReq),
    .free_req = scsi_free_request,
    .send_command = scsi_send_command,
    .read_data = scsi_read_data,
    .write_data = scsi_write_data,
    .get_buf = scsi_get_buf,
    .load_request = scsi_generic_load_request,
    .save_request = scsi_generic_save_request,
};

static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun,
                                     uint8_t *buf, void *hba_private)
{
    return scsi_req_alloc(&scsi_generic_req_ops, d, tag, lun, hba_private);
}

static Property scsi_generic_properties[] = {
    DEFINE_PROP_DRIVE("drive", SCSIDevice, conf.blk),
    DEFINE_PROP_BOOL("share-rw", SCSIDevice, conf.share_rw, false),
    DEFINE_PROP_UINT32("io_timeout", SCSIDevice, io_timeout,
                       DEFAULT_IO_TIMEOUT),
    DEFINE_PROP_END_OF_LIST(),
};

static int scsi_generic_parse_cdb(SCSIDevice *dev, SCSICommand *cmd,
                                  uint8_t *buf, void *hba_private)
{
    return scsi_bus_parse_cdb(dev, cmd, buf, hba_private);
}

static void scsi_generic_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);

    sc->realize = scsi_generic_realize;
    sc->alloc_req = scsi_new_request;
    sc->parse_cdb = scsi_generic_parse_cdb;
    dc->fw_name = "disk";
    dc->desc = "pass through generic scsi device (/dev/sg*)";
    dc->reset = scsi_generic_reset;
    device_class_set_props(dc, scsi_generic_properties);
    dc->vmsd = &vmstate_scsi_device;
}

static const TypeInfo scsi_generic_info = {
    .name = "scsi-generic",
    .parent = TYPE_SCSI_DEVICE,
    .instance_size = sizeof(SCSIDevice),
    .class_init = scsi_generic_class_initfn,
};

static void scsi_generic_register_types(void)
{
    type_register_static(&scsi_generic_info);
}

type_init(scsi_generic_register_types)

#endif /* __linux__ */