xref: /openbmc/qemu/hw/scsi/scsi-generic.c (revision 87b804ec)
/*
 * Generic SCSI Device support
 *
 * Copyright (c) 2007 Bull S.A.S.
 * Based on code by Paul Brook
 * Based on code by Fabrice Bellard
 *
 * Written by Laurent Vivier <Laurent.Vivier@bull.net>
 *
 * This code is licensed under the LGPL.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/ctype.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "hw/scsi/scsi.h"
#include "migration/qemu-file-types.h"
#include "hw/qdev-properties.h"
#include "hw/scsi/emulation.h"
#include "sysemu/block-backend.h"
#include "trace.h"

#ifdef __linux__

#include <scsi/sg.h>
#include "scsi/constants.h"

#ifndef MAX_UINT
#define MAX_UINT ((unsigned int)-1)
#endif

typedef struct SCSIGenericReq {
    SCSIRequest req;
    uint8_t *buf;
    int buflen;
    int len;
    sg_io_hdr_t io_header;
} SCSIGenericReq;

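/*
 * Migration helpers: save/load the data buffer length and, for
 * transfers to the device (SCSI_XFER_TO_DEV), the buffer contents.
 */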
static void scsi_generic_save_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);

    qemu_put_sbe32s(f, &r->buflen);
    if (r->buflen && r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        assert(!r->req.sg);
        qemu_put_buffer(f, r->buf, r->req.cmd.xfer);
    }
}

static void scsi_generic_load_request(QEMUFile *f, SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);

    qemu_get_sbe32s(f, &r->buflen);
    if (r->buflen && r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        assert(!r->req.sg);
        qemu_get_buffer(f, r->buf, r->req.cmd.xfer);
    }
}

static void scsi_free_request(SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);

    g_free(r->buf);
}

/* Helper function for command completion.  */
static void scsi_command_complete_noio(SCSIGenericReq *r, int ret)
{
    int status;
    SCSISense sense;

    assert(r->req.aiocb == NULL);

    if (r->req.io_canceled) {
        scsi_req_cancel_complete(&r->req);
        goto done;
    }
    status = sg_io_sense_from_errno(-ret, &r->io_header, &sense);
    if (status == CHECK_CONDITION) {
        if (r->io_header.driver_status & SG_ERR_DRIVER_SENSE) {
            r->req.sense_len = r->io_header.sb_len_wr;
        } else {
            scsi_req_build_sense(&r->req, sense);
        }
    }

    trace_scsi_generic_command_complete_noio(r, r->req.tag, status);

    scsi_req_complete(&r->req, status);
done:
    scsi_req_unref(&r->req);
}

static void scsi_command_complete(void *opaque, int ret)
{
    SCSIGenericReq *r = (SCSIGenericReq *)opaque;
    SCSIDevice *s = r->req.dev;

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->conf.blk));
    scsi_command_complete_noio(r, ret);
    aio_context_release(blk_get_aio_context(s->conf.blk));
}

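/*
 * Fill in the sg_io_hdr for this request and submit it asynchronously
 * with blk_aio_ioctl(SG_IO); 'complete' is invoked when the ioctl
 * finishes.  Returns 0 on success, -EIO if the AIO request could not
 * be issued.
 */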
static int execute_command(BlockBackend *blk,
                           SCSIGenericReq *r, int direction,
                           BlockCompletionFunc *complete)
{
    r->io_header.interface_id = 'S';
    r->io_header.dxfer_direction = direction;
    r->io_header.dxferp = r->buf;
    r->io_header.dxfer_len = r->buflen;
    r->io_header.cmdp = r->req.cmd.buf;
    r->io_header.cmd_len = r->req.cmd.len;
    r->io_header.mx_sb_len = sizeof(r->req.sense);
    r->io_header.sbp = r->req.sense;
    r->io_header.timeout = MAX_UINT;
    r->io_header.usr_ptr = r;
    r->io_header.flags |= SG_FLAG_DIRECT_IO;

    r->req.aiocb = blk_aio_ioctl(blk, SG_IO, &r->io_header, complete, r);
    if (r->req.aiocb == NULL) {
        return -EIO;
    }

    return 0;
}

static void scsi_handle_inquiry_reply(SCSIGenericReq *r, SCSIDevice *s)
{
    uint8_t page, page_idx;

    /*
     *  EVPD set to zero returns the standard INQUIRY data.
     *
     *  Check if scsi_version is unset (-1) to avoid re-defining it
     *  each time an INQUIRY with standard data is received.
     *  scsi_version is initialized with -1 in scsi_generic_reset
     *  and scsi_disk_reset, making sure that we'll set the
     *  scsi_version after a reset. If the version field of the
     *  INQUIRY response somehow changes after a guest reboot,
     *  we'll be able to keep track of it.
     *
     *  On SCSI-2 and older, the first 3 bits of byte 2 are the
     *  ANSI-approved version, while on later versions the
     *  whole byte 2 contains the version. Check if we're dealing
     *  with a newer version and, in that case, assign the
     *  whole byte.
     */
    if (s->scsi_version == -1 && !(r->req.cmd.buf[1] & 0x01)) {
        s->scsi_version = r->buf[2] & 0x07;
        if (s->scsi_version > 2) {
            s->scsi_version = r->buf[2];
        }
    }

    if ((s->type == TYPE_DISK || s->type == TYPE_ZBC) &&
        (r->req.cmd.buf[1] & 0x01)) {
        page = r->req.cmd.buf[2];
        if (page == 0xb0) {
            uint32_t max_transfer =
                blk_get_max_transfer(s->conf.blk) / s->blocksize;

            assert(max_transfer);
            stl_be_p(&r->buf[8], max_transfer);
            /* Also take care of the opt xfer len. */
            stl_be_p(&r->buf[12],
                     MIN_NON_ZERO(max_transfer, ldl_be_p(&r->buf[12])));
        } else if (s->needs_vpd_bl_emulation && page == 0x00 && r->buflen >= 4) {
            /*
             * Now we're capable of supplying the VPD Block Limits
             * response if the hardware can't. Add it in the INQUIRY
             * Supported VPD pages response in case we are using the
             * emulation for this device.
             *
             * This way, the guest kernel will be aware of the support
             * and will use it to properly set up the SCSI device.
             *
             * VPD page numbers must be sorted, so insert 0xb0 at the
             * right place with an in-place insert.  When the while loop
             * begins the device response is at r[0] to r[page_idx - 1].
             */
            page_idx = lduw_be_p(r->buf + 2) + 4;
            page_idx = MIN(page_idx, r->buflen);
            while (page_idx > 4 && r->buf[page_idx - 1] >= 0xb0) {
                if (page_idx < r->buflen) {
                    r->buf[page_idx] = r->buf[page_idx - 1];
                }
                page_idx--;
            }
            if (page_idx < r->buflen) {
                r->buf[page_idx] = 0xb0;
            }
            stw_be_p(r->buf + 2, lduw_be_p(r->buf + 2) + 1);
        }
    }
}

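/*
 * Build an emulated VPD Block Limits (page 0xb0) response in r->buf,
 * using the limits known to the block layer, and clear any sense data
 * in the io_header so the synthesized reply is not discarded.
 */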
static int scsi_generic_emulate_block_limits(SCSIGenericReq *r, SCSIDevice *s)
{
    int len;
    uint8_t buf[64];

    SCSIBlockLimits bl = {
        .max_io_sectors = blk_get_max_transfer(s->conf.blk) / s->blocksize
    };

    memset(r->buf, 0, r->buflen);
    stb_p(buf, s->type);
    stb_p(buf + 1, 0xb0);
    len = scsi_emulate_block_limits(buf + 4, &bl);
    assert(len <= sizeof(buf) - 4);
    stw_be_p(buf + 2, len);

    memcpy(r->buf, buf, MIN(r->buflen, len + 4));

    r->io_header.sb_len_wr = 0;

    /*
     * We have valid contents in the reply buffer but the
     * io_header can report a sense error coming from
     * the hardware in scsi_command_complete_noio. Clean
     * up the io_header to avoid reporting it.
     */
    r->io_header.driver_status = 0;
    r->io_header.status = 0;

    return r->buflen;
}

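/*
 * Completion callback for SG_DXFER_FROM_DEV transfers.  Besides
 * forwarding the data to the guest, this snoops the replies to
 * READ CAPACITY, MODE SENSE and INQUIRY, and substitutes an emulated
 * VPD Block Limits page when the device cannot provide one itself.
 */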
static void scsi_read_complete(void *opaque, int ret)
{
    SCSIGenericReq *r = (SCSIGenericReq *)opaque;
    SCSIDevice *s = r->req.dev;
    int len;

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->conf.blk));

    if (ret || r->req.io_canceled) {
        scsi_command_complete_noio(r, ret);
        goto done;
    }

    len = r->io_header.dxfer_len - r->io_header.resid;
    trace_scsi_generic_read_complete(r->req.tag, len);

    r->len = -1;

    if (r->io_header.driver_status & SG_ERR_DRIVER_SENSE) {
        SCSISense sense =
            scsi_parse_sense_buf(r->req.sense, r->io_header.sb_len_wr);

        /*
         * Check if this is a VPD Block Limits request that
         * resulted in sense error but would need emulation.
         * In this case, emulate a valid VPD response.
         */
        if (sense.key == ILLEGAL_REQUEST &&
            s->needs_vpd_bl_emulation &&
            r->req.cmd.buf[0] == INQUIRY &&
            (r->req.cmd.buf[1] & 0x01) &&
            r->req.cmd.buf[2] == 0xb0) {
            len = scsi_generic_emulate_block_limits(r, s);
            /*
             * It's okay to jump to req_complete: no need to
             * let scsi_handle_inquiry_reply handle an
             * INQUIRY VPD BL request we created manually.
             */
        }
        if (sense.key) {
            goto req_complete;
        }
    }

    if (len == 0) {
        scsi_command_complete_noio(r, 0);
        goto done;
    }

    /* Snoop READ CAPACITY output to set the blocksize.  */
    if (r->req.cmd.buf[0] == READ_CAPACITY_10 &&
        (ldl_be_p(&r->buf[0]) != 0xffffffffU || s->max_lba == 0)) {
        s->blocksize = ldl_be_p(&r->buf[4]);
        s->max_lba = ldl_be_p(&r->buf[0]) & 0xffffffffULL;
    } else if (r->req.cmd.buf[0] == SERVICE_ACTION_IN_16 &&
               (r->req.cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) {
        s->blocksize = ldl_be_p(&r->buf[8]);
        s->max_lba = ldq_be_p(&r->buf[0]);
    }
    blk_set_guest_block_size(s->conf.blk, s->blocksize);

    /*
     * Patch MODE SENSE device specific parameters if the BDS is opened
     * readonly.
     */
    if ((s->type == TYPE_DISK || s->type == TYPE_TAPE || s->type == TYPE_ZBC) &&
        blk_is_read_only(s->conf.blk) &&
        (r->req.cmd.buf[0] == MODE_SENSE ||
         r->req.cmd.buf[0] == MODE_SENSE_10) &&
        (r->req.cmd.buf[1] & 0x8) == 0) {
        if (r->req.cmd.buf[0] == MODE_SENSE) {
            r->buf[2] |= 0x80;
        } else {
            r->buf[3] |= 0x80;
        }
    }
    if (r->req.cmd.buf[0] == INQUIRY) {
        scsi_handle_inquiry_reply(r, s);
    }

req_complete:
    scsi_req_data(&r->req, len);
    scsi_req_unref(&r->req);

done:
    aio_context_release(blk_get_aio_context(s->conf.blk));
}

/* Read more data from scsi device into buffer.  */
static void scsi_read_data(SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
    SCSIDevice *s = r->req.dev;
    int ret;

    trace_scsi_generic_read_data(req->tag);

    /* The request is used as the AIO opaque value, so add a ref.  */
    scsi_req_ref(&r->req);
    if (r->len == -1) {
        scsi_command_complete_noio(r, 0);
        return;
    }

    ret = execute_command(s->conf.blk, r, SG_DXFER_FROM_DEV,
                          scsi_read_complete);
    if (ret < 0) {
        scsi_command_complete_noio(r, ret);
    }
}

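/*
 * Completion callback for SG_DXFER_TO_DEV transfers.  For tape devices
 * it also snoops MODE SELECT data to pick up a changed block size.
 */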
static void scsi_write_complete(void *opaque, int ret)
{
    SCSIGenericReq *r = (SCSIGenericReq *)opaque;
    SCSIDevice *s = r->req.dev;

    trace_scsi_generic_write_complete(ret);

    assert(r->req.aiocb != NULL);
    r->req.aiocb = NULL;

    aio_context_acquire(blk_get_aio_context(s->conf.blk));

    if (ret || r->req.io_canceled) {
        scsi_command_complete_noio(r, ret);
        goto done;
    }

    if (r->req.cmd.buf[0] == MODE_SELECT && r->req.cmd.buf[4] == 12 &&
        s->type == TYPE_TAPE) {
        s->blocksize = (r->buf[9] << 16) | (r->buf[10] << 8) | r->buf[11];
        trace_scsi_generic_write_complete_blocksize(s->blocksize);
    }

    scsi_command_complete_noio(r, ret);

done:
    aio_context_release(blk_get_aio_context(s->conf.blk));
}

/* Write data to a scsi device.  The transfer may complete asynchronously.  */
static void scsi_write_data(SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
    SCSIDevice *s = r->req.dev;
    int ret;

    trace_scsi_generic_write_data(req->tag);
    if (r->len == 0) {
        r->len = r->buflen;
        scsi_req_data(&r->req, r->len);
        return;
    }

    /* The request is used as the AIO opaque value, so add a ref.  */
    scsi_req_ref(&r->req);
    ret = execute_command(s->conf.blk, r, SG_DXFER_TO_DEV, scsi_write_complete);
    if (ret < 0) {
        scsi_command_complete_noio(r, ret);
    }
}

/* Return a pointer to the data buffer.  */
static uint8_t *scsi_get_buf(SCSIRequest *req)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);

    return r->buf;
}

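/* Format the CDB as a hex string and emit it through the trace backend. */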
static void scsi_generic_command_dump(uint8_t *cmd, int len)
{
    int i;
    char *line_buffer, *p;

    line_buffer = g_malloc(len * 5 + 1);

    for (i = 0, p = line_buffer; i < len; i++) {
        p += sprintf(p, " 0x%02x", cmd[i]);
    }
    trace_scsi_generic_send_command(line_buffer);

    g_free(line_buffer);
}

/* Execute a scsi command.  Returns the length of the data expected by the
   command.  This will be positive for data transfers from the device
   (e.g. disk reads), negative for transfers to the device (e.g. disk writes),
   and zero if the command does not transfer any data.  */

static int32_t scsi_send_command(SCSIRequest *req, uint8_t *cmd)
{
    SCSIGenericReq *r = DO_UPCAST(SCSIGenericReq, req, req);
    SCSIDevice *s = r->req.dev;
    int ret;

    if (trace_event_get_state_backends(TRACE_SCSI_GENERIC_SEND_COMMAND)) {
        scsi_generic_command_dump(cmd, r->req.cmd.len);
    }

    if (r->req.cmd.xfer == 0) {
        g_free(r->buf);
        r->buflen = 0;
        r->buf = NULL;
        /* The request is used as the AIO opaque value, so add a ref.  */
        scsi_req_ref(&r->req);
        ret = execute_command(s->conf.blk, r, SG_DXFER_NONE,
                              scsi_command_complete);
        if (ret < 0) {
            scsi_command_complete_noio(r, ret);
            return 0;
        }
        return 0;
    }

    if (r->buflen != r->req.cmd.xfer) {
        g_free(r->buf);
        r->buf = g_malloc(r->req.cmd.xfer);
        r->buflen = r->req.cmd.xfer;
    }

    memset(r->buf, 0, r->buflen);
    r->len = r->req.cmd.xfer;
    if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
        r->len = 0;
        return -r->req.cmd.xfer;
    } else {
        return r->req.cmd.xfer;
    }
}

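/*
 * Extract a world wide name from an INQUIRY VPD page 0x83 designator.
 * Handles the binary NAA designator (type 3) and the "naa."-prefixed
 * SCSI name string designator (type 8); returns 0 on success and
 * -EINVAL if the designator cannot be parsed.
 */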
static int read_naa_id(const uint8_t *p, uint64_t *p_wwn)
{
    int i;

    if ((p[1] & 0xF) == 3) {
        /* NAA designator type */
        if (p[3] != 8) {
            return -EINVAL;
        }
        *p_wwn = ldq_be_p(p + 4);
        return 0;
    }

    if ((p[1] & 0xF) == 8) {
        /* SCSI name string designator type */
        if (p[3] < 20 || memcmp(&p[4], "naa.", 4)) {
            return -EINVAL;
        }
        if (p[3] > 20 && p[24] != ',') {
            return -EINVAL;
        }
        *p_wwn = 0;
        for (i = 8; i < 24; i++) {
            char c = qemu_toupper(p[i]);
            c -= (c >= '0' && c <= '9' ? '0' : 'A' - 10);
            *p_wwn = (*p_wwn << 4) | c;
        }
        return 0;
    }

    return -EINVAL;
}

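/*
 * Synchronous SG_IO helper: issue 'cmd' on 'blk' and read up to
 * buf_size bytes of response into 'buf'.  Returns 0 on success, -1 if
 * the ioctl fails or the device/host reports an error.
 */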
int scsi_SG_IO_FROM_DEV(BlockBackend *blk, uint8_t *cmd, uint8_t cmd_size,
                        uint8_t *buf, uint8_t buf_size)
{
    sg_io_hdr_t io_header;
    uint8_t sensebuf[8];
    int ret;

    memset(&io_header, 0, sizeof(io_header));
    io_header.interface_id = 'S';
    io_header.dxfer_direction = SG_DXFER_FROM_DEV;
    io_header.dxfer_len = buf_size;
    io_header.dxferp = buf;
    io_header.cmdp = cmd;
    io_header.cmd_len = cmd_size;
    io_header.mx_sb_len = sizeof(sensebuf);
    io_header.sbp = sensebuf;
    io_header.timeout = 6000; /* XXX */

    ret = blk_ioctl(blk, SG_IO, &io_header);
    if (ret < 0 || io_header.driver_status || io_header.host_status) {
        return -1;
    }
    return 0;
}

/*
 * Executes an INQUIRY request with EVPD set to retrieve the
 * available VPD pages of the device. If the device does
 * not support the Block Limits page (page 0xb0), set
 * the needs_vpd_bl_emulation flag for future use.
 */
static void scsi_generic_set_vpd_bl_emulation(SCSIDevice *s)
{
    uint8_t cmd[6];
    uint8_t buf[250];
    uint8_t page_len;
    int ret, i;

    memset(cmd, 0, sizeof(cmd));
    memset(buf, 0, sizeof(buf));
    cmd[0] = INQUIRY;
    cmd[1] = 1;
    cmd[2] = 0x00;
    cmd[4] = sizeof(buf);

    ret = scsi_SG_IO_FROM_DEV(s->conf.blk, cmd, sizeof(cmd),
                              buf, sizeof(buf));
    if (ret < 0) {
        /*
         * Do not assume anything if we can't retrieve the
         * INQUIRY response that would confirm VPD Block Limits
         * support.
         */
        s->needs_vpd_bl_emulation = false;
        return;
    }

    page_len = buf[3];
    for (i = 4; i < MIN(sizeof(buf), page_len + 4); i++) {
        if (buf[i] == 0xb0) {
            s->needs_vpd_bl_emulation = false;
            return;
        }
    }
    s->needs_vpd_bl_emulation = true;
}

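/*
 * Issue INQUIRY VPD page 0x83 (Device Identification) and, if an NAA
 * designator is found, record the logical unit and target port WWNs.
 */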
static void scsi_generic_read_device_identification(SCSIDevice *s)
{
    uint8_t cmd[6];
    uint8_t buf[250];
    int ret;
    int i, len;

    memset(cmd, 0, sizeof(cmd));
    memset(buf, 0, sizeof(buf));
    cmd[0] = INQUIRY;
    cmd[1] = 1;
    cmd[2] = 0x83;
    cmd[4] = sizeof(buf);

    ret = scsi_SG_IO_FROM_DEV(s->conf.blk, cmd, sizeof(cmd),
                              buf, sizeof(buf));
    if (ret < 0) {
        return;
    }

    len = MIN((buf[2] << 8) | buf[3], sizeof(buf) - 4);
    for (i = 0; i + 3 <= len; ) {
        const uint8_t *p = &buf[i + 4];
        uint64_t wwn;

        if (i + (p[3] + 4) > len) {
            break;
        }

        if ((p[1] & 0x10) == 0) {
            /* Associated with the logical unit */
            if (read_naa_id(p, &wwn) == 0) {
                s->wwn = wwn;
            }
        } else if ((p[1] & 0x10) == 0x10) {
            /* Associated with the target port */
            if (read_naa_id(p, &wwn) == 0) {
                s->port_wwn = wwn;
            }
        }

        i += p[3] + 4;
    }
}

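/*
 * Probe the passed-through device: read its designators and, for disks
 * and ZBC devices, decide whether VPD Block Limits emulation is needed.
 */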
void scsi_generic_read_device_inquiry(SCSIDevice *s)
{
    scsi_generic_read_device_identification(s);
    if (s->type == TYPE_DISK || s->type == TYPE_ZBC) {
        scsi_generic_set_vpd_bl_emulation(s);
    } else {
        s->needs_vpd_bl_emulation = false;
    }
}

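/*
 * Read the block size of a stream (tape) device from the block
 * descriptor returned by a 6-byte MODE SENSE.  Returns -1 on error.
 */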
static int get_stream_blocksize(BlockBackend *blk)
{
    uint8_t cmd[6];
    uint8_t buf[12];
    int ret;

    memset(cmd, 0, sizeof(cmd));
    memset(buf, 0, sizeof(buf));
    cmd[0] = MODE_SENSE;
    cmd[4] = sizeof(buf);

    ret = scsi_SG_IO_FROM_DEV(blk, cmd, sizeof(cmd), buf, sizeof(buf));
    if (ret < 0) {
        return -1;
    }

    return (buf[9] << 16) | (buf[10] << 8) | buf[11];
}

static void scsi_generic_reset(DeviceState *dev)
{
    SCSIDevice *s = SCSI_DEVICE(dev);

    s->scsi_version = s->default_scsi_version;
    scsi_device_purge_requests(s, SENSE_CODE(RESET));
}

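/*
 * Realize: validate the backing drive and its error policy, make sure
 * the host node speaks SG_IO version 3 or newer, read the device type
 * via SG_GET_SCSI_ID and pick an initial block size.
 */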
static void scsi_generic_realize(SCSIDevice *s, Error **errp)
{
    int rc;
    int sg_version;
    struct sg_scsi_id scsiid;

    if (!s->conf.blk) {
        error_setg(errp, "drive property not set");
        return;
    }

    if (blk_get_on_error(s->conf.blk, 0) != BLOCKDEV_ON_ERROR_ENOSPC) {
        error_setg(errp, "Device doesn't support drive option werror");
        return;
    }
    if (blk_get_on_error(s->conf.blk, 1) != BLOCKDEV_ON_ERROR_REPORT) {
        error_setg(errp, "Device doesn't support drive option rerror");
        return;
    }

    /* check we are using a driver managing SG_IO (version 3 and after) */
    rc = blk_ioctl(s->conf.blk, SG_GET_VERSION_NUM, &sg_version);
    if (rc < 0) {
        error_setg_errno(errp, -rc, "cannot get SG_IO version number");
        if (rc != -EPERM) {
            error_append_hint(errp, "Is this a SCSI device?\n");
        }
        return;
    }
    if (sg_version < 30000) {
        error_setg(errp, "scsi generic interface too old");
        return;
    }

    /* get LUN of the /dev/sg? */
    if (blk_ioctl(s->conf.blk, SG_GET_SCSI_ID, &scsiid)) {
        error_setg(errp, "SG_GET_SCSI_ID ioctl failed");
        return;
    }
    if (!blkconf_apply_backend_options(&s->conf,
                                       blk_is_read_only(s->conf.blk),
                                       true, errp)) {
        return;
    }

    /* define device state */
    s->type = scsiid.scsi_type;
    trace_scsi_generic_realize_type(s->type);

    switch (s->type) {
    case TYPE_TAPE:
        s->blocksize = get_stream_blocksize(s->conf.blk);
        if (s->blocksize == -1) {
            s->blocksize = 0;
        }
        break;

        /* Make a guess for block devices, we'll fix it when the guest sends
         * READ CAPACITY.  If they don't, they likely would assume these sizes
         * anyway. (TODO: they could also send MODE SENSE).
         */
    case TYPE_ROM:
    case TYPE_WORM:
        s->blocksize = 2048;
        break;
    default:
        s->blocksize = 512;
        break;
    }

    trace_scsi_generic_realize_blocksize(s->blocksize);

    /* Only used by scsi-block, but initialize it nevertheless to be clean.  */
    s->default_scsi_version = -1;
    scsi_generic_read_device_inquiry(s);
}

const SCSIReqOps scsi_generic_req_ops = {
    .size         = sizeof(SCSIGenericReq),
    .free_req     = scsi_free_request,
    .send_command = scsi_send_command,
    .read_data    = scsi_read_data,
    .write_data   = scsi_write_data,
    .get_buf      = scsi_get_buf,
    .load_request = scsi_generic_load_request,
    .save_request = scsi_generic_save_request,
};

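/* Allocate a new request using the generic pass-through request ops. */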
static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun,
                                     uint8_t *buf, void *hba_private)
{
    return scsi_req_alloc(&scsi_generic_req_ops, d, tag, lun, hba_private);
}

static Property scsi_generic_properties[] = {
    DEFINE_PROP_DRIVE("drive", SCSIDevice, conf.blk),
    DEFINE_PROP_BOOL("share-rw", SCSIDevice, conf.share_rw, false),
    DEFINE_PROP_END_OF_LIST(),
};

static int scsi_generic_parse_cdb(SCSIDevice *dev, SCSICommand *cmd,
                                  uint8_t *buf, void *hba_private)
{
    return scsi_bus_parse_cdb(dev, cmd, buf, hba_private);
}

static void scsi_generic_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);

    sc->realize      = scsi_generic_realize;
    sc->alloc_req    = scsi_new_request;
    sc->parse_cdb    = scsi_generic_parse_cdb;
    dc->fw_name = "disk";
    dc->desc = "pass through generic scsi device (/dev/sg*)";
    dc->reset = scsi_generic_reset;
    device_class_set_props(dc, scsi_generic_properties);
    dc->vmsd  = &vmstate_scsi_device;
}

static const TypeInfo scsi_generic_info = {
    .name          = "scsi-generic",
    .parent        = TYPE_SCSI_DEVICE,
    .instance_size = sizeof(SCSIDevice),
    .class_init    = scsi_generic_class_initfn,
};

static void scsi_generic_register_types(void)
{
    type_register_static(&scsi_generic_info);
}

type_init(scsi_generic_register_types)

#endif /* __linux__ */