xref: /openbmc/qemu/hw/scsi/scsi-disk.c (revision f0984d40)
1 /*
2  * SCSI Device emulation
3  *
4  * Copyright (c) 2006 CodeSourcery.
5  * Based on code by Fabrice Bellard
6  *
7  * Written by Paul Brook
8  * Modifications:
9  *  2009-Dec-12 Artyom Tarasenko : implemented standard inquiry for the case
10  *                                 when the allocation length of CDB is smaller
11  *                                 than 36.
12  *  2009-Oct-13 Artyom Tarasenko : implemented the block descriptor in the
13  *                                 MODE SENSE response.
14  *
15  * This code is licensed under the LGPL.
16  *
17  * Note that this file only handles the SCSI architecture model and device
18  * commands.  Emulation of interface/link layer protocols is handled by
19  * the host adapter emulator.
20  */
21 
22 #include "qemu/osdep.h"
23 #include "qemu/units.h"
24 #include "qapi/error.h"
25 #include "qemu/error-report.h"
26 #include "qemu/main-loop.h"
27 #include "qemu/module.h"
28 #include "qemu/hw-version.h"
29 #include "qemu/memalign.h"
30 #include "hw/scsi/scsi.h"
31 #include "migration/qemu-file-types.h"
32 #include "migration/vmstate.h"
33 #include "hw/scsi/emulation.h"
34 #include "scsi/constants.h"
35 #include "sysemu/block-backend.h"
36 #include "sysemu/blockdev.h"
37 #include "hw/block/block.h"
38 #include "hw/qdev-properties.h"
39 #include "hw/qdev-properties-system.h"
40 #include "sysemu/dma.h"
41 #include "sysemu/sysemu.h"
42 #include "qemu/cutils.h"
43 #include "trace.h"
44 #include "qom/object.h"
45 
46 #ifdef __linux__
47 #include <scsi/sg.h>
48 #endif
49 
50 #define SCSI_WRITE_SAME_MAX         (512 * KiB)
51 #define SCSI_DMA_BUF_SIZE           (128 * KiB)
52 #define SCSI_MAX_INQUIRY_LEN        256
53 #define SCSI_MAX_MODE_LEN           256
54 
55 #define DEFAULT_DISCARD_GRANULARITY (4 * KiB)
56 #define DEFAULT_MAX_UNMAP_SIZE      (1 * GiB)
57 #define DEFAULT_MAX_IO_SIZE         INT_MAX     /* 2 GB - 1 block */
58 
59 #define TYPE_SCSI_DISK_BASE         "scsi-disk-base"
60 
61 OBJECT_DECLARE_TYPE(SCSIDiskState, SCSIDiskClass, SCSI_DISK_BASE)
62 
63 struct SCSIDiskClass {
64     SCSIDeviceClass parent_class;
65     DMAIOFunc       *dma_readv;
66     DMAIOFunc       *dma_writev;
67     bool            (*need_fua_emulation)(SCSICommand *cmd);
68     void            (*update_sense)(SCSIRequest *r);
69 };
70 
71 typedef struct SCSIDiskReq {
72     SCSIRequest req;
73     /* Both sector and sector_count are in terms of BDRV_SECTOR_SIZE bytes.  */
74     uint64_t sector;
75     uint32_t sector_count;
76     uint32_t buflen;
77     bool started;
78     bool need_fua_emulation;
79     struct iovec iov;
80     QEMUIOVector qiov;
81     BlockAcctCookie acct;
82 } SCSIDiskReq;
83 
84 #define SCSI_DISK_F_REMOVABLE             0
85 #define SCSI_DISK_F_DPOFUA                1
86 #define SCSI_DISK_F_NO_REMOVABLE_DEVOPS   2
87 
88 struct SCSIDiskState {
89     SCSIDevice qdev;
90     uint32_t features;
91     bool media_changed;
92     bool media_event;
93     bool eject_request;
94     uint16_t port_index;
95     uint64_t max_unmap_size;
96     uint64_t max_io_size;
97     uint32_t quirks;
98     QEMUBH *bh;
99     char *version;
100     char *serial;
101     char *vendor;
102     char *product;
103     char *device_id;
104     bool tray_open;
105     bool tray_locked;
106     /*
107      * 0x0000        - rotation rate not reported
108      * 0x0001        - non-rotating medium (SSD)
109      * 0x0002-0x0400 - reserved
110     * 0x0401-0xfffe - rotations per minute
111      * 0xffff        - reserved
112      */
113     uint16_t rotation_rate;
114 };
115 
116 static void scsi_free_request(SCSIRequest *req)
117 {
118     SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
119 
120     qemu_vfree(r->iov.iov_base);
121 }
122 
123 /* Helper function for command completion with sense.  */
124 static void scsi_check_condition(SCSIDiskReq *r, SCSISense sense)
125 {
126     trace_scsi_disk_check_condition(r->req.tag, sense.key, sense.asc,
127                                     sense.ascq);
128     scsi_req_build_sense(&r->req, sense);
129     scsi_req_complete(&r->req, CHECK_CONDITION);
130 }
131 
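/*
 * Lazily allocate the request's bounce buffer (at most "size" bytes,
 * aligned for the block backend) and point its single-element QEMUIOVector
 * at it, capped to the number of sectors still left to transfer.
 */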
132 static void scsi_init_iovec(SCSIDiskReq *r, size_t size)
133 {
134     SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
135 
136     if (!r->iov.iov_base) {
137         r->buflen = size;
138         r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen);
139     }
140     r->iov.iov_len = MIN(r->sector_count * BDRV_SECTOR_SIZE, r->buflen);
141     qemu_iovec_init_external(&r->qiov, &r->iov, 1);
142 }
143 
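/*
 * Migration support: save the transfer state of an in-flight request.
 * The bounce buffer contents are always saved for host-to-device
 * transfers; for device-to-host transfers they are saved only if the
 * request will not simply be retried on the destination.
 */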
144 static void scsi_disk_save_request(QEMUFile *f, SCSIRequest *req)
145 {
146     SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
147 
148     qemu_put_be64s(f, &r->sector);
149     qemu_put_be32s(f, &r->sector_count);
150     qemu_put_be32s(f, &r->buflen);
151     if (r->buflen) {
152         if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
153             qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len);
154         } else if (!req->retry) {
155             uint32_t len = r->iov.iov_len;
156             qemu_put_be32s(f, &len);
157             qemu_put_buffer(f, r->iov.iov_base, r->iov.iov_len);
158         }
159     }
160 }
161 
162 static void scsi_disk_load_request(QEMUFile *f, SCSIRequest *req)
163 {
164     SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
165 
166     qemu_get_be64s(f, &r->sector);
167     qemu_get_be32s(f, &r->sector_count);
168     qemu_get_be32s(f, &r->buflen);
169     if (r->buflen) {
170         scsi_init_iovec(r, r->buflen);
171         if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
172             qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len);
173         } else if (!r->req.retry) {
174             uint32_t len;
175             qemu_get_be32s(f, &len);
176             r->iov.iov_len = len;
177             assert(r->iov.iov_len <= r->buflen);
178             qemu_get_buffer(f, r->iov.iov_base, r->iov.iov_len);
179         }
180     }
181 
182     qemu_iovec_init_external(&r->qiov, &r->iov, 1);
183 }
184 
185 /*
186  * scsi_handle_rw_error has two return values.  False means that the error
187  * must be ignored, true means that the error has been processed and the
188  * caller should not do anything else for this request.  Note that
189  * scsi_handle_rw_error always manages its reference counts, independent
190  * of the return value.
191  */
192 static bool scsi_handle_rw_error(SCSIDiskReq *r, int ret, bool acct_failed)
193 {
194     bool is_read = (r->req.cmd.mode == SCSI_XFER_FROM_DEV);
195     SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
196     SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
197     SCSISense sense = SENSE_CODE(NO_SENSE);
198     int error = 0;
199     bool req_has_sense = false;
200     BlockErrorAction action;
201     int status;
202 
203     if (ret < 0) {
204         status = scsi_sense_from_errno(-ret, &sense);
205         error = -ret;
206     } else {
207         /* A passthrough command has completed with nonzero status.  */
208         status = ret;
209         if (status == CHECK_CONDITION) {
210             req_has_sense = true;
211             error = scsi_sense_buf_to_errno(r->req.sense, sizeof(r->req.sense));
212         } else {
213             error = EINVAL;
214         }
215     }
216 
217     /*
218      * Check whether the error has to be handled by the guest or should
219      * rather follow the rerror=/werror= settings.  Guest-handled errors
220      * are usually retried immediately, so do not post them to QMP and
221      * do not account them as failed I/O.
222      */
223     if (req_has_sense &&
224         scsi_sense_buf_is_guest_recoverable(r->req.sense, sizeof(r->req.sense))) {
225         action = BLOCK_ERROR_ACTION_REPORT;
226         acct_failed = false;
227     } else {
228         action = blk_get_error_action(s->qdev.conf.blk, is_read, error);
229         blk_error_action(s->qdev.conf.blk, action, is_read, error);
230     }
231 
232     switch (action) {
233     case BLOCK_ERROR_ACTION_REPORT:
234         if (acct_failed) {
235             block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
236         }
237         if (req_has_sense) {
238             sdc->update_sense(&r->req);
239         } else if (status == CHECK_CONDITION) {
240             scsi_req_build_sense(&r->req, sense);
241         }
242         scsi_req_complete(&r->req, status);
243         return true;
244 
245     case BLOCK_ERROR_ACTION_IGNORE:
246         return false;
247 
248     case BLOCK_ERROR_ACTION_STOP:
249         scsi_req_retry(&r->req);
250         return true;
251 
252     default:
253         g_assert_not_reached();
254     }
255 }
256 
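/*
 * Common completion-path check: returns true if the request was cancelled
 * or if scsi_handle_rw_error consumed the error, in which case the caller
 * must not complete the request again.
 */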
257 static bool scsi_disk_req_check_error(SCSIDiskReq *r, int ret, bool acct_failed)
258 {
259     if (r->req.io_canceled) {
260         scsi_req_cancel_complete(&r->req);
261         return true;
262     }
263 
264     if (ret < 0) {
265         return scsi_handle_rw_error(r, ret, acct_failed);
266     }
267 
268     return false;
269 }
270 
271 static void scsi_aio_complete(void *opaque, int ret)
272 {
273     SCSIDiskReq *r = (SCSIDiskReq *)opaque;
274     SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
275 
276     aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
277 
278     assert(r->req.aiocb != NULL);
279     r->req.aiocb = NULL;
280 
281     if (scsi_disk_req_check_error(r, ret, true)) {
282         goto done;
283     }
284 
285     block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
286     scsi_req_complete(&r->req, GOOD);
287 
288 done:
289     aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
290     scsi_req_unref(&r->req);
291 }
292 
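/*
 * Return true if the command requires force unit access, either because
 * the FUA bit is set in the CDB or because the command (VERIFY, WRITE AND
 * VERIFY) implies it.
 */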
293 static bool scsi_is_cmd_fua(SCSICommand *cmd)
294 {
295     switch (cmd->buf[0]) {
296     case READ_10:
297     case READ_12:
298     case READ_16:
299     case WRITE_10:
300     case WRITE_12:
301     case WRITE_16:
302         return (cmd->buf[1] & 8) != 0;
303 
304     case VERIFY_10:
305     case VERIFY_12:
306     case VERIFY_16:
307     case WRITE_VERIFY_10:
308     case WRITE_VERIFY_12:
309     case WRITE_VERIFY_16:
310         return true;
311 
312     case READ_6:
313     case WRITE_6:
314     default:
315         return false;
316     }
317 }
318 
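/*
 * Finish a write request, emulating FUA with an explicit flush when the
 * backend's write cache would otherwise still hold the data.
 */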
319 static void scsi_write_do_fua(SCSIDiskReq *r)
320 {
321     SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
322 
323     assert(r->req.aiocb == NULL);
324     assert(!r->req.io_canceled);
325 
326     if (r->need_fua_emulation) {
327         block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
328                          BLOCK_ACCT_FLUSH);
329         r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
330         return;
331     }
332 
333     scsi_req_complete(&r->req, GOOD);
334     scsi_req_unref(&r->req);
335 }
336 
337 static void scsi_dma_complete_noio(SCSIDiskReq *r, int ret)
338 {
339     assert(r->req.aiocb == NULL);
340     if (scsi_disk_req_check_error(r, ret, false)) {
341         goto done;
342     }
343 
344     r->sector += r->sector_count;
345     r->sector_count = 0;
346     if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
347         scsi_write_do_fua(r);
348         return;
349     } else {
350         scsi_req_complete(&r->req, GOOD);
351     }
352 
353 done:
354     scsi_req_unref(&r->req);
355 }
356 
357 /* Called with AioContext lock held */
358 static void scsi_dma_complete(void *opaque, int ret)
359 {
360     SCSIDiskReq *r = (SCSIDiskReq *)opaque;
361     SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
362 
363     assert(r->req.aiocb != NULL);
364     r->req.aiocb = NULL;
365 
366     if (ret < 0) {
367         block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
368     } else {
369         block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
370     }
371     scsi_dma_complete_noio(r, ret);
372 }
373 
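/*
 * Completion for one chunk of a READ: advance the sector bookkeeping and
 * hand the filled bounce buffer to the HBA, unless an error or a
 * cancellation ends the request first.
 */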
374 static void scsi_read_complete_noio(SCSIDiskReq *r, int ret)
375 {
376     uint32_t n;
377 
378     assert(r->req.aiocb == NULL);
379     if (scsi_disk_req_check_error(r, ret, false)) {
380         goto done;
381     }
382 
383     n = r->qiov.size / BDRV_SECTOR_SIZE;
384     r->sector += n;
385     r->sector_count -= n;
386     scsi_req_data(&r->req, r->qiov.size);
387 
388 done:
389     scsi_req_unref(&r->req);
390 }
391 
392 static void scsi_read_complete(void *opaque, int ret)
393 {
394     SCSIDiskReq *r = (SCSIDiskReq *)opaque;
395     SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
396 
397     aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
398 
399     assert(r->req.aiocb != NULL);
400     r->req.aiocb = NULL;
401 
402     if (ret < 0) {
403         block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
404     } else {
405         block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
406         trace_scsi_disk_read_complete(r->req.tag, r->qiov.size);
407     }
408     scsi_read_complete_noio(r, ret);
409     aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
410 }
411 
412 /* Actually issue a read to the block device.  */
413 static void scsi_do_read(SCSIDiskReq *r, int ret)
414 {
415     SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
416     SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
417 
418     assert(r->req.aiocb == NULL);
419     if (scsi_disk_req_check_error(r, ret, false)) {
420         goto done;
421     }
422 
423     /* The request is used as the AIO opaque value, so add a ref.  */
424     scsi_req_ref(&r->req);
425 
426     if (r->req.sg) {
427         dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_READ);
428         r->req.residual -= r->req.sg->size;
429         r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
430                                   r->req.sg, r->sector << BDRV_SECTOR_BITS,
431                                   BDRV_SECTOR_SIZE,
432                                   sdc->dma_readv, r, scsi_dma_complete, r,
433                                   DMA_DIRECTION_FROM_DEVICE);
434     } else {
435         scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
436         block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
437                          r->qiov.size, BLOCK_ACCT_READ);
438         r->req.aiocb = sdc->dma_readv(r->sector << BDRV_SECTOR_BITS, &r->qiov,
439                                       scsi_read_complete, r, r);
440     }
441 
442 done:
443     scsi_req_unref(&r->req);
444 }
445 
446 static void scsi_do_read_cb(void *opaque, int ret)
447 {
448     SCSIDiskReq *r = (SCSIDiskReq *)opaque;
449     SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
450 
451     aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
452 
453     assert(r->req.aiocb != NULL);
454     r->req.aiocb = NULL;
455 
456     if (ret < 0) {
457         block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
458     } else {
459         block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
460     }
461     scsi_do_read(opaque, ret);
462     aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
463 }
464 
465 /* Read more data from scsi device into buffer.  */
466 static void scsi_read_data(SCSIRequest *req)
467 {
468     SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
469     SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
470     bool first;
471 
472     trace_scsi_disk_read_data_count(r->sector_count);
473     if (r->sector_count == 0) {
474         /* This also clears the sense buffer for REQUEST SENSE.  */
475         scsi_req_complete(&r->req, GOOD);
476         return;
477     }
478 
479     /* No data transfer may already be in progress */
480     assert(r->req.aiocb == NULL);
481 
482     /* The request is used as the AIO opaque value, so add a ref.  */
483     scsi_req_ref(&r->req);
484     if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
485         trace_scsi_disk_read_data_invalid();
486         scsi_read_complete_noio(r, -EINVAL);
487         return;
488     }
489 
490     if (!blk_is_available(req->dev->conf.blk)) {
491         scsi_read_complete_noio(r, -ENOMEDIUM);
492         return;
493     }
494 
495     first = !r->started;
496     r->started = true;
497     if (first && r->need_fua_emulation) {
498         block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
499                          BLOCK_ACCT_FLUSH);
500         r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_do_read_cb, r);
501     } else {
502         scsi_do_read(r, 0);
503     }
504 }
505 
506 static void scsi_write_complete_noio(SCSIDiskReq *r, int ret)
507 {
508     uint32_t n;
509 
510     assert(r->req.aiocb == NULL);
511     if (scsi_disk_req_check_error(r, ret, false)) {
512         goto done;
513     }
514 
515     n = r->qiov.size / BDRV_SECTOR_SIZE;
516     r->sector += n;
517     r->sector_count -= n;
518     if (r->sector_count == 0) {
519         scsi_write_do_fua(r);
520         return;
521     } else {
522         scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
523         trace_scsi_disk_write_complete_noio(r->req.tag, r->qiov.size);
524         scsi_req_data(&r->req, r->qiov.size);
525     }
526 
527 done:
528     scsi_req_unref(&r->req);
529 }
530 
531 static void scsi_write_complete(void *opaque, int ret)
532 {
533     SCSIDiskReq *r = (SCSIDiskReq *)opaque;
534     SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
535 
536     aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
537 
538     assert(r->req.aiocb != NULL);
539     r->req.aiocb = NULL;
540 
541     if (ret < 0) {
542         block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct);
543     } else {
544         block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
545     }
546     scsi_write_complete_noio(r, ret);
547     aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
548 }
549 
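/*
 * Called by the HBA to transfer (more) write data.  For bounce-buffer
 * requests the first call merely asks the driver for data and later calls
 * write it out; scatter/gather requests go straight to the DMA helpers.
 * VERIFY commands carry data but nothing is written to the medium.
 */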
550 static void scsi_write_data(SCSIRequest *req)
551 {
552     SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
553     SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
554     SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
555 
556     /* No data transfer may already be in progress */
557     assert(r->req.aiocb == NULL);
558 
559     /* The request is used as the AIO opaque value, so add a ref.  */
560     scsi_req_ref(&r->req);
561     if (r->req.cmd.mode != SCSI_XFER_TO_DEV) {
562         trace_scsi_disk_write_data_invalid();
563         scsi_write_complete_noio(r, -EINVAL);
564         return;
565     }
566 
567     if (!r->req.sg && !r->qiov.size) {
568         /* Called for the first time.  Ask the driver to send us more data.  */
569         r->started = true;
570         scsi_write_complete_noio(r, 0);
571         return;
572     }
573     if (!blk_is_available(req->dev->conf.blk)) {
574         scsi_write_complete_noio(r, -ENOMEDIUM);
575         return;
576     }
577 
578     if (r->req.cmd.buf[0] == VERIFY_10 || r->req.cmd.buf[0] == VERIFY_12 ||
579         r->req.cmd.buf[0] == VERIFY_16) {
580         if (r->req.sg) {
581             scsi_dma_complete_noio(r, 0);
582         } else {
583             scsi_write_complete_noio(r, 0);
584         }
585         return;
586     }
587 
588     if (r->req.sg) {
589         dma_acct_start(s->qdev.conf.blk, &r->acct, r->req.sg, BLOCK_ACCT_WRITE);
590         r->req.residual -= r->req.sg->size;
591         r->req.aiocb = dma_blk_io(blk_get_aio_context(s->qdev.conf.blk),
592                                   r->req.sg, r->sector << BDRV_SECTOR_BITS,
593                                   BDRV_SECTOR_SIZE,
594                                   sdc->dma_writev, r, scsi_dma_complete, r,
595                                   DMA_DIRECTION_TO_DEVICE);
596     } else {
597         block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
598                          r->qiov.size, BLOCK_ACCT_WRITE);
599         r->req.aiocb = sdc->dma_writev(r->sector << BDRV_SECTOR_BITS, &r->qiov,
600                                        scsi_write_complete, r, r);
601     }
602 }
603 
604 /* Return a pointer to the data buffer.  */
605 static uint8_t *scsi_get_buf(SCSIRequest *req)
606 {
607     SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
608 
609     return (uint8_t *)r->iov.iov_base;
610 }
611 
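/*
 * Build an INQUIRY Vital Product Data page in outbuf.  Returns the number
 * of bytes filled in, or -1 if the requested page is not supported.
 */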
612 static int scsi_disk_emulate_vpd_page(SCSIRequest *req, uint8_t *outbuf)
613 {
614     SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
615     uint8_t page_code = req->cmd.buf[2];
616     int start, buflen = 0;
617 
618     outbuf[buflen++] = s->qdev.type & 0x1f;
619     outbuf[buflen++] = page_code;
620     outbuf[buflen++] = 0x00;
621     outbuf[buflen++] = 0x00;
622     start = buflen;
623 
624     switch (page_code) {
625     case 0x00: /* Supported page codes, mandatory */
626     {
627         trace_scsi_disk_emulate_vpd_page_00(req->cmd.xfer);
628         outbuf[buflen++] = 0x00; /* list of supported pages (this page) */
629         if (s->serial) {
630             outbuf[buflen++] = 0x80; /* unit serial number */
631         }
632         outbuf[buflen++] = 0x83; /* device identification */
633         if (s->qdev.type == TYPE_DISK) {
634             outbuf[buflen++] = 0xb0; /* block limits */
635             outbuf[buflen++] = 0xb1; /* block device characteristics */
636             outbuf[buflen++] = 0xb2; /* thin provisioning */
637         }
638         break;
639     }
640     case 0x80: /* Device serial number, optional */
641     {
642         int l;
643 
644         if (!s->serial) {
645             trace_scsi_disk_emulate_vpd_page_80_not_supported();
646             return -1;
647         }
648 
649         l = strlen(s->serial);
650         if (l > 36) {
651             l = 36;
652         }
653 
654         trace_scsi_disk_emulate_vpd_page_80(req->cmd.xfer);
655         memcpy(outbuf + buflen, s->serial, l);
656         buflen += l;
657         break;
658     }
659 
660     case 0x83: /* Device identification page, mandatory */
661     {
662         int id_len = s->device_id ? MIN(strlen(s->device_id), 255 - 8) : 0;
663 
664         trace_scsi_disk_emulate_vpd_page_83(req->cmd.xfer);
665 
666         if (id_len) {
667             outbuf[buflen++] = 0x2; /* ASCII */
668             outbuf[buflen++] = 0;   /* not officially assigned */
669             outbuf[buflen++] = 0;   /* reserved */
670             outbuf[buflen++] = id_len; /* length of data following */
671             memcpy(outbuf + buflen, s->device_id, id_len);
672             buflen += id_len;
673         }
674 
675         if (s->qdev.wwn) {
676             outbuf[buflen++] = 0x1; /* Binary */
677             outbuf[buflen++] = 0x3; /* NAA */
678             outbuf[buflen++] = 0;   /* reserved */
679             outbuf[buflen++] = 8;
680             stq_be_p(&outbuf[buflen], s->qdev.wwn);
681             buflen += 8;
682         }
683 
684         if (s->qdev.port_wwn) {
685             outbuf[buflen++] = 0x61; /* SAS / Binary */
686             outbuf[buflen++] = 0x93; /* PIV / Target port / NAA */
687             outbuf[buflen++] = 0;    /* reserved */
688             outbuf[buflen++] = 8;
689             stq_be_p(&outbuf[buflen], s->qdev.port_wwn);
690             buflen += 8;
691         }
692 
693         if (s->port_index) {
694             outbuf[buflen++] = 0x61; /* SAS / Binary */
695 
696             /* PIV/Target port/relative target port */
697             outbuf[buflen++] = 0x94;
698 
699             outbuf[buflen++] = 0;    /* reserved */
700             outbuf[buflen++] = 4;
701             stw_be_p(&outbuf[buflen + 2], s->port_index);
702             buflen += 4;
703         }
704         break;
705     }
706     case 0xb0: /* block limits */
707     {
708         SCSIBlockLimits bl = {};
709 
710         if (s->qdev.type == TYPE_ROM) {
711             trace_scsi_disk_emulate_vpd_page_b0_not_supported();
712             return -1;
713         }
714         bl.wsnz = 1;
715         bl.unmap_sectors =
716             s->qdev.conf.discard_granularity / s->qdev.blocksize;
717         bl.min_io_size =
718             s->qdev.conf.min_io_size / s->qdev.blocksize;
719         bl.opt_io_size =
720             s->qdev.conf.opt_io_size / s->qdev.blocksize;
721         bl.max_unmap_sectors =
722             s->max_unmap_size / s->qdev.blocksize;
723         bl.max_io_sectors =
724             s->max_io_size / s->qdev.blocksize;
725         /* 255 descriptors fit in 4 KiB with an 8-byte header */
726         bl.max_unmap_descr = 255;
727 
728         if (s->qdev.type == TYPE_DISK) {
729             int max_transfer_blk = blk_get_max_transfer(s->qdev.conf.blk);
730             int max_io_sectors_blk =
731                 max_transfer_blk / s->qdev.blocksize;
732 
733             bl.max_io_sectors =
734                 MIN_NON_ZERO(max_io_sectors_blk, bl.max_io_sectors);
735         }
736         buflen += scsi_emulate_block_limits(outbuf + buflen, &bl);
737         break;
738     }
739     case 0xb1: /* block device characteristics */
740     {
741         buflen = 0x40;
742         outbuf[4] = (s->rotation_rate >> 8) & 0xff;
743         outbuf[5] = s->rotation_rate & 0xff;
744         outbuf[6] = 0; /* PRODUCT TYPE */
745         outbuf[7] = 0; /* WABEREQ | WACEREQ | NOMINAL FORM FACTOR */
746         outbuf[8] = 0; /* VBULS */
747         break;
748     }
749     case 0xb2: /* thin provisioning */
750     {
751         buflen = 8;
752         outbuf[4] = 0;
753         outbuf[5] = 0xe0; /* unmap & write_same 10/16 all supported */
754         outbuf[6] = s->qdev.conf.discard_granularity ? 2 : 1;
755         outbuf[7] = 0;
756         break;
757     }
758     default:
759         return -1;
760     }
761     /* done with EVPD */
762     assert(buflen - start <= 255);
763     outbuf[start - 1] = buflen - start;
764     return buflen;
765 }
766 
767 static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf)
768 {
769     SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
770     int buflen = 0;
771 
772     if (req->cmd.buf[1] & 0x1) {
773         /* Vital product data */
774         return scsi_disk_emulate_vpd_page(req, outbuf);
775     }
776 
777     /* Standard INQUIRY data */
778     if (req->cmd.buf[2] != 0) {
779         return -1;
780     }
781 
782     /* PAGE CODE == 0 */
783     buflen = req->cmd.xfer;
784     if (buflen > SCSI_MAX_INQUIRY_LEN) {
785         buflen = SCSI_MAX_INQUIRY_LEN;
786     }
787 
788     outbuf[0] = s->qdev.type & 0x1f;
789     outbuf[1] = (s->features & (1 << SCSI_DISK_F_REMOVABLE)) ? 0x80 : 0;
790 
791     strpadcpy((char *) &outbuf[16], 16, s->product, ' ');
792     strpadcpy((char *) &outbuf[8], 8, s->vendor, ' ');
793 
794     memset(&outbuf[32], 0, 4);
795     memcpy(&outbuf[32], s->version, MIN(4, strlen(s->version)));
796     /*
797      * We claim conformance to SPC-3, which is required for guests
798      * to ask for modern features like READ CAPACITY(16) or the
799      * block characteristics VPD page by default.  Not all of SPC-3
800      * is actually implemented, but we're good enough.
801      */
802     outbuf[2] = s->qdev.default_scsi_version;
803     outbuf[3] = 2 | 0x10; /* Format 2, HiSup */
804 
805     if (buflen > 36) {
806         outbuf[4] = buflen - 5; /* Additional Length = (Len - 1) - 4 */
807     } else {
808         /* If the allocation length in the CDB is too small,
809            the additional length is not adjusted.  */
810         outbuf[4] = 36 - 5;
811     }
812 
813     /* Sync data transfer and TCQ.  */
814     outbuf[7] = 0x10 | (req->bus->info->tcq ? 0x02 : 0);
815     return buflen;
816 }
817 
818 static inline bool media_is_dvd(SCSIDiskState *s)
819 {
820     uint64_t nb_sectors;
821     if (s->qdev.type != TYPE_ROM) {
822         return false;
823     }
824     if (!blk_is_available(s->qdev.conf.blk)) {
825         return false;
826     }
827     blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
828     return nb_sectors > CD_MAX_SECTORS;
829 }
830 
831 static inline bool media_is_cd(SCSIDiskState *s)
832 {
833     uint64_t nb_sectors;
834     if (s->qdev.type != TYPE_ROM) {
835         return false;
836     }
837     if (!blk_is_available(s->qdev.conf.blk)) {
838         return false;
839     }
840     blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
841     return nb_sectors <= CD_MAX_SECTORS;
842 }
843 
844 static int scsi_read_disc_information(SCSIDiskState *s, SCSIDiskReq *r,
845                                       uint8_t *outbuf)
846 {
847     uint8_t type = r->req.cmd.buf[1] & 7;
848 
849     if (s->qdev.type != TYPE_ROM) {
850         return -1;
851     }
852 
853     /* Types 1/2 are only defined for Blu-Ray.  */
854     if (type != 0) {
855         scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
856         return -1;
857     }
858 
859     memset(outbuf, 0, 34);
860     outbuf[1] = 32;
861     outbuf[2] = 0xe; /* last session complete, disc finalized */
862     outbuf[3] = 1;   /* first track on disc */
863     outbuf[4] = 1;   /* # of sessions */
864     outbuf[5] = 1;   /* first track of last session */
865     outbuf[6] = 1;   /* last track of last session */
866     outbuf[7] = 0x20; /* unrestricted use */
867     outbuf[8] = 0x00; /* CD-ROM or DVD-ROM */
868     /* 9-10-11: most significant bytes corresponding to bytes 4-5-6 */
869     /* 12-23: not meaningful for CD-ROM or DVD-ROM */
870     /* 24-31: disc bar code */
871     /* 32: disc application code */
872     /* 33: number of OPC tables */
873 
874     return 34;
875 }
876 
877 static int scsi_read_dvd_structure(SCSIDiskState *s, SCSIDiskReq *r,
878                                    uint8_t *outbuf)
879 {
880     static const int rds_caps_size[5] = {
881         [0] = 2048 + 4,
882         [1] = 4 + 4,
883         [3] = 188 + 4,
884         [4] = 2048 + 4,
885     };
886 
887     uint8_t media = r->req.cmd.buf[1];
888     uint8_t layer = r->req.cmd.buf[6];
889     uint8_t format = r->req.cmd.buf[7];
890     int size = -1;
891 
892     if (s->qdev.type != TYPE_ROM) {
893         return -1;
894     }
895     if (media != 0) {
896         scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
897         return -1;
898     }
899 
900     if (format != 0xff) {
901         if (!blk_is_available(s->qdev.conf.blk)) {
902             scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
903             return -1;
904         }
905         if (media_is_cd(s)) {
906             scsi_check_condition(r, SENSE_CODE(INCOMPATIBLE_FORMAT));
907             return -1;
908         }
909         if (format >= ARRAY_SIZE(rds_caps_size)) {
910             return -1;
911         }
912         size = rds_caps_size[format];
913         memset(outbuf, 0, size);
914     }
915 
916     switch (format) {
917     case 0x00: {
918         /* Physical format information */
919         uint64_t nb_sectors;
920         if (layer != 0) {
921             goto fail;
922         }
923         blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
924 
925         outbuf[4] = 1;   /* DVD-ROM, part version 1 */
926         outbuf[5] = 0xf; /* 120mm disc, minimum rate unspecified */
927         outbuf[6] = 1;   /* one layer, read-only (per MMC-2 spec) */
928         outbuf[7] = 0;   /* default densities */
929 
930         stl_be_p(&outbuf[12], (nb_sectors >> 2) - 1); /* end sector */
931         stl_be_p(&outbuf[16], (nb_sectors >> 2) - 1); /* l0 end sector */
932         break;
933     }
934 
935     case 0x01: /* DVD copyright information, all zeros */
936         break;
937 
938     case 0x03: /* BCA information - invalid field for no BCA info */
939         return -1;
940 
941     case 0x04: /* DVD disc manufacturing information, all zeros */
942         break;
943 
944     case 0xff: { /* List capabilities */
945         int i;
946         size = 4;
947         for (i = 0; i < ARRAY_SIZE(rds_caps_size); i++) {
948             if (!rds_caps_size[i]) {
949                 continue;
950             }
951             outbuf[size] = i;
952             outbuf[size + 1] = 0x40; /* Not writable, readable */
953             stw_be_p(&outbuf[size + 2], rds_caps_size[i]);
954             size += 4;
955         }
956         break;
957     }
958 
959     default:
960         return -1;
961     }
962 
963     /* Size of buffer, not including 2 byte size field */
964     stw_be_p(outbuf, size - 2);
965     return size;
966 
967 fail:
968     return -1;
969 }
970 
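/*
 * Fill in the 4-byte media event descriptor for GET EVENT STATUS
 * NOTIFICATION, reporting the tray state and any pending new-media or
 * eject-request event.
 */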
971 static int scsi_event_status_media(SCSIDiskState *s, uint8_t *outbuf)
972 {
973     uint8_t event_code, media_status;
974 
975     media_status = 0;
976     if (s->tray_open) {
977         media_status = MS_TRAY_OPEN;
978     } else if (blk_is_inserted(s->qdev.conf.blk)) {
979         media_status = MS_MEDIA_PRESENT;
980     }
981 
982     /* Event notification descriptor */
983     event_code = MEC_NO_CHANGE;
984     if (media_status != MS_TRAY_OPEN) {
985         if (s->media_event) {
986             event_code = MEC_NEW_MEDIA;
987             s->media_event = false;
988         } else if (s->eject_request) {
989             event_code = MEC_EJECT_REQUESTED;
990             s->eject_request = false;
991         }
992     }
993 
994     outbuf[0] = event_code;
995     outbuf[1] = media_status;
996 
997     /* These fields are reserved, just clear them. */
998     outbuf[2] = 0;
999     outbuf[3] = 0;
1000     return 4;
1001 }
1002 
1003 static int scsi_get_event_status_notification(SCSIDiskState *s, SCSIDiskReq *r,
1004                                               uint8_t *outbuf)
1005 {
1006     int size;
1007     uint8_t *buf = r->req.cmd.buf;
1008     uint8_t notification_class_request = buf[4];
1009     if (s->qdev.type != TYPE_ROM) {
1010         return -1;
1011     }
1012     if ((buf[1] & 1) == 0) {
1013         /* asynchronous */
1014         return -1;
1015     }
1016 
1017     size = 4;
1018     outbuf[0] = outbuf[1] = 0;
1019     outbuf[3] = 1 << GESN_MEDIA; /* supported events */
1020     if (notification_class_request & (1 << GESN_MEDIA)) {
1021         outbuf[2] = GESN_MEDIA;
1022         size += scsi_event_status_media(s, &outbuf[size]);
1023     } else {
1024         outbuf[2] = 0x80;
1025     }
1026     stw_be_p(outbuf, size - 4);
1027     return size;
1028 }
1029 
1030 static int scsi_get_configuration(SCSIDiskState *s, uint8_t *outbuf)
1031 {
1032     int current;
1033 
1034     if (s->qdev.type != TYPE_ROM) {
1035         return -1;
1036     }
1037 
1038     if (media_is_dvd(s)) {
1039         current = MMC_PROFILE_DVD_ROM;
1040     } else if (media_is_cd(s)) {
1041         current = MMC_PROFILE_CD_ROM;
1042     } else {
1043         current = MMC_PROFILE_NONE;
1044     }
1045 
1046     memset(outbuf, 0, 40);
1047     stl_be_p(&outbuf[0], 36); /* Bytes after the data length field */
1048     stw_be_p(&outbuf[6], current);
1049     /* outbuf[8] - outbuf[19]: Feature 0 - Profile list */
1050     outbuf[10] = 0x03; /* persistent, current */
1051     outbuf[11] = 8; /* two profiles */
1052     stw_be_p(&outbuf[12], MMC_PROFILE_DVD_ROM);
1053     outbuf[14] = (current == MMC_PROFILE_DVD_ROM);
1054     stw_be_p(&outbuf[16], MMC_PROFILE_CD_ROM);
1055     outbuf[18] = (current == MMC_PROFILE_CD_ROM);
1056     /* outbuf[20] - outbuf[31]: Feature 1 - Core feature */
1057     stw_be_p(&outbuf[20], 1);
1058     outbuf[22] = 0x08 | 0x03; /* version 2, persistent, current */
1059     outbuf[23] = 8;
1060     stl_be_p(&outbuf[24], 1); /* SCSI */
1061     outbuf[28] = 1; /* DBE = 1, mandatory */
1062     /* outbuf[32] - outbuf[39]: Feature 3 - Removable media feature */
1063     stw_be_p(&outbuf[32], 3);
1064     outbuf[34] = 0x08 | 0x03; /* version 2, persistent, current */
1065     outbuf[35] = 4;
1066     outbuf[36] = 0x39; /* tray, load=1, eject=1, unlocked at powerup, lock=1 */
1067     /* TODO: Random readable, CD read, DVD read, drive serial number,
1068        power management */
1069     return 40;
1070 }
1071 
1072 static int scsi_emulate_mechanism_status(SCSIDiskState *s, uint8_t *outbuf)
1073 {
1074     if (s->qdev.type != TYPE_ROM) {
1075         return -1;
1076     }
1077     memset(outbuf, 0, 8);
1078     outbuf[5] = 1; /* CD-ROM */
1079     return 8;
1080 }
1081 
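/*
 * Append a single mode page to *p_outbuf, honouring the device type and
 * the requested page control (current, changeable or default values).
 * Returns the number of bytes added, or -1 if the page is not valid for
 * this device.
 */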
1082 static int mode_sense_page(SCSIDiskState *s, int page, uint8_t **p_outbuf,
1083                            int page_control)
1084 {
1085     static const int mode_sense_valid[0x3f] = {
1086         [MODE_PAGE_VENDOR_SPECIFIC]        = (1 << TYPE_DISK) | (1 << TYPE_ROM),
1087         [MODE_PAGE_HD_GEOMETRY]            = (1 << TYPE_DISK),
1088         [MODE_PAGE_FLEXIBLE_DISK_GEOMETRY] = (1 << TYPE_DISK),
1089         [MODE_PAGE_CACHING]                = (1 << TYPE_DISK) | (1 << TYPE_ROM),
1090         [MODE_PAGE_R_W_ERROR]              = (1 << TYPE_DISK) | (1 << TYPE_ROM),
1091         [MODE_PAGE_AUDIO_CTL]              = (1 << TYPE_ROM),
1092         [MODE_PAGE_CAPABILITIES]           = (1 << TYPE_ROM),
1093         [MODE_PAGE_APPLE_VENDOR]           = (1 << TYPE_ROM),
1094     };
1095 
1096     uint8_t *p = *p_outbuf + 2;
1097     int length;
1098 
1099     assert(page < ARRAY_SIZE(mode_sense_valid));
1100     if ((mode_sense_valid[page] & (1 << s->qdev.type)) == 0) {
1101         return -1;
1102     }
1103 
1104     /*
1105      * If Changeable Values are requested, a mask denoting those mode parameters
1106      * that are changeable shall be returned. As we currently don't support
1107      * parameter changes via MODE_SELECT, all bits are returned set to zero.
1108      * The buffer was already memset to zero by the caller of this function.
1109      *
1110      * The offsets here are off by two compared to the descriptions in the
1111      * SCSI specs, because those include a 2-byte header.  This is unfortunate,
1112      * but it is done so that offsets are consistent within our implementation
1113      * of MODE SENSE and MODE SELECT.  MODE SELECT has to deal with both
1114      * 2-byte and 4-byte headers.
1115      */
1116     switch (page) {
1117     case MODE_PAGE_HD_GEOMETRY:
1118         length = 0x16;
1119         if (page_control == 1) { /* Changeable Values */
1120             break;
1121         }
1122         /* if a geometry hint is available, use it */
1123         p[0] = (s->qdev.conf.cyls >> 16) & 0xff;
1124         p[1] = (s->qdev.conf.cyls >> 8) & 0xff;
1125         p[2] = s->qdev.conf.cyls & 0xff;
1126         p[3] = s->qdev.conf.heads & 0xff;
1127         /* Write precomp start cylinder, disabled */
1128         p[4] = (s->qdev.conf.cyls >> 16) & 0xff;
1129         p[5] = (s->qdev.conf.cyls >> 8) & 0xff;
1130         p[6] = s->qdev.conf.cyls & 0xff;
1131         /* Reduced current start cylinder, disabled */
1132         p[7] = (s->qdev.conf.cyls >> 16) & 0xff;
1133         p[8] = (s->qdev.conf.cyls >> 8) & 0xff;
1134         p[9] = s->qdev.conf.cyls & 0xff;
1135         /* Device step rate [ns], 200ns */
1136         p[10] = 0;
1137         p[11] = 200;
1138         /* Landing zone cylinder */
1139         p[12] = 0xff;
1140         p[13] = 0xff;
1141         p[14] = 0xff;
1142         /* Medium rotation rate [rpm], 5400 rpm */
1143         p[18] = (5400 >> 8) & 0xff;
1144         p[19] = 5400 & 0xff;
1145         break;
1146 
1147     case MODE_PAGE_FLEXIBLE_DISK_GEOMETRY:
1148         length = 0x1e;
1149         if (page_control == 1) { /* Changeable Values */
1150             break;
1151         }
1152         /* Transfer rate [kbit/s], 5Mbit/s */
1153         p[0] = 5000 >> 8;
1154         p[1] = 5000 & 0xff;
1155         /* if a geometry hint is available, use it */
1156         p[2] = s->qdev.conf.heads & 0xff;
1157         p[3] = s->qdev.conf.secs & 0xff;
1158         p[4] = s->qdev.blocksize >> 8;
1159         p[6] = (s->qdev.conf.cyls >> 8) & 0xff;
1160         p[7] = s->qdev.conf.cyls & 0xff;
1161         /* Write precomp start cylinder, disabled */
1162         p[8] = (s->qdev.conf.cyls >> 8) & 0xff;
1163         p[9] = s->qdev.conf.cyls & 0xff;
1164         /* Reduced current start cylinder, disabled */
1165         p[10] = (s->qdev.conf.cyls >> 8) & 0xff;
1166         p[11] = s->qdev.conf.cyls & 0xff;
1167         /* Device step rate [100us], 100us */
1168         p[12] = 0;
1169         p[13] = 1;
1170         /* Device step pulse width [us], 1us */
1171         p[14] = 1;
1172         /* Device head settle delay [100us], 100us */
1173         p[15] = 0;
1174         p[16] = 1;
1175         /* Motor on delay [0.1s], 0.1s */
1176         p[17] = 1;
1177         /* Motor off delay [0.1s], 0.1s */
1178         p[18] = 1;
1179         /* Medium rotation rate [rpm], 5400 rpm */
1180         p[26] = (5400 >> 8) & 0xff;
1181         p[27] = 5400 & 0xff;
1182         break;
1183 
1184     case MODE_PAGE_CACHING:
1185         length = 0x12;
1186         if (page_control == 1 || /* Changeable Values */
1187             blk_enable_write_cache(s->qdev.conf.blk)) {
1188             p[0] = 4; /* WCE */
1189         }
1190         break;
1191 
1192     case MODE_PAGE_R_W_ERROR:
1193         length = 10;
1194         if (page_control == 1) { /* Changeable Values */
1195             if (s->qdev.type == TYPE_ROM) {
1196                 /* Automatic Write Reallocation Enabled */
1197                 p[0] = 0x80;
1198             }
1199             break;
1200         }
1201         p[0] = 0x80; /* Automatic Write Reallocation Enabled */
1202         if (s->qdev.type == TYPE_ROM) {
1203             p[1] = 0x20; /* Read Retry Count */
1204         }
1205         break;
1206 
1207     case MODE_PAGE_AUDIO_CTL:
1208         length = 14;
1209         break;
1210 
1211     case MODE_PAGE_CAPABILITIES:
1212         length = 0x14;
1213         if (page_control == 1) { /* Changeable Values */
1214             break;
1215         }
1216 
1217         p[0] = 0x3b; /* CD-R & CD-RW read */
1218         p[1] = 0; /* Writing not supported */
1219         p[2] = 0x7f; /* Audio, composite, digital out,
1220                         mode 2 form 1&2, multi session */
1221         p[3] = 0xff; /* CD DA, DA accurate, RW supported,
1222                         RW corrected, C2 errors, ISRC,
1223                         UPC, Bar code */
1224         p[4] = 0x2d | (s->tray_locked ? 2 : 0);
1225         /* Locking supported, jumper present, eject, tray */
1226         p[5] = 0; /* no volume & mute control, no
1227                      changer */
1228         p[6] = (50 * 176) >> 8; /* 50x read speed */
1229         p[7] = (50 * 176) & 0xff;
1230         p[8] = 2 >> 8; /* Two volume levels */
1231         p[9] = 2 & 0xff;
1232         p[10] = 2048 >> 8; /* 2M buffer */
1233         p[11] = 2048 & 0xff;
1234         p[12] = (16 * 176) >> 8; /* 16x read speed current */
1235         p[13] = (16 * 176) & 0xff;
1236         p[16] = (16 * 176) >> 8; /* 16x write speed */
1237         p[17] = (16 * 176) & 0xff;
1238         p[18] = (16 * 176) >> 8; /* 16x write speed current */
1239         p[19] = (16 * 176) & 0xff;
1240         break;
1241 
1242     case MODE_PAGE_APPLE_VENDOR:
1243         if (s->quirks & (1 << SCSI_DISK_QUIRK_MODE_PAGE_APPLE_VENDOR)) {
1244             length = 0x1e;
1245             if (page_control == 1) { /* Changeable Values */
1246                 break;
1247             }
1248 
1249             memset(p, 0, length);
1250             strcpy((char *)p + 8, "APPLE COMPUTER, INC   ");
1251             break;
1252         } else {
1253             return -1;
1254         }
1255 
1256     case MODE_PAGE_VENDOR_SPECIFIC:
1257         if (s->qdev.type == TYPE_DISK && (s->quirks &
1258             (1 << SCSI_DISK_QUIRK_MODE_PAGE_VENDOR_SPECIFIC_APPLE))) {
1259             length = 0x2;
1260             if (page_control == 1) { /* Changeable Values */
1261                 p[0] = 0xff;
1262                 p[1] = 0xff;
1263                 break;
1264             }
1265             p[0] = 0;
1266             p[1] = 0;
1267             break;
1268         } else {
1269             return -1;
1270         }
1271 
1272     default:
1273         return -1;
1274     }
1275 
1276     assert(length < 256);
1277     (*p_outbuf)[0] = page;
1278     (*p_outbuf)[1] = length;
1279     *p_outbuf += length + 2;
1280     return length + 2;
1281 }
1282 
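/*
 * Emulate MODE SENSE(6) and MODE SENSE(10): build the mode parameter
 * header, an optional short-form block descriptor and the requested mode
 * page(s) in outbuf.  Returns the reply length, or -1 if the request
 * cannot be satisfied.
 */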
1283 static int scsi_disk_emulate_mode_sense(SCSIDiskReq *r, uint8_t *outbuf)
1284 {
1285     SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1286     uint64_t nb_sectors;
1287     bool dbd;
1288     int page, buflen, ret, page_control;
1289     uint8_t *p;
1290     uint8_t dev_specific_param;
1291 
1292     dbd = (r->req.cmd.buf[1] & 0x8) != 0;
1293     page = r->req.cmd.buf[2] & 0x3f;
1294     page_control = (r->req.cmd.buf[2] & 0xc0) >> 6;
1295 
1296     trace_scsi_disk_emulate_mode_sense((r->req.cmd.buf[0] == MODE_SENSE) ? 6 :
1297                                        10, page, r->req.cmd.xfer, page_control);
1298     memset(outbuf, 0, r->req.cmd.xfer);
1299     p = outbuf;
1300 
1301     if (s->qdev.type == TYPE_DISK) {
1302         dev_specific_param = s->features & (1 << SCSI_DISK_F_DPOFUA) ? 0x10 : 0;
1303         if (!blk_is_writable(s->qdev.conf.blk)) {
1304             dev_specific_param |= 0x80; /* Readonly.  */
1305         }
1306     } else {
1307         if (s->quirks & (1 << SCSI_DISK_QUIRK_MODE_SENSE_ROM_USE_DBD)) {
1308             /* Use DBD from the request... */
1309             dev_specific_param = 0x00;
1310 
1311             /*
1312              * ... unless we receive a request for MODE_PAGE_APPLE_VENDOR
1313              * which should never return a block descriptor even though DBD is
1314              * not set, otherwise CDROM detection fails in MacOS
1315              */
1316             if (s->quirks & (1 << SCSI_DISK_QUIRK_MODE_PAGE_APPLE_VENDOR) &&
1317                 page == MODE_PAGE_APPLE_VENDOR) {
1318                 dbd = true;
1319             }
1320         } else {
1321             /*
1322              * MMC prescribes that CD/DVD drives have no block descriptors,
1323              * and defines no device-specific parameter.
1324              */
1325             dev_specific_param = 0x00;
1326             dbd = true;
1327         }
1328     }
1329 
1330     if (r->req.cmd.buf[0] == MODE_SENSE) {
1331         p[1] = 0; /* Default media type.  */
1332         p[2] = dev_specific_param;
1333         p[3] = 0; /* Block descriptor length.  */
1334         p += 4;
1335     } else { /* MODE_SENSE_10 */
1336         p[2] = 0; /* Default media type.  */
1337         p[3] = dev_specific_param;
1338         p[6] = p[7] = 0; /* Block descriptor length.  */
1339         p += 8;
1340     }
1341 
1342     blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
1343     if (!dbd && nb_sectors) {
1344         if (r->req.cmd.buf[0] == MODE_SENSE) {
1345             outbuf[3] = 8; /* Block descriptor length  */
1346         } else { /* MODE_SENSE_10 */
1347             outbuf[7] = 8; /* Block descriptor length  */
1348         }
1349         nb_sectors /= (s->qdev.blocksize / BDRV_SECTOR_SIZE);
1350         if (nb_sectors > 0xffffff) {
1351             nb_sectors = 0;
1352         }
1353         p[0] = 0; /* media density code */
1354         p[1] = (nb_sectors >> 16) & 0xff;
1355         p[2] = (nb_sectors >> 8) & 0xff;
1356         p[3] = nb_sectors & 0xff;
1357         p[4] = 0; /* reserved */
1358         p[5] = 0; /* bytes 5-7 are the sector size in bytes */
1359         p[6] = s->qdev.blocksize >> 8;
1360         p[7] = 0;
1361         p += 8;
1362     }
1363 
1364     if (page_control == 3) {
1365         /* Saved Values */
1366         scsi_check_condition(r, SENSE_CODE(SAVING_PARAMS_NOT_SUPPORTED));
1367         return -1;
1368     }
1369 
1370     if (page == 0x3f) {
1371         for (page = 0; page <= 0x3e; page++) {
1372             mode_sense_page(s, page, &p, page_control);
1373         }
1374     } else {
1375         ret = mode_sense_page(s, page, &p, page_control);
1376         if (ret == -1) {
1377             return -1;
1378         }
1379     }
1380 
1381     buflen = p - outbuf;
1382     /*
1383      * The mode data length field specifies the length in bytes of the
1384      * following data that is available to be transferred. The mode data
1385      * length does not include itself.
1386      */
1387     if (r->req.cmd.buf[0] == MODE_SENSE) {
1388         outbuf[0] = buflen - 1;
1389     } else { /* MODE_SENSE_10 */
1390         outbuf[0] = ((buflen - 2) >> 8) & 0xff;
1391         outbuf[1] = (buflen - 2) & 0xff;
1392     }
1393     return buflen;
1394 }
1395 
1396 static int scsi_disk_emulate_read_toc(SCSIRequest *req, uint8_t *outbuf)
1397 {
1398     SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
1399     int start_track, format, msf, toclen;
1400     uint64_t nb_sectors;
1401 
1402     msf = req->cmd.buf[1] & 2;
1403     format = req->cmd.buf[2] & 0xf;
1404     start_track = req->cmd.buf[6];
1405     blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
1406     trace_scsi_disk_emulate_read_toc(start_track, format, msf >> 1);
1407     nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE;
1408     switch (format) {
1409     case 0:
1410         toclen = cdrom_read_toc(nb_sectors, outbuf, msf, start_track);
1411         break;
1412     case 1:
1413         /* multi session : only a single session defined */
1414         toclen = 12;
1415         memset(outbuf, 0, 12);
1416         outbuf[1] = 0x0a;
1417         outbuf[2] = 0x01;
1418         outbuf[3] = 0x01;
1419         break;
1420     case 2:
1421         toclen = cdrom_read_toc_raw(nb_sectors, outbuf, msf, start_track);
1422         break;
1423     default:
1424         return -1;
1425     }
1426     return toclen;
1427 }
1428 
1429 static int scsi_disk_emulate_start_stop(SCSIDiskReq *r)
1430 {
1431     SCSIRequest *req = &r->req;
1432     SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
1433     bool start = req->cmd.buf[4] & 1;
1434     bool loej = req->cmd.buf[4] & 2; /* load on start, eject on !start */
1435     int pwrcnd = req->cmd.buf[4] & 0xf0;
1436 
1437     if (pwrcnd) {
1438         /* eject/load only happens for power condition == 0 */
1439         return 0;
1440     }
1441 
1442     if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) && loej) {
1443         if (!start && !s->tray_open && s->tray_locked) {
1444             scsi_check_condition(r,
1445                                  blk_is_inserted(s->qdev.conf.blk)
1446                                  ? SENSE_CODE(ILLEGAL_REQ_REMOVAL_PREVENTED)
1447                                  : SENSE_CODE(NOT_READY_REMOVAL_PREVENTED));
1448             return -1;
1449         }
1450 
1451         if (s->tray_open != !start) {
1452             blk_eject(s->qdev.conf.blk, !start);
1453             s->tray_open = !start;
1454         }
1455     }
1456     return 0;
1457 }
1458 
1459 static void scsi_disk_emulate_read_data(SCSIRequest *req)
1460 {
1461     SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
1462     int buflen = r->iov.iov_len;
1463 
1464     if (buflen) {
1465         trace_scsi_disk_emulate_read_data(buflen);
1466         r->iov.iov_len = 0;
1467         r->started = true;
1468         scsi_req_data(&r->req, buflen);
1469         return;
1470     }
1471 
1472     /* This also clears the sense buffer for REQUEST SENSE.  */
1473     scsi_req_complete(&r->req, GOOD);
1474 }
1475 
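/*
 * First pass of MODE SELECT: check that the page sent by the initiator
 * differs from the current values only in bits that MODE SENSE reports as
 * changeable.  Returns 0 if the page is acceptable, -1 otherwise.
 */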
1476 static int scsi_disk_check_mode_select(SCSIDiskState *s, int page,
1477                                        uint8_t *inbuf, int inlen)
1478 {
1479     uint8_t mode_current[SCSI_MAX_MODE_LEN];
1480     uint8_t mode_changeable[SCSI_MAX_MODE_LEN];
1481     uint8_t *p;
1482     int len, expected_len, changeable_len, i;
1483 
1484     /* The input buffer does not include the page header, so it is
1485      * off by 2 bytes.
1486      */
1487     expected_len = inlen + 2;
1488     if (expected_len > SCSI_MAX_MODE_LEN) {
1489         return -1;
1490     }
1491 
1492     /* MODE_PAGE_ALLS is only valid for MODE SENSE commands */
1493     if (page == MODE_PAGE_ALLS) {
1494         return -1;
1495     }
1496 
1497     p = mode_current;
1498     memset(mode_current, 0, inlen + 2);
1499     len = mode_sense_page(s, page, &p, 0);
1500     if (len < 0 || len != expected_len) {
1501         return -1;
1502     }
1503 
1504     p = mode_changeable;
1505     memset(mode_changeable, 0, inlen + 2);
1506     changeable_len = mode_sense_page(s, page, &p, 1);
1507     assert(changeable_len == len);
1508 
1509     /* Check that unchangeable bits are the same as what MODE SENSE
1510      * would return.
1511      */
1512     for (i = 2; i < len; i++) {
1513         if (((mode_current[i] ^ inbuf[i - 2]) & ~mode_changeable[i]) != 0) {
1514             return -1;
1515         }
1516     }
1517     return 0;
1518 }
1519 
1520 static void scsi_disk_apply_mode_select(SCSIDiskState *s, int page, uint8_t *p)
1521 {
1522     switch (page) {
1523     case MODE_PAGE_CACHING:
1524         blk_set_enable_write_cache(s->qdev.conf.blk, (p[0] & 4) != 0);
1525         break;
1526 
1527     default:
1528         break;
1529     }
1530 }
1531 
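/*
 * Walk the mode pages of a MODE SELECT parameter list.  With change=false
 * the pages are only validated; with change=true they are applied.  On
 * error a check condition is raised and -1 is returned.
 */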
1532 static int mode_select_pages(SCSIDiskReq *r, uint8_t *p, int len, bool change)
1533 {
1534     SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1535 
1536     while (len > 0) {
1537         int page, subpage, page_len;
1538 
1539         /* Parse both possible formats for the mode page headers.  */
1540         page = p[0] & 0x3f;
1541         if (p[0] & 0x40) {
1542             if (len < 4) {
1543                 goto invalid_param_len;
1544             }
1545             subpage = p[1];
1546             page_len = lduw_be_p(&p[2]);
1547             p += 4;
1548             len -= 4;
1549         } else {
1550             if (len < 2) {
1551                 goto invalid_param_len;
1552             }
1553             subpage = 0;
1554             page_len = p[1];
1555             p += 2;
1556             len -= 2;
1557         }
1558 
1559         if (subpage) {
1560             goto invalid_param;
1561         }
1562         if (page_len > len) {
1563             if (!(s->quirks & (1 << SCSI_DISK_QUIRK_MODE_PAGE_TRUNCATED))) {
1564                 goto invalid_param_len;
1565             }
1566             trace_scsi_disk_mode_select_page_truncated(page, page_len, len);
1567         }
1568 
1569         if (!change) {
1570             if (scsi_disk_check_mode_select(s, page, p, page_len) < 0) {
1571                 goto invalid_param;
1572             }
1573         } else {
1574             scsi_disk_apply_mode_select(s, page, p);
1575         }
1576 
1577         p += page_len;
1578         len -= page_len;
1579     }
1580     return 0;
1581 
1582 invalid_param:
1583     scsi_check_condition(r, SENSE_CODE(INVALID_PARAM));
1584     return -1;
1585 
1586 invalid_param_len:
1587     scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
1588     return -1;
1589 }
1590 
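/*
 * Emulate MODE SELECT(6) and MODE SELECT(10): parse the parameter list
 * header and block descriptor (optionally updating the block size), then
 * validate and apply the mode pages in two passes so that nothing changes
 * if any page is rejected.  If the write cache ends up disabled, a flush
 * is issued before the command completes.
 */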
1591 static void scsi_disk_emulate_mode_select(SCSIDiskReq *r, uint8_t *inbuf)
1592 {
1593     SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1594     uint8_t *p = inbuf;
1595     int cmd = r->req.cmd.buf[0];
1596     int len = r->req.cmd.xfer;
1597     int hdr_len = (cmd == MODE_SELECT ? 4 : 8);
1598     int bd_len, bs;
1599     int pass;
1600 
1601     if ((r->req.cmd.buf[1] & 0x11) != 0x10) {
1602         if (!(s->quirks &
1603             (1 << SCSI_DISK_QUIRK_MODE_PAGE_VENDOR_SPECIFIC_APPLE))) {
1604             /* We only support PF=1, SP=0.  */
1605             goto invalid_field;
1606         }
1607     }
1608 
1609     if (len < hdr_len) {
1610         goto invalid_param_len;
1611     }
1612 
1613     bd_len = (cmd == MODE_SELECT ? p[3] : lduw_be_p(&p[6]));
1614     len -= hdr_len;
1615     p += hdr_len;
1616     if (len < bd_len) {
1617         goto invalid_param_len;
1618     }
1619     if (bd_len != 0 && bd_len != 8) {
1620         goto invalid_param;
1621     }
1622 
1623     /* Allow changing the block size */
1624     if (bd_len) {
1625         bs = p[5] << 16 | p[6] << 8 | p[7];
1626 
1627         /*
1628          * Since the existing code only checks/updates bits 8-15 of the block
1629          * size, restrict ourselves to the same requirement for now to ensure
1630          * that a block size set by a block descriptor and then read back by
1631          * a subsequent SCSI command will be the same
1632          */
1633         if (bs && !(bs & ~0xff00) && bs != s->qdev.blocksize) {
1634             s->qdev.blocksize = bs;
1635             trace_scsi_disk_mode_select_set_blocksize(s->qdev.blocksize);
1636         }
1637     }
1638 
1639     len -= bd_len;
1640     p += bd_len;
1641 
1642     /* Ensure no change is made if there is an error!  */
1643     for (pass = 0; pass < 2; pass++) {
1644         if (mode_select_pages(r, p, len, pass == 1) < 0) {
1645             assert(pass == 0);
1646             return;
1647         }
1648     }
1649     if (!blk_enable_write_cache(s->qdev.conf.blk)) {
1650         /* The request is used as the AIO opaque value, so add a ref.  */
1651         scsi_req_ref(&r->req);
1652         block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
1653                          BLOCK_ACCT_FLUSH);
1654         r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
1655         return;
1656     }
1657 
1658     scsi_req_complete(&r->req, GOOD);
1659     return;
1660 
1661 invalid_param:
1662     scsi_check_condition(r, SENSE_CODE(INVALID_PARAM));
1663     return;
1664 
1665 invalid_param_len:
1666     scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
1667     return;
1668 
1669 invalid_field:
1670     scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
1671 }
1672 
1673 /* sector_num and nb_sectors are given in units of the qdev blocksize */
1674 static inline bool check_lba_range(SCSIDiskState *s,
1675                                    uint64_t sector_num, uint32_t nb_sectors)
1676 {
1677     /*
1678      * The first line tests that no overflow happens when computing the last
1679      * sector.  The second line tests that the last accessed sector is in
1680      * range.
1681      *
1682      * Careful, the computations should not underflow for nb_sectors == 0,
1683      * and a 0-block read to the first LBA beyond the end of the device is
1684      * valid.
1685      */
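    /*
     * For example, with max_lba == 0xffff a request for (sector_num ==
     * 0x10000, nb_sectors == 0) is accepted, while (0x10000, 1) is not.
     */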
1686     return (sector_num <= sector_num + nb_sectors &&
1687             sector_num + nb_sectors <= s->qdev.max_lba + 1);
1688 }
1689 
1690 typedef struct UnmapCBData {
1691     SCSIDiskReq *r;
1692     uint8_t *inbuf;
1693     int count;
1694 } UnmapCBData;
1695 
1696 static void scsi_unmap_complete(void *opaque, int ret);
1697 
1698 static void scsi_unmap_complete_noio(UnmapCBData *data, int ret)
1699 {
1700     SCSIDiskReq *r = data->r;
1701     SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1702 
1703     assert(r->req.aiocb == NULL);
1704 
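    /*
     * Each call consumes one 16-byte block descriptor and issues a discard
     * for it; scsi_unmap_complete() re-enters here until data->count
     * reaches zero.
     */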
1705     if (data->count > 0) {
1706         uint64_t sector_num = ldq_be_p(&data->inbuf[0]);
1707         uint32_t nb_sectors = ldl_be_p(&data->inbuf[8]) & 0xffffffffULL;
1708         r->sector = sector_num * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
1709         r->sector_count = nb_sectors * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
1710 
1711         if (!check_lba_range(s, sector_num, nb_sectors)) {
1712             block_acct_invalid(blk_get_stats(s->qdev.conf.blk),
1713                                BLOCK_ACCT_UNMAP);
1714             scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
1715             goto done;
1716         }
1717 
1718         block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
1719                          r->sector_count * BDRV_SECTOR_SIZE,
1720                          BLOCK_ACCT_UNMAP);
1721 
1722         r->req.aiocb = blk_aio_pdiscard(s->qdev.conf.blk,
1723                                         r->sector * BDRV_SECTOR_SIZE,
1724                                         r->sector_count * BDRV_SECTOR_SIZE,
1725                                         scsi_unmap_complete, data);
1726         data->count--;
1727         data->inbuf += 16;
1728         return;
1729     }
1730 
1731     scsi_req_complete(&r->req, GOOD);
1732 
1733 done:
1734     scsi_req_unref(&r->req);
1735     g_free(data);
1736 }
1737 
1738 static void scsi_unmap_complete(void *opaque, int ret)
1739 {
1740     UnmapCBData *data = opaque;
1741     SCSIDiskReq *r = data->r;
1742     SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1743 
1744     aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
1745 
1746     assert(r->req.aiocb != NULL);
1747     r->req.aiocb = NULL;
1748 
1749     if (scsi_disk_req_check_error(r, ret, true)) {
1750         scsi_req_unref(&r->req);
1751         g_free(data);
1752     } else {
1753         block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
1754         scsi_unmap_complete_noio(data, ret);
1755     }
1756     aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
1757 }
1758 
1759 static void scsi_disk_emulate_unmap(SCSIDiskReq *r, uint8_t *inbuf)
1760 {
1761     SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1762     uint8_t *p = inbuf;
1763     int len = r->req.cmd.xfer;
1764     UnmapCBData *data;
1765 
1766     /* Reject ANCHOR=1.  */
1767     if (r->req.cmd.buf[1] & 0x1) {
1768         goto invalid_field;
1769     }
1770 
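    /*
     * Parameter list layout: bytes 0-1 hold the UNMAP data length,
     * bytes 2-3 the block descriptor data length (a multiple of 16),
     * and each 16-byte descriptor starting at offset 8 holds a 64-bit
     * LBA followed by a 32-bit block count.
     */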
1771     if (len < 8) {
1772         goto invalid_param_len;
1773     }
1774     if (len < lduw_be_p(&p[0]) + 2) {
1775         goto invalid_param_len;
1776     }
1777     if (len < lduw_be_p(&p[2]) + 8) {
1778         goto invalid_param_len;
1779     }
1780     if (lduw_be_p(&p[2]) & 15) {
1781         goto invalid_param_len;
1782     }
1783 
1784     if (!blk_is_writable(s->qdev.conf.blk)) {
1785         block_acct_invalid(blk_get_stats(s->qdev.conf.blk), BLOCK_ACCT_UNMAP);
1786         scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
1787         return;
1788     }
1789 
1790     data = g_new0(UnmapCBData, 1);
1791     data->r = r;
1792     data->inbuf = &p[8];
1793     data->count = lduw_be_p(&p[2]) >> 4;
1794 
1795     /* The matching unref is in scsi_unmap_complete, before data is freed.  */
1796     scsi_req_ref(&r->req);
1797     scsi_unmap_complete_noio(data, 0);
1798     return;
1799 
1800 invalid_param_len:
1801     block_acct_invalid(blk_get_stats(s->qdev.conf.blk), BLOCK_ACCT_UNMAP);
1802     scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
1803     return;
1804 
1805 invalid_field:
1806     block_acct_invalid(blk_get_stats(s->qdev.conf.blk), BLOCK_ACCT_UNMAP);
1807     scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
1808 }
1809 
1810 typedef struct WriteSameCBData {
1811     SCSIDiskReq *r;
1812     int64_t sector;
1813     int nb_sectors;
1814     QEMUIOVector qiov;
1815     struct iovec iov;
1816 } WriteSameCBData;
1817 
1818 static void scsi_write_same_complete(void *opaque, int ret)
1819 {
1820     WriteSameCBData *data = opaque;
1821     SCSIDiskReq *r = data->r;
1822     SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
1823 
1824     aio_context_acquire(blk_get_aio_context(s->qdev.conf.blk));
1825 
1826     assert(r->req.aiocb != NULL);
1827     r->req.aiocb = NULL;
1828 
1829     if (scsi_disk_req_check_error(r, ret, true)) {
1830         goto done;
1831     }
1832 
1833     block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct);
1834 
1835     data->nb_sectors -= data->iov.iov_len / BDRV_SECTOR_SIZE;
1836     data->sector += data->iov.iov_len / BDRV_SECTOR_SIZE;
1837     data->iov.iov_len = MIN(data->nb_sectors * BDRV_SECTOR_SIZE,
1838                             data->iov.iov_len);
1839     if (data->iov.iov_len) {
1840         block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
1841                          data->iov.iov_len, BLOCK_ACCT_WRITE);
1842         /* Reinitialize qiov to handle an unaligned WRITE SAME request,
1843          * where the final qiov may need a smaller size. */
1844         qemu_iovec_init_external(&data->qiov, &data->iov, 1);
1845         r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk,
1846                                        data->sector << BDRV_SECTOR_BITS,
1847                                        &data->qiov, 0,
1848                                        scsi_write_same_complete, data);
1849         aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
1850         return;
1851     }
1852 
1853     scsi_req_complete(&r->req, GOOD);
1854 
1855 done:
1856     scsi_req_unref(&r->req);
1857     qemu_vfree(data->iov.iov_base);
1858     g_free(data);
1859     aio_context_release(blk_get_aio_context(s->qdev.conf.blk));
1860 }
1861 
1862 static void scsi_disk_emulate_write_same(SCSIDiskReq *r, uint8_t *inbuf)
1863 {
1864     SCSIRequest *req = &r->req;
1865     SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
1866     uint32_t nb_sectors = scsi_data_cdb_xfer(r->req.cmd.buf);
1867     WriteSameCBData *data;
1868     uint8_t *buf;
1869     int i, l;
1870 
1871     /* Fail if PBDATA=1 or LBDATA=1 or ANCHOR=1.  */
1872     if (nb_sectors == 0 || (req->cmd.buf[1] & 0x16)) {
1873         scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
1874         return;
1875     }
1876 
1877     if (!blk_is_writable(s->qdev.conf.blk)) {
1878         scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
1879         return;
1880     }
1881     if (!check_lba_range(s, r->req.cmd.lba, nb_sectors)) {
1882         scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
1883         return;
1884     }
1885 
1886     if ((req->cmd.buf[1] & 0x1) || buffer_is_zero(inbuf, s->qdev.blocksize)) {
1887         int flags = (req->cmd.buf[1] & 0x8) ? BDRV_REQ_MAY_UNMAP : 0;
1888 
1889         /* The request is used as the AIO opaque value, so add a ref.  */
1890         scsi_req_ref(&r->req);
1891         block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
1892                          nb_sectors * s->qdev.blocksize,
1893                          BLOCK_ACCT_WRITE);
1894         r->req.aiocb = blk_aio_pwrite_zeroes(s->qdev.conf.blk,
1895                                 r->req.cmd.lba * s->qdev.blocksize,
1896                                 nb_sectors * s->qdev.blocksize,
1897                                 flags, scsi_aio_complete, r);
1898         return;
1899     }
1900 
1901     data = g_new0(WriteSameCBData, 1);
1902     data->r = r;
1903     data->sector = r->req.cmd.lba * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
1904     data->nb_sectors = nb_sectors * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
1905     data->iov.iov_len = MIN(data->nb_sectors * BDRV_SECTOR_SIZE,
1906                             SCSI_WRITE_SAME_MAX);
1907     data->iov.iov_base = buf = blk_blockalign(s->qdev.conf.blk,
1908                                               data->iov.iov_len);
1909     qemu_iovec_init_external(&data->qiov, &data->iov, 1);
1910 
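    /* Replicate the single-block pattern from the data-out buffer across
     * the bounce buffer, one logical block at a time. */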
1911     for (i = 0; i < data->iov.iov_len; i += l) {
1912         l = MIN(s->qdev.blocksize, data->iov.iov_len - i);
1913         memcpy(&buf[i], inbuf, l);
1914     }
1915 
1916     scsi_req_ref(&r->req);
1917     block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct,
1918                      data->iov.iov_len, BLOCK_ACCT_WRITE);
1919     r->req.aiocb = blk_aio_pwritev(s->qdev.conf.blk,
1920                                    data->sector << BDRV_SECTOR_BITS,
1921                                    &data->qiov, 0,
1922                                    scsi_write_same_complete, data);
1923 }
1924 
1925 static void scsi_disk_emulate_write_data(SCSIRequest *req)
1926 {
1927     SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
1928 
1929     if (r->iov.iov_len) {
1930         int buflen = r->iov.iov_len;
1931         trace_scsi_disk_emulate_write_data(buflen);
1932         r->iov.iov_len = 0;
1933         scsi_req_data(&r->req, buflen);
1934         return;
1935     }
1936 
1937     switch (req->cmd.buf[0]) {
1938     case MODE_SELECT:
1939     case MODE_SELECT_10:
1940         /* This also clears the sense buffer for REQUEST SENSE.  */
1941         scsi_disk_emulate_mode_select(r, r->iov.iov_base);
1942         break;
1943 
1944     case UNMAP:
1945         scsi_disk_emulate_unmap(r, r->iov.iov_base);
1946         break;
1947 
1948     case VERIFY_10:
1949     case VERIFY_12:
1950     case VERIFY_16:
1951         if (r->req.status == -1) {
1952             scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
1953         }
1954         break;
1955 
1956     case WRITE_SAME_10:
1957     case WRITE_SAME_16:
1958         scsi_disk_emulate_write_same(r, r->iov.iov_base);
1959         break;
1960 
1961     default:
1962         abort();
1963     }
1964 }
1965 
1966 static int32_t scsi_disk_emulate_command(SCSIRequest *req, uint8_t *buf)
1967 {
1968     SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
1969     SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
1970     uint64_t nb_sectors;
1971     uint8_t *outbuf;
1972     int buflen;
1973 
1974     switch (req->cmd.buf[0]) {
1975     case INQUIRY:
1976     case MODE_SENSE:
1977     case MODE_SENSE_10:
1978     case RESERVE:
1979     case RESERVE_10:
1980     case RELEASE:
1981     case RELEASE_10:
1982     case START_STOP:
1983     case ALLOW_MEDIUM_REMOVAL:
1984     case GET_CONFIGURATION:
1985     case GET_EVENT_STATUS_NOTIFICATION:
1986     case MECHANISM_STATUS:
1987     case REQUEST_SENSE:
1988         break;
1989 
1990     default:
1991         if (!blk_is_available(s->qdev.conf.blk)) {
1992             scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
1993             return 0;
1994         }
1995         break;
1996     }
1997 
1998     /*
1999      * FIXME: we shouldn't return anything bigger than 4k, but the code
2000      * requires the buffer to be as big as req->cmd.xfer in several
2001      * places.  So, do not allow CDBs with a very large ALLOCATION
2002      * LENGTH.  The real fix would be to modify scsi_read_data and
2003      * dma_buf_read, so that they return data beyond the buflen
2004      * as all zeros.
2005      */
2006     if (req->cmd.xfer > 65536) {
2007         goto illegal_request;
2008     }
2009     r->buflen = MAX(4096, req->cmd.xfer);
2010 
2011     if (!r->iov.iov_base) {
2012         r->iov.iov_base = blk_blockalign(s->qdev.conf.blk, r->buflen);
2013     }
2014 
2015     outbuf = r->iov.iov_base;
2016     memset(outbuf, 0, r->buflen);
2017     switch (req->cmd.buf[0]) {
2018     case TEST_UNIT_READY:
2019         assert(blk_is_available(s->qdev.conf.blk));
2020         break;
2021     case INQUIRY:
2022         buflen = scsi_disk_emulate_inquiry(req, outbuf);
2023         if (buflen < 0) {
2024             goto illegal_request;
2025         }
2026         break;
2027     case MODE_SENSE:
2028     case MODE_SENSE_10:
2029         buflen = scsi_disk_emulate_mode_sense(r, outbuf);
2030         if (buflen < 0) {
2031             goto illegal_request;
2032         }
2033         break;
2034     case READ_TOC:
2035         buflen = scsi_disk_emulate_read_toc(req, outbuf);
2036         if (buflen < 0) {
2037             goto illegal_request;
2038         }
2039         break;
2040     case RESERVE:
2041         if (req->cmd.buf[1] & 1) {
2042             goto illegal_request;
2043         }
2044         break;
2045     case RESERVE_10:
2046         if (req->cmd.buf[1] & 3) {
2047             goto illegal_request;
2048         }
2049         break;
2050     case RELEASE:
2051         if (req->cmd.buf[1] & 1) {
2052             goto illegal_request;
2053         }
2054         break;
2055     case RELEASE_10:
2056         if (req->cmd.buf[1] & 3) {
2057             goto illegal_request;
2058         }
2059         break;
2060     case START_STOP:
2061         if (scsi_disk_emulate_start_stop(r) < 0) {
2062             return 0;
2063         }
2064         break;
2065     case ALLOW_MEDIUM_REMOVAL:
2066         s->tray_locked = req->cmd.buf[4] & 1;
2067         blk_lock_medium(s->qdev.conf.blk, req->cmd.buf[4] & 1);
2068         break;
2069     case READ_CAPACITY_10:
2070         /* The normal LEN field for this command is zero.  */
2071         memset(outbuf, 0, 8);
2072         blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
2073         if (!nb_sectors) {
2074             scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY));
2075             return 0;
2076         }
2077         if ((req->cmd.buf[8] & 1) == 0 && req->cmd.lba) {
2078             goto illegal_request;
2079         }
2080         nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE;
2081         /* Returned value is the address of the last sector.  */
2082         nb_sectors--;
2083         /* Remember the new size for read/write sanity checking. */
2084         s->qdev.max_lba = nb_sectors;
2085         /* Clip to 2TB, instead of returning capacity modulo 2TB. */
2086         if (nb_sectors > UINT32_MAX) {
2087             nb_sectors = UINT32_MAX;
2088         }
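        /* Bytes 0-3: address of the last logical block (big-endian);
         * bytes 4-7: block length in bytes. */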
2089         outbuf[0] = (nb_sectors >> 24) & 0xff;
2090         outbuf[1] = (nb_sectors >> 16) & 0xff;
2091         outbuf[2] = (nb_sectors >> 8) & 0xff;
2092         outbuf[3] = nb_sectors & 0xff;
2093         outbuf[4] = 0;
2094         outbuf[5] = 0;
2095         outbuf[6] = s->qdev.blocksize >> 8;
2096         outbuf[7] = 0;
2097         break;
2098     case REQUEST_SENSE:
2099         /* Just return "NO SENSE".  */
2100         buflen = scsi_convert_sense(NULL, 0, outbuf, r->buflen,
2101                                     (req->cmd.buf[1] & 1) == 0);
2102         if (buflen < 0) {
2103             goto illegal_request;
2104         }
2105         break;
2106     case MECHANISM_STATUS:
2107         buflen = scsi_emulate_mechanism_status(s, outbuf);
2108         if (buflen < 0) {
2109             goto illegal_request;
2110         }
2111         break;
2112     case GET_CONFIGURATION:
2113         buflen = scsi_get_configuration(s, outbuf);
2114         if (buflen < 0) {
2115             goto illegal_request;
2116         }
2117         break;
2118     case GET_EVENT_STATUS_NOTIFICATION:
2119         buflen = scsi_get_event_status_notification(s, r, outbuf);
2120         if (buflen < 0) {
2121             goto illegal_request;
2122         }
2123         break;
2124     case READ_DISC_INFORMATION:
2125         buflen = scsi_read_disc_information(s, r, outbuf);
2126         if (buflen < 0) {
2127             goto illegal_request;
2128         }
2129         break;
2130     case READ_DVD_STRUCTURE:
2131         buflen = scsi_read_dvd_structure(s, r, outbuf);
2132         if (buflen < 0) {
2133             goto illegal_request;
2134         }
2135         break;
2136     case SERVICE_ACTION_IN_16:
2137         /* Service Action In subcommands. */
2138         if ((req->cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) {
2139             trace_scsi_disk_emulate_command_SAI_16();
2140             memset(outbuf, 0, req->cmd.xfer);
2141             blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
2142             if (!nb_sectors) {
2143                 scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY));
2144                 return 0;
2145             }
2146             if ((req->cmd.buf[14] & 1) == 0 && req->cmd.lba) {
2147                 goto illegal_request;
2148             }
2149             nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE;
2150             /* Returned value is the address of the last sector.  */
2151             nb_sectors--;
2152             /* Remember the new size for read/write sanity checking. */
2153             s->qdev.max_lba = nb_sectors;
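            /* Bytes 0-7: address of the last logical block (big-endian);
             * bytes 8-11: block length in bytes. */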
2154             outbuf[0] = (nb_sectors >> 56) & 0xff;
2155             outbuf[1] = (nb_sectors >> 48) & 0xff;
2156             outbuf[2] = (nb_sectors >> 40) & 0xff;
2157             outbuf[3] = (nb_sectors >> 32) & 0xff;
2158             outbuf[4] = (nb_sectors >> 24) & 0xff;
2159             outbuf[5] = (nb_sectors >> 16) & 0xff;
2160             outbuf[6] = (nb_sectors >> 8) & 0xff;
2161             outbuf[7] = nb_sectors & 0xff;
2162             outbuf[8] = 0;
2163             outbuf[9] = 0;
2164             outbuf[10] = s->qdev.blocksize >> 8;
2165             outbuf[11] = 0;
2166             outbuf[12] = 0;
2167             outbuf[13] = get_physical_block_exp(&s->qdev.conf);
2168 
2169             /* set TPE bit if the format supports discard */
2170             if (s->qdev.conf.discard_granularity) {
2171                 outbuf[14] = 0x80;
2172             }
2173 
2174             /* Protection, exponent and lowest LBA fields are left blank. */
2175             break;
2176         }
2177         trace_scsi_disk_emulate_command_SAI_unsupported();
2178         goto illegal_request;
2179     case SYNCHRONIZE_CACHE:
2180         /* The request is used as the AIO opaque value, so add a ref.  */
2181         scsi_req_ref(&r->req);
2182         block_acct_start(blk_get_stats(s->qdev.conf.blk), &r->acct, 0,
2183                          BLOCK_ACCT_FLUSH);
2184         r->req.aiocb = blk_aio_flush(s->qdev.conf.blk, scsi_aio_complete, r);
2185         return 0;
2186     case SEEK_10:
2187         trace_scsi_disk_emulate_command_SEEK_10(r->req.cmd.lba);
2188         if (r->req.cmd.lba > s->qdev.max_lba) {
2189             goto illegal_lba;
2190         }
2191         break;
2192     case MODE_SELECT:
2193         trace_scsi_disk_emulate_command_MODE_SELECT(r->req.cmd.xfer);
2194         break;
2195     case MODE_SELECT_10:
2196         trace_scsi_disk_emulate_command_MODE_SELECT_10(r->req.cmd.xfer);
2197         break;
2198     case UNMAP:
2199         trace_scsi_disk_emulate_command_UNMAP(r->req.cmd.xfer);
2200         break;
2201     case VERIFY_10:
2202     case VERIFY_12:
2203     case VERIFY_16:
2204         trace_scsi_disk_emulate_command_VERIFY((req->cmd.buf[1] >> 1) & 3);
2205         if (req->cmd.buf[1] & 6) {
2206             goto illegal_request;
2207         }
2208         break;
2209     case WRITE_SAME_10:
2210     case WRITE_SAME_16:
2211         trace_scsi_disk_emulate_command_WRITE_SAME(
2212                 req->cmd.buf[0] == WRITE_SAME_10 ? 10 : 16, r->req.cmd.xfer);
2213         break;
2214     case FORMAT_UNIT:
2215         trace_scsi_disk_emulate_command_FORMAT_UNIT(r->req.cmd.xfer);
2216         break;
2217     default:
2218         trace_scsi_disk_emulate_command_UNKNOWN(buf[0],
2219                                                 scsi_command_name(buf[0]));
2220         scsi_check_condition(r, SENSE_CODE(INVALID_OPCODE));
2221         return 0;
2222     }
2223     assert(!r->req.aiocb);
2224     r->iov.iov_len = MIN(r->buflen, req->cmd.xfer);
2225     if (r->iov.iov_len == 0) {
2226         scsi_req_complete(&r->req, GOOD);
2227     }
2228     if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
2229         assert(r->iov.iov_len == req->cmd.xfer);
2230         return -r->iov.iov_len;
2231     } else {
2232         return r->iov.iov_len;
2233     }
2234 
2235 illegal_request:
2236     if (r->req.status == -1) {
2237         scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
2238     }
2239     return 0;
2240 
2241 illegal_lba:
2242     scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
2243     return 0;
2244 }
2245 
2246 /* Execute a SCSI command.  Returns the length of the data expected by the
2247    command.  This will be positive for data transfers from the device
2248    (e.g. disk reads), negative for transfers to the device (e.g. disk writes),
2249    and zero if the command does not transfer any data.  */
2250 
2251 static int32_t scsi_disk_dma_command(SCSIRequest *req, uint8_t *buf)
2252 {
2253     SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
2254     SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
2255     SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s));
2256     uint32_t len;
2257     uint8_t command;
2258 
2259     command = buf[0];
2260 
2261     if (!blk_is_available(s->qdev.conf.blk)) {
2262         scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
2263         return 0;
2264     }
2265 
2266     len = scsi_data_cdb_xfer(r->req.cmd.buf);
2267     switch (command) {
2268     case READ_6:
2269     case READ_10:
2270     case READ_12:
2271     case READ_16:
2272         trace_scsi_disk_dma_command_READ(r->req.cmd.lba, len);
2273         /* Protection information is not supported.  For SCSI versions 2 and
2274          * older (as determined by snooping the guest's INQUIRY commands),
2275          * there is no RD/WR/VRPROTECT, so skip this check in these versions.
2276          */
2277         if (s->qdev.scsi_version > 2 && (r->req.cmd.buf[1] & 0xe0)) {
2278             goto illegal_request;
2279         }
2280         if (!check_lba_range(s, r->req.cmd.lba, len)) {
2281             goto illegal_lba;
2282         }
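        /* Convert logical blocks to 512-byte BDRV sectors; e.g. one
         * 4096-byte logical block spans eight BDRV sectors. */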
2283         r->sector = r->req.cmd.lba * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
2284         r->sector_count = len * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
2285         break;
2286     case WRITE_6:
2287     case WRITE_10:
2288     case WRITE_12:
2289     case WRITE_16:
2290     case WRITE_VERIFY_10:
2291     case WRITE_VERIFY_12:
2292     case WRITE_VERIFY_16:
2293         if (!blk_is_writable(s->qdev.conf.blk)) {
2294             scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
2295             return 0;
2296         }
2297         trace_scsi_disk_dma_command_WRITE(
2298                 (command & 0xe) == 0xe ? "And Verify " : "",
2299                 r->req.cmd.lba, len);
2300         /* fall through */
2301     case VERIFY_10:
2302     case VERIFY_12:
2303     case VERIFY_16:
2304         /* We get here only for BYTCHK == 0x01 and only for scsi-block.
2305          * As far as DMA is concerned, we can treat it the same as a write;
2306          * scsi_block_do_sgio will send VERIFY commands.
2307          */
2308         if (s->qdev.scsi_version > 2 && (r->req.cmd.buf[1] & 0xe0)) {
2309             goto illegal_request;
2310         }
2311         if (!check_lba_range(s, r->req.cmd.lba, len)) {
2312             goto illegal_lba;
2313         }
2314         r->sector = r->req.cmd.lba * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
2315         r->sector_count = len * (s->qdev.blocksize / BDRV_SECTOR_SIZE);
2316         break;
2317     default:
2318         abort();
2319     illegal_request:
2320         scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
2321         return 0;
2322     illegal_lba:
2323         scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
2324         return 0;
2325     }
2326     r->need_fua_emulation = sdc->need_fua_emulation(&r->req.cmd);
2327     if (r->sector_count == 0) {
2328         scsi_req_complete(&r->req, GOOD);
2329     }
2330     assert(r->iov.iov_len == 0);
2331     if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
2332         return -r->sector_count * BDRV_SECTOR_SIZE;
2333     } else {
2334         return r->sector_count * BDRV_SECTOR_SIZE;
2335     }
2336 }
2337 
2338 static void scsi_disk_reset(DeviceState *dev)
2339 {
2340     SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev.qdev, dev);
2341     uint64_t nb_sectors;
2342     AioContext *ctx;
2343 
2344     scsi_device_purge_requests(&s->qdev, SENSE_CODE(RESET));
2345 
2346     ctx = blk_get_aio_context(s->qdev.conf.blk);
2347     aio_context_acquire(ctx);
2348     blk_get_geometry(s->qdev.conf.blk, &nb_sectors);
2349     aio_context_release(ctx);
2350 
2351     nb_sectors /= s->qdev.blocksize / BDRV_SECTOR_SIZE;
2352     if (nb_sectors) {
2353         nb_sectors--;
2354     }
2355     s->qdev.max_lba = nb_sectors;
2356     /* reset tray statuses */
2357     s->tray_locked = 0;
2358     s->tray_open = 0;
2359 
2360     s->qdev.scsi_version = s->qdev.default_scsi_version;
2361 }
2362 
2363 static void scsi_disk_resize_cb(void *opaque)
2364 {
2365     SCSIDiskState *s = opaque;
2366 
2367     /* SPC lists this sense code as available only for
2368      * direct-access devices.
2369      */
2370     if (s->qdev.type == TYPE_DISK) {
2371         scsi_device_report_change(&s->qdev, SENSE_CODE(CAPACITY_CHANGED));
2372     }
2373 }
2374 
2375 static void scsi_cd_change_media_cb(void *opaque, bool load, Error **errp)
2376 {
2377     SCSIDiskState *s = opaque;
2378 
2379     /*
2380      * When a CD gets changed, we have to report an ejected state and
2381      * then a loaded state to guests so that they detect tray
2382      * open/close and media change events.  Guests that do not use
2383      * GET_EVENT_STATUS_NOTIFICATION to detect such tray open/close
2384      * states rely on this behavior.
2385      *
2386      * media_changed governs the state machine used for unit attention
2387      * report.  media_event is used by GET EVENT STATUS NOTIFICATION.
2388      */
2389     s->media_changed = load;
2390     s->tray_open = !load;
2391     scsi_device_set_ua(&s->qdev, SENSE_CODE(UNIT_ATTENTION_NO_MEDIUM));
2392     s->media_event = true;
2393     s->eject_request = false;
2394 }
2395 
2396 static void scsi_cd_eject_request_cb(void *opaque, bool force)
2397 {
2398     SCSIDiskState *s = opaque;
2399 
2400     s->eject_request = true;
2401     if (force) {
2402         s->tray_locked = false;
2403     }
2404 }
2405 
2406 static bool scsi_cd_is_tray_open(void *opaque)
2407 {
2408     return ((SCSIDiskState *)opaque)->tray_open;
2409 }
2410 
2411 static bool scsi_cd_is_medium_locked(void *opaque)
2412 {
2413     return ((SCSIDiskState *)opaque)->tray_locked;
2414 }
2415 
2416 static const BlockDevOps scsi_disk_removable_block_ops = {
2417     .change_media_cb = scsi_cd_change_media_cb,
2418     .eject_request_cb = scsi_cd_eject_request_cb,
2419     .is_tray_open = scsi_cd_is_tray_open,
2420     .is_medium_locked = scsi_cd_is_medium_locked,
2421 
2422     .resize_cb = scsi_disk_resize_cb,
2423 };
2424 
2425 static const BlockDevOps scsi_disk_block_ops = {
2426     .resize_cb = scsi_disk_resize_cb,
2427 };
2428 
2429 static void scsi_disk_unit_attention_reported(SCSIDevice *dev)
2430 {
2431     SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
2432     if (s->media_changed) {
2433         s->media_changed = false;
2434         scsi_device_set_ua(&s->qdev, SENSE_CODE(MEDIUM_CHANGED));
2435     }
2436 }
2437 
2438 static void scsi_realize(SCSIDevice *dev, Error **errp)
2439 {
2440     SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
2441     bool read_only;
2442 
2443     if (!s->qdev.conf.blk) {
2444         error_setg(errp, "drive property not set");
2445         return;
2446     }
2447 
2448     if (!(s->features & (1 << SCSI_DISK_F_REMOVABLE)) &&
2449         !blk_is_inserted(s->qdev.conf.blk)) {
2450         error_setg(errp, "Device needs media, but drive is empty");
2451         return;
2452     }
2453 
2454     if (!blkconf_blocksizes(&s->qdev.conf, errp)) {
2455         return;
2456     }
2457 
2458     if (blk_get_aio_context(s->qdev.conf.blk) != qemu_get_aio_context() &&
2459         !s->qdev.hba_supports_iothread)
2460     {
2461         error_setg(errp, "HBA does not support iothreads");
2462         return;
2463     }
2464 
2465     if (dev->type == TYPE_DISK) {
2466         if (!blkconf_geometry(&dev->conf, NULL, 65535, 255, 255, errp)) {
2467             return;
2468         }
2469     }
2470 
2471     read_only = !blk_supports_write_perm(s->qdev.conf.blk);
2472     if (dev->type == TYPE_ROM) {
2473         read_only = true;
2474     }
2475 
2476     if (!blkconf_apply_backend_options(&dev->conf, read_only,
2477                                        dev->type == TYPE_DISK, errp)) {
2478         return;
2479     }
2480 
2481     if (s->qdev.conf.discard_granularity == -1) {
2482         s->qdev.conf.discard_granularity =
2483             MAX(s->qdev.conf.logical_block_size, DEFAULT_DISCARD_GRANULARITY);
2484     }
2485 
2486     if (!s->version) {
2487         s->version = g_strdup(qemu_hw_version());
2488     }
2489     if (!s->vendor) {
2490         s->vendor = g_strdup("QEMU");
2491     }
2492     if (!s->device_id) {
2493         if (s->serial) {
2494             s->device_id = g_strdup_printf("%.20s", s->serial);
2495         } else {
2496             const char *str = blk_name(s->qdev.conf.blk);
2497             if (str && *str) {
2498                 s->device_id = g_strdup(str);
2499             }
2500         }
2501     }
2502 
2503     if (blk_is_sg(s->qdev.conf.blk)) {
2504         error_setg(errp, "unwanted /dev/sg*");
2505         return;
2506     }
2507 
2508     if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) &&
2509             !(s->features & (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS))) {
2510         blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_removable_block_ops, s);
2511     } else {
2512         blk_set_dev_ops(s->qdev.conf.blk, &scsi_disk_block_ops, s);
2513     }
2514 
2515     blk_iostatus_enable(s->qdev.conf.blk);
2516 
2517     add_boot_device_lchs(&dev->qdev, NULL,
2518                          dev->conf.lcyls,
2519                          dev->conf.lheads,
2520                          dev->conf.lsecs);
2521 }
2522 
2523 static void scsi_unrealize(SCSIDevice *dev)
2524 {
2525     del_boot_device_lchs(&dev->qdev, NULL);
2526 }
2527 
2528 static void scsi_hd_realize(SCSIDevice *dev, Error **errp)
2529 {
2530     SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
2531     AioContext *ctx = NULL;
2532     /* This can happen for devices without a drive; the error message for the
2533      * missing backend will be issued in scsi_realize().
2534      */
2535     if (s->qdev.conf.blk) {
2536         ctx = blk_get_aio_context(s->qdev.conf.blk);
2537         aio_context_acquire(ctx);
2538         if (!blkconf_blocksizes(&s->qdev.conf, errp)) {
2539             goto out;
2540         }
2541     }
2542     s->qdev.blocksize = s->qdev.conf.logical_block_size;
2543     s->qdev.type = TYPE_DISK;
2544     if (!s->product) {
2545         s->product = g_strdup("QEMU HARDDISK");
2546     }
2547     scsi_realize(&s->qdev, errp);
2548 out:
2549     if (ctx) {
2550         aio_context_release(ctx);
2551     }
2552 }
2553 
2554 static void scsi_cd_realize(SCSIDevice *dev, Error **errp)
2555 {
2556     SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
2557     AioContext *ctx;
2558     int ret;
2559     uint32_t blocksize = 2048;
2560 
2561     if (!dev->conf.blk) {
2562         /* Anonymous BlockBackend for an empty drive. As we put it into
2563          * dev->conf, qdev takes care of detaching on unplug. */
2564         dev->conf.blk = blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL);
2565         ret = blk_attach_dev(dev->conf.blk, &dev->qdev);
2566         assert(ret == 0);
2567     }
2568 
2569     if (dev->conf.physical_block_size != 0) {
2570         blocksize = dev->conf.physical_block_size;
2571     }
2572 
2573     ctx = blk_get_aio_context(dev->conf.blk);
2574     aio_context_acquire(ctx);
2575     s->qdev.blocksize = blocksize;
2576     s->qdev.type = TYPE_ROM;
2577     s->features |= 1 << SCSI_DISK_F_REMOVABLE;
2578     if (!s->product) {
2579         s->product = g_strdup("QEMU CD-ROM");
2580     }
2581     scsi_realize(&s->qdev, errp);
2582     aio_context_release(ctx);
2583 }
2584 
2585 
2586 static const SCSIReqOps scsi_disk_emulate_reqops = {
2587     .size         = sizeof(SCSIDiskReq),
2588     .free_req     = scsi_free_request,
2589     .send_command = scsi_disk_emulate_command,
2590     .read_data    = scsi_disk_emulate_read_data,
2591     .write_data   = scsi_disk_emulate_write_data,
2592     .get_buf      = scsi_get_buf,
2593 };
2594 
2595 static const SCSIReqOps scsi_disk_dma_reqops = {
2596     .size         = sizeof(SCSIDiskReq),
2597     .free_req     = scsi_free_request,
2598     .send_command = scsi_disk_dma_command,
2599     .read_data    = scsi_read_data,
2600     .write_data   = scsi_write_data,
2601     .get_buf      = scsi_get_buf,
2602     .load_request = scsi_disk_load_request,
2603     .save_request = scsi_disk_save_request,
2604 };
2605 
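/*
 * Per-opcode request dispatch.  Opcodes without an entry here fall back to
 * scsi_disk_emulate_reqops in scsi_new_request() below.
 */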
2606 static const SCSIReqOps *const scsi_disk_reqops_dispatch[256] = {
2607     [TEST_UNIT_READY]                 = &scsi_disk_emulate_reqops,
2608     [INQUIRY]                         = &scsi_disk_emulate_reqops,
2609     [MODE_SENSE]                      = &scsi_disk_emulate_reqops,
2610     [MODE_SENSE_10]                   = &scsi_disk_emulate_reqops,
2611     [START_STOP]                      = &scsi_disk_emulate_reqops,
2612     [ALLOW_MEDIUM_REMOVAL]            = &scsi_disk_emulate_reqops,
2613     [READ_CAPACITY_10]                = &scsi_disk_emulate_reqops,
2614     [READ_TOC]                        = &scsi_disk_emulate_reqops,
2615     [READ_DVD_STRUCTURE]              = &scsi_disk_emulate_reqops,
2616     [READ_DISC_INFORMATION]           = &scsi_disk_emulate_reqops,
2617     [GET_CONFIGURATION]               = &scsi_disk_emulate_reqops,
2618     [GET_EVENT_STATUS_NOTIFICATION]   = &scsi_disk_emulate_reqops,
2619     [MECHANISM_STATUS]                = &scsi_disk_emulate_reqops,
2620     [SERVICE_ACTION_IN_16]            = &scsi_disk_emulate_reqops,
2621     [REQUEST_SENSE]                   = &scsi_disk_emulate_reqops,
2622     [SYNCHRONIZE_CACHE]               = &scsi_disk_emulate_reqops,
2623     [SEEK_10]                         = &scsi_disk_emulate_reqops,
2624     [MODE_SELECT]                     = &scsi_disk_emulate_reqops,
2625     [MODE_SELECT_10]                  = &scsi_disk_emulate_reqops,
2626     [UNMAP]                           = &scsi_disk_emulate_reqops,
2627     [WRITE_SAME_10]                   = &scsi_disk_emulate_reqops,
2628     [WRITE_SAME_16]                   = &scsi_disk_emulate_reqops,
2629     [VERIFY_10]                       = &scsi_disk_emulate_reqops,
2630     [VERIFY_12]                       = &scsi_disk_emulate_reqops,
2631     [VERIFY_16]                       = &scsi_disk_emulate_reqops,
2632     [FORMAT_UNIT]                     = &scsi_disk_emulate_reqops,
2633 
2634     [READ_6]                          = &scsi_disk_dma_reqops,
2635     [READ_10]                         = &scsi_disk_dma_reqops,
2636     [READ_12]                         = &scsi_disk_dma_reqops,
2637     [READ_16]                         = &scsi_disk_dma_reqops,
2638     [WRITE_6]                         = &scsi_disk_dma_reqops,
2639     [WRITE_10]                        = &scsi_disk_dma_reqops,
2640     [WRITE_12]                        = &scsi_disk_dma_reqops,
2641     [WRITE_16]                        = &scsi_disk_dma_reqops,
2642     [WRITE_VERIFY_10]                 = &scsi_disk_dma_reqops,
2643     [WRITE_VERIFY_12]                 = &scsi_disk_dma_reqops,
2644     [WRITE_VERIFY_16]                 = &scsi_disk_dma_reqops,
2645 };
2646 
2647 static void scsi_disk_new_request_dump(uint32_t lun, uint32_t tag, uint8_t *buf)
2648 {
2649     int i;
2650     int len = scsi_cdb_length(buf);
2651     char *line_buffer, *p;
2652 
2653     assert(len > 0 && len <= 16);
2654     line_buffer = g_malloc(len * 5 + 1);
2655 
2656     for (i = 0, p = line_buffer; i < len; i++) {
2657         p += sprintf(p, " 0x%02x", buf[i]);
2658     }
2659     trace_scsi_disk_new_request(lun, tag, line_buffer);
2660 
2661     g_free(line_buffer);
2662 }
2663 
2664 static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun,
2665                                      uint8_t *buf, void *hba_private)
2666 {
2667     SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);
2668     SCSIRequest *req;
2669     const SCSIReqOps *ops;
2670     uint8_t command;
2671 
2672     command = buf[0];
2673     ops = scsi_disk_reqops_dispatch[command];
2674     if (!ops) {
2675         ops = &scsi_disk_emulate_reqops;
2676     }
2677     req = scsi_req_alloc(ops, &s->qdev, tag, lun, hba_private);
2678 
2679     if (trace_event_get_state_backends(TRACE_SCSI_DISK_NEW_REQUEST)) {
2680         scsi_disk_new_request_dump(lun, tag, buf);
2681     }
2682 
2683     return req;
2684 }
2685 
2686 #ifdef __linux__
2687 static int get_device_type(SCSIDiskState *s)
2688 {
2689     uint8_t cmd[16];
2690     uint8_t buf[36];
2691     int ret;
2692 
2693     memset(cmd, 0, sizeof(cmd));
2694     memset(buf, 0, sizeof(buf));
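    /* Build a standard INQUIRY CDB with a 36-byte allocation length. */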
2695     cmd[0] = INQUIRY;
2696     cmd[4] = sizeof(buf);
2697 
2698     ret = scsi_SG_IO_FROM_DEV(s->qdev.conf.blk, cmd, sizeof(cmd),
2699                               buf, sizeof(buf), s->qdev.io_timeout);
2700     if (ret < 0) {
2701         return -1;
2702     }
2703     s->qdev.type = buf[0];
2704     if (buf[1] & 0x80) {
2705         s->features |= 1 << SCSI_DISK_F_REMOVABLE;
2706     }
2707     return 0;
2708 }
2709 
2710 static void scsi_block_realize(SCSIDevice *dev, Error **errp)
2711 {
2712     SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
2713     AioContext *ctx;
2714     int sg_version;
2715     int rc;
2716 
2717     if (!s->qdev.conf.blk) {
2718         error_setg(errp, "drive property not set");
2719         return;
2720     }
2721 
2722     if (s->rotation_rate) {
2723         error_report_once("rotation_rate is specified for scsi-block but is "
2724                           "not implemented. This option is deprecated and will "
2725                           "be removed in a future version");
2726     }
2727 
2728     ctx = blk_get_aio_context(s->qdev.conf.blk);
2729     aio_context_acquire(ctx);
2730 
2731     /* Check that the driver handles SG_IO (version 3 and later) */
2732     rc = blk_ioctl(s->qdev.conf.blk, SG_GET_VERSION_NUM, &sg_version);
2733     if (rc < 0) {
2734         error_setg_errno(errp, -rc, "cannot get SG_IO version number");
2735         if (rc != -EPERM) {
2736             error_append_hint(errp, "Is this a SCSI device?\n");
2737         }
2738         goto out;
2739     }
2740     if (sg_version < 30000) {
2741         error_setg(errp, "scsi generic interface too old");
2742         goto out;
2743     }
2744 
2745     /* get device type from INQUIRY data */
2746     rc = get_device_type(s);
2747     if (rc < 0) {
2748         error_setg(errp, "INQUIRY failed");
2749         goto out;
2750     }
2751 
2752     /* Make a guess at the block size; we'll fix it up when the guest sends
2753      * READ CAPACITY.  If it doesn't, it would likely assume these sizes
2754      * anyway. (TODO: check in /sys).
2755      */
2756     if (s->qdev.type == TYPE_ROM || s->qdev.type == TYPE_WORM) {
2757         s->qdev.blocksize = 2048;
2758     } else {
2759         s->qdev.blocksize = 512;
2760     }
2761 
2762     /* Mark the scsi-block device as not removable, so that it cannot be
2763      * ejected with the HMP and QMP eject commands.
2764      */
2765     s->features |= (1 << SCSI_DISK_F_NO_REMOVABLE_DEVOPS);
2766 
2767     scsi_realize(&s->qdev, errp);
2768     scsi_generic_read_device_inquiry(&s->qdev);
2769 
2770 out:
2771     aio_context_release(ctx);
2772 }
2773 
2774 typedef struct SCSIBlockReq {
2775     SCSIDiskReq req;
2776     sg_io_hdr_t io_header;
2777 
2778     /* Selected bytes of the original CDB, copied into our own CDB.  */
2779     uint8_t cmd, cdb1, group_number;
2780 
2781     /* CDB passed to SG_IO.  */
2782     uint8_t cdb[16];
2783     BlockCompletionFunc *cb;
2784     void *cb_opaque;
2785 } SCSIBlockReq;
2786 
2787 static void scsi_block_sgio_complete(void *opaque, int ret)
2788 {
2789     SCSIBlockReq *req = (SCSIBlockReq *)opaque;
2790     SCSIDiskReq *r = &req->req;
2791     SCSIDevice *s = r->req.dev;
2792     sg_io_hdr_t *io_hdr = &req->io_header;
2793 
2794     if (ret == 0) {
2795         if (io_hdr->host_status != SCSI_HOST_OK) {
2796             scsi_req_complete_failed(&r->req, io_hdr->host_status);
2797             scsi_req_unref(&r->req);
2798             return;
2799         }
2800 
2801         if (io_hdr->driver_status & SG_ERR_DRIVER_TIMEOUT) {
2802             ret = BUSY;
2803         } else {
2804             ret = io_hdr->status;
2805         }
2806 
2807         if (ret > 0) {
2808             aio_context_acquire(blk_get_aio_context(s->conf.blk));
2809             if (scsi_handle_rw_error(r, ret, true)) {
2810                 aio_context_release(blk_get_aio_context(s->conf.blk));
2811                 scsi_req_unref(&r->req);
2812                 return;
2813             }
2814             aio_context_release(blk_get_aio_context(s->conf.blk));
2815 
2816             /* Ignore error.  */
2817             ret = 0;
2818         }
2819     }
2820 
2821     req->cb(req->cb_opaque, ret);
2822 }
2823 
2824 static BlockAIOCB *scsi_block_do_sgio(SCSIBlockReq *req,
2825                                       int64_t offset, QEMUIOVector *iov,
2826                                       int direction,
2827                                       BlockCompletionFunc *cb, void *opaque)
2828 {
2829     sg_io_hdr_t *io_header = &req->io_header;
2830     SCSIDiskReq *r = &req->req;
2831     SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
2832     int nb_logical_blocks;
2833     uint64_t lba;
2834     BlockAIOCB *aiocb;
2835 
2836     /* This is not supported yet.  It can only happen if the guest does
2837      * reads and writes that are not aligned to the logical sector size
2838      * _and_ cover multiple MemoryRegions.
2839      */
2840     assert(offset % s->qdev.blocksize == 0);
2841     assert(iov->size % s->qdev.blocksize == 0);
2842 
2843     io_header->interface_id = 'S';
2844 
2845     /* The data transfer comes from the QEMUIOVector.  */
2846     io_header->dxfer_direction = direction;
2847     io_header->dxfer_len = iov->size;
2848     io_header->dxferp = (void *)iov->iov;
2849     io_header->iovec_count = iov->niov;
2850     assert(io_header->iovec_count == iov->niov); /* no overflow! */
2851 
2852     /* Build a new CDB with the LBA and length patched in, in case
2853      * DMA helpers split the transfer into multiple segments.  Do not
2854      * build a CDB smaller than what the guest wanted, and only build
2855      * a larger one if strictly necessary.
2856      */
2857     io_header->cmdp = req->cdb;
2858     lba = offset / s->qdev.blocksize;
2859     nb_logical_blocks = io_header->dxfer_len / s->qdev.blocksize;
2860 
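    /*
     * Bits 7-5 of the opcode encode the CDB group: group 0 uses 6-byte
     * CDBs, groups 1-2 use 10-byte, group 5 uses 12-byte and group 4
     * uses 16-byte CDBs.
     */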
2861     if ((req->cmd >> 5) == 0 && lba <= 0x1ffff) {
2862         /* 6-byte CDB */
2863         stl_be_p(&req->cdb[0], lba | (req->cmd << 24));
2864         req->cdb[4] = nb_logical_blocks;
2865         req->cdb[5] = 0;
2866         io_header->cmd_len = 6;
2867     } else if ((req->cmd >> 5) <= 1 && lba <= 0xffffffffULL) {
2868         /* 10-byte CDB */
2869         req->cdb[0] = (req->cmd & 0x1f) | 0x20;
2870         req->cdb[1] = req->cdb1;
2871         stl_be_p(&req->cdb[2], lba);
2872         req->cdb[6] = req->group_number;
2873         stw_be_p(&req->cdb[7], nb_logical_blocks);
2874         req->cdb[9] = 0;
2875         io_header->cmd_len = 10;
2876     } else if ((req->cmd >> 5) != 4 && lba <= 0xffffffffULL) {
2877         /* 12-byte CDB */
2878         req->cdb[0] = (req->cmd & 0x1f) | 0xA0;
2879         req->cdb[1] = req->cdb1;
2880         stl_be_p(&req->cdb[2], lba);
2881         stl_be_p(&req->cdb[6], nb_logical_blocks);
2882         req->cdb[10] = req->group_number;
2883         req->cdb[11] = 0;
2884         io_header->cmd_len = 12;
2885     } else {
2886         /* 16-byte CDB */
2887         req->cdb[0] = (req->cmd & 0x1f) | 0x80;
2888         req->cdb[1] = req->cdb1;
2889         stq_be_p(&req->cdb[2], lba);
2890         stl_be_p(&req->cdb[10], nb_logical_blocks);
2891         req->cdb[14] = req->group_number;
2892         req->cdb[15] = 0;
2893         io_header->cmd_len = 16;
2894     }
2895 
2896     /* The rest is as in scsi-generic.c.  */
2897     io_header->mx_sb_len = sizeof(r->req.sense);
2898     io_header->sbp = r->req.sense;
2899     io_header->timeout = s->qdev.io_timeout * 1000;
2900     io_header->usr_ptr = r;
2901     io_header->flags |= SG_FLAG_DIRECT_IO;
2902     req->cb = cb;
2903     req->cb_opaque = opaque;
2904     trace_scsi_disk_aio_sgio_command(r->req.tag, req->cdb[0], lba,
2905                                      nb_logical_blocks, io_header->timeout);
2906     aiocb = blk_aio_ioctl(s->qdev.conf.blk, SG_IO, io_header, scsi_block_sgio_complete, req);
2907     assert(aiocb != NULL);
2908     return aiocb;
2909 }
2910 
2911 static bool scsi_block_no_fua(SCSICommand *cmd)
2912 {
2913     return false;
2914 }
2915 
2916 static BlockAIOCB *scsi_block_dma_readv(int64_t offset,
2917                                         QEMUIOVector *iov,
2918                                         BlockCompletionFunc *cb, void *cb_opaque,
2919                                         void *opaque)
2920 {
2921     SCSIBlockReq *r = opaque;
2922     return scsi_block_do_sgio(r, offset, iov,
2923                               SG_DXFER_FROM_DEV, cb, cb_opaque);
2924 }
2925 
2926 static BlockAIOCB *scsi_block_dma_writev(int64_t offset,
2927                                          QEMUIOVector *iov,
2928                                          BlockCompletionFunc *cb, void *cb_opaque,
2929                                          void *opaque)
2930 {
2931     SCSIBlockReq *r = opaque;
2932     return scsi_block_do_sgio(r, offset, iov,
2933                               SG_DXFER_TO_DEV, cb, cb_opaque);
2934 }
2935 
2936 static bool scsi_block_is_passthrough(SCSIDiskState *s, uint8_t *buf)
2937 {
2938     switch (buf[0]) {
2939     case VERIFY_10:
2940     case VERIFY_12:
2941     case VERIFY_16:
2942         /* Check if BYTCHK == 0x01 (data-out buffer contains data
2943          * for the number of logical blocks specified in the length
2944          * field).  For other modes, do not use a scatter/gather operation.
2945          */
2946         if ((buf[1] & 6) == 2) {
2947             return false;
2948         }
2949         break;
2950 
2951     case READ_6:
2952     case READ_10:
2953     case READ_12:
2954     case READ_16:
2955     case WRITE_6:
2956     case WRITE_10:
2957     case WRITE_12:
2958     case WRITE_16:
2959     case WRITE_VERIFY_10:
2960     case WRITE_VERIFY_12:
2961     case WRITE_VERIFY_16:
2962         /* MMC writing cannot be done via DMA helpers, because it sometimes
2963          * involves writing beyond the maximum LBA or to a negative LBA (the lead-in).
2964          * We might use scsi_block_dma_reqops as long as no writing commands are
2965          * seen, but performance usually isn't paramount on optical media.  So,
2966          * just make scsi-block operate the same as scsi-generic for them.
2967          */
2968         if (s->qdev.type != TYPE_ROM) {
2969             return false;
2970         }
2971         break;
2972 
2973     default:
2974         break;
2975     }
2976 
2977     return true;
2978 }
2979 
2980 
2981 static int32_t scsi_block_dma_command(SCSIRequest *req, uint8_t *buf)
2982 {
2983     SCSIBlockReq *r = (SCSIBlockReq *)req;
2984     SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
2985 
2986     r->cmd = req->cmd.buf[0];
2987     switch (r->cmd >> 5) {
2988     case 0:
2989         /* 6-byte CDB.  */
2990         r->cdb1 = r->group_number = 0;
2991         break;
2992     case 1:
2993         /* 10-byte CDB.  */
2994         r->cdb1 = req->cmd.buf[1];
2995         r->group_number = req->cmd.buf[6];
2996         break;
2997     case 4:
2998         /* 12-byte CDB.  */
2999         r->cdb1 = req->cmd.buf[1];
3000         r->group_number = req->cmd.buf[10];
3001         break;
3002     case 5:
3003         /* 16-byte CDB.  */
3004         r->cdb1 = req->cmd.buf[1];
3005         r->group_number = req->cmd.buf[14];
3006         break;
3007     default:
3008         abort();
3009     }
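    /* cdb1 carries the original flag byte (e.g. DPO/FUA/protection bits) and
     * group_number the GROUP NUMBER field; both are copied into the CDB
     * rebuilt by scsi_block_do_sgio(). */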
3010 
3011     /* Protection information is not supported.  For SCSI versions 2 and
3012      * older (as determined by snooping the guest's INQUIRY commands),
3013      * there is no RD/WR/VRPROTECT, so skip this check in these versions.
3014      */
3015     if (s->qdev.scsi_version > 2 && (req->cmd.buf[1] & 0xe0)) {
3016         scsi_check_condition(&r->req, SENSE_CODE(INVALID_FIELD));
3017         return 0;
3018     }
3019 
3020     return scsi_disk_dma_command(req, buf);
3021 }
3022 
3023 static const SCSIReqOps scsi_block_dma_reqops = {
3024     .size         = sizeof(SCSIBlockReq),
3025     .free_req     = scsi_free_request,
3026     .send_command = scsi_block_dma_command,
3027     .read_data    = scsi_read_data,
3028     .write_data   = scsi_write_data,
3029     .get_buf      = scsi_get_buf,
3030     .load_request = scsi_disk_load_request,
3031     .save_request = scsi_disk_save_request,
3032 };
3033 
3034 static SCSIRequest *scsi_block_new_request(SCSIDevice *d, uint32_t tag,
3035                                            uint32_t lun, uint8_t *buf,
3036                                            void *hba_private)
3037 {
3038     SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);
3039 
3040     if (scsi_block_is_passthrough(s, buf)) {
3041         return scsi_req_alloc(&scsi_generic_req_ops, &s->qdev, tag, lun,
3042                               hba_private);
3043     } else {
3044         return scsi_req_alloc(&scsi_block_dma_reqops, &s->qdev, tag, lun,
3045                               hba_private);
3046     }
3047 }
3048 
3049 static int scsi_block_parse_cdb(SCSIDevice *d, SCSICommand *cmd,
3050                                   uint8_t *buf, size_t buf_len,
3051                                   void *hba_private)
3052 {
3053     SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);
3054 
3055     if (scsi_block_is_passthrough(s, buf)) {
3056         return scsi_bus_parse_cdb(&s->qdev, cmd, buf, buf_len, hba_private);
3057     } else {
3058         return scsi_req_parse_cdb(&s->qdev, cmd, buf, buf_len);
3059     }
3060 }
3061 
3062 static void scsi_block_update_sense(SCSIRequest *req)
3063 {
3064     SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
3065     SCSIBlockReq *br = DO_UPCAST(SCSIBlockReq, req, r);
3066     r->req.sense_len = MIN(br->io_header.sb_len_wr, sizeof(r->req.sense));
3067 }
3068 #endif
3069 
3070 static
3071 BlockAIOCB *scsi_dma_readv(int64_t offset, QEMUIOVector *iov,
3072                            BlockCompletionFunc *cb, void *cb_opaque,
3073                            void *opaque)
3074 {
3075     SCSIDiskReq *r = opaque;
3076     SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
3077     return blk_aio_preadv(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque);
3078 }
3079 
3080 static
3081 BlockAIOCB *scsi_dma_writev(int64_t offset, QEMUIOVector *iov,
3082                             BlockCompletionFunc *cb, void *cb_opaque,
3083                             void *opaque)
3084 {
3085     SCSIDiskReq *r = opaque;
3086     SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
3087     return blk_aio_pwritev(s->qdev.conf.blk, offset, iov, 0, cb, cb_opaque);
3088 }
3089 
3090 static void scsi_disk_base_class_initfn(ObjectClass *klass, void *data)
3091 {
3092     DeviceClass *dc = DEVICE_CLASS(klass);
3093     SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass);
3094 
3095     dc->fw_name = "disk";
3096     dc->reset = scsi_disk_reset;
3097     sdc->dma_readv = scsi_dma_readv;
3098     sdc->dma_writev = scsi_dma_writev;
3099     sdc->need_fua_emulation = scsi_is_cmd_fua;
3100 }
3101 
3102 static const TypeInfo scsi_disk_base_info = {
3103     .name          = TYPE_SCSI_DISK_BASE,
3104     .parent        = TYPE_SCSI_DEVICE,
3105     .class_init    = scsi_disk_base_class_initfn,
3106     .instance_size = sizeof(SCSIDiskState),
3107     .class_size    = sizeof(SCSIDiskClass),
3108     .abstract      = true,
3109 };
3110 
3111 #define DEFINE_SCSI_DISK_PROPERTIES()                                   \
3112     DEFINE_PROP_DRIVE_IOTHREAD("drive", SCSIDiskState, qdev.conf.blk),  \
3113     DEFINE_BLOCK_PROPERTIES_BASE(SCSIDiskState, qdev.conf),             \
3114     DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf),            \
3115     DEFINE_PROP_STRING("ver", SCSIDiskState, version),                  \
3116     DEFINE_PROP_STRING("serial", SCSIDiskState, serial),                \
3117     DEFINE_PROP_STRING("vendor", SCSIDiskState, vendor),                \
3118     DEFINE_PROP_STRING("product", SCSIDiskState, product),              \
3119     DEFINE_PROP_STRING("device_id", SCSIDiskState, device_id)
3120 
3121 
3122 static Property scsi_hd_properties[] = {
3123     DEFINE_SCSI_DISK_PROPERTIES(),
3124     DEFINE_PROP_BIT("removable", SCSIDiskState, features,
3125                     SCSI_DISK_F_REMOVABLE, false),
3126     DEFINE_PROP_BIT("dpofua", SCSIDiskState, features,
3127                     SCSI_DISK_F_DPOFUA, false),
3128     DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
3129     DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
3130     DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
3131     DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size,
3132                        DEFAULT_MAX_UNMAP_SIZE),
3133     DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
3134                        DEFAULT_MAX_IO_SIZE),
3135     DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0),
3136     DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version,
3137                       5),
3138     DEFINE_PROP_BIT("quirk_mode_page_vendor_specific_apple", SCSIDiskState,
3139                     quirks, SCSI_DISK_QUIRK_MODE_PAGE_VENDOR_SPECIFIC_APPLE,
3140                     0),
3141     DEFINE_BLOCK_CHS_PROPERTIES(SCSIDiskState, qdev.conf),
3142     DEFINE_PROP_END_OF_LIST(),
3143 };
3144 
static const VMStateDescription vmstate_scsi_disk_state = {
    .name = "scsi-disk",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_SCSI_DEVICE(qdev, SCSIDiskState),
        VMSTATE_BOOL(media_changed, SCSIDiskState),
        VMSTATE_BOOL(media_event, SCSIDiskState),
        VMSTATE_BOOL(eject_request, SCSIDiskState),
        VMSTATE_BOOL(tray_open, SCSIDiskState),
        VMSTATE_BOOL(tray_locked, SCSIDiskState),
        VMSTATE_END_OF_LIST()
    }
};

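/*
 * scsi-hd keeps the default DMA and FUA hooks from scsi-disk-base and only
 * wires up its realize/unrealize, request allocation and unit-attention
 * callbacks.
 */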
static void scsi_hd_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);

    sc->realize      = scsi_hd_realize;
    sc->unrealize    = scsi_unrealize;
    sc->alloc_req    = scsi_new_request;
    sc->unit_attention_reported = scsi_disk_unit_attention_reported;
    dc->desc = "virtual SCSI disk";
    device_class_set_props(dc, scsi_hd_properties);
    dc->vmsd  = &vmstate_scsi_disk_state;
}

static const TypeInfo scsi_hd_info = {
    .name          = "scsi-hd",
    .parent        = TYPE_SCSI_DISK_BASE,
    .class_init    = scsi_hd_class_initfn,
};

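/*
 * scsi-cd: emulated CD-ROM.  Compared with scsi-hd it drops the removable
 * and DPO/FUA bits, the UNMAP limit, rotation rate and CHS geometry, and
 * adds quirk bits for mode-page behaviour expected by some (mostly Apple)
 * guests.
 */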
static Property scsi_cd_properties[] = {
    DEFINE_SCSI_DISK_PROPERTIES(),
    DEFINE_PROP_UINT64("wwn", SCSIDiskState, qdev.wwn, 0),
    DEFINE_PROP_UINT64("port_wwn", SCSIDiskState, qdev.port_wwn, 0),
    DEFINE_PROP_UINT16("port_index", SCSIDiskState, port_index, 0),
    DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
                       DEFAULT_MAX_IO_SIZE),
    DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version,
                      5),
    DEFINE_PROP_BIT("quirk_mode_page_apple_vendor", SCSIDiskState, quirks,
                    SCSI_DISK_QUIRK_MODE_PAGE_APPLE_VENDOR, 0),
    DEFINE_PROP_BIT("quirk_mode_sense_rom_use_dbd", SCSIDiskState, quirks,
                    SCSI_DISK_QUIRK_MODE_SENSE_ROM_USE_DBD, 0),
    DEFINE_PROP_BIT("quirk_mode_page_vendor_specific_apple", SCSIDiskState,
                    quirks, SCSI_DISK_QUIRK_MODE_PAGE_VENDOR_SPECIFIC_APPLE,
                    0),
    DEFINE_PROP_BIT("quirk_mode_page_truncated", SCSIDiskState, quirks,
                    SCSI_DISK_QUIRK_MODE_PAGE_TRUNCATED, 0),
    DEFINE_PROP_END_OF_LIST(),
};

static void scsi_cd_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);

    sc->realize      = scsi_cd_realize;
    sc->alloc_req    = scsi_new_request;
    sc->unit_attention_reported = scsi_disk_unit_attention_reported;
    dc->desc = "virtual SCSI CD-ROM";
    device_class_set_props(dc, scsi_cd_properties);
    dc->vmsd  = &vmstate_scsi_disk_state;
}

static const TypeInfo scsi_cd_info = {
    .name          = "scsi-cd",
    .parent        = TYPE_SCSI_DISK_BASE,
    .class_init    = scsi_cd_class_initfn,
};

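/*
 * scsi-block: passthrough to a host SCSI device.  Linux only, since it
 * relies on the SG_IO interface from <scsi/sg.h>.
 */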
#ifdef __linux__
static Property scsi_block_properties[] = {
    DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf),
    DEFINE_PROP_DRIVE("drive", SCSIDiskState, qdev.conf.blk),
    DEFINE_PROP_BOOL("share-rw", SCSIDiskState, qdev.conf.share_rw, false),
    DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0),
    DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size,
                       DEFAULT_MAX_UNMAP_SIZE),
    DEFINE_PROP_UINT64("max_io_size", SCSIDiskState, max_io_size,
                       DEFAULT_MAX_IO_SIZE),
    DEFINE_PROP_INT32("scsi_version", SCSIDiskState, qdev.default_scsi_version,
                      -1),
    DEFINE_PROP_UINT32("io_timeout", SCSIDiskState, qdev.io_timeout,
                       DEFAULT_IO_TIMEOUT),
    DEFINE_PROP_END_OF_LIST(),
};

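/*
 * scsi-block installs its own realize, request allocation and CDB parsing,
 * and overrides the DMA helpers and sense update so that I/O and sense data
 * go through the host device rather than the emulated data path; FUA
 * emulation is disabled because the real device handles FUA itself.
 */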
static void scsi_block_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SCSIDeviceClass *sc = SCSI_DEVICE_CLASS(klass);
    SCSIDiskClass *sdc = SCSI_DISK_BASE_CLASS(klass);

    sc->realize      = scsi_block_realize;
    sc->alloc_req    = scsi_block_new_request;
    sc->parse_cdb    = scsi_block_parse_cdb;
    sdc->dma_readv   = scsi_block_dma_readv;
    sdc->dma_writev  = scsi_block_dma_writev;
    sdc->update_sense = scsi_block_update_sense;
    sdc->need_fua_emulation = scsi_block_no_fua;
    dc->desc = "SCSI block device passthrough";
    device_class_set_props(dc, scsi_block_properties);
    dc->vmsd  = &vmstate_scsi_disk_state;
}

static const TypeInfo scsi_block_info = {
    .name          = "scsi-block",
    .parent        = TYPE_SCSI_DISK_BASE,
    .class_init    = scsi_block_class_initfn,
};
#endif

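/* Register the abstract base and the concrete device types with QOM. */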
static void scsi_disk_register_types(void)
{
    type_register_static(&scsi_disk_base_info);
    type_register_static(&scsi_hd_info);
    type_register_static(&scsi_cd_info);
#ifdef __linux__
    type_register_static(&scsi_block_info);
#endif
}

type_init(scsi_disk_register_types)
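/*
 * Illustrative usage (not part of this file): the types registered above
 * back the QEMU command-line devices, e.g.
 *
 *   -device scsi-hd,drive=drive0
 *   -device scsi-cd,drive=cdrom0
 *   -device scsi-block,drive=host0      (Linux hosts only)
 *
 * where "drive0", "cdrom0" and "host0" are hypothetical -drive/-blockdev ids.
 */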