/*
 * Virtio Block Device
 *
 * Copyright IBM, Corp. 2007
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "qemu-common.h"
#include "qemu/iov.h"
#include "qemu/error-report.h"
#include "trace.h"
#include "hw/block/block.h"
#include "sysemu/block-backend.h"
#include "sysemu/blockdev.h"
#include "hw/virtio/virtio-blk.h"
#include "dataplane/virtio-blk.h"
#include "migration/migration.h"
#include "block/scsi.h"
#ifdef __linux__
# include <scsi/sg.h>
#endif
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"

VirtIOBlockReq *virtio_blk_alloc_request(VirtIOBlock *s)
{
    VirtIOBlockReq *req = g_new(VirtIOBlockReq, 1);
    req->dev = s;
    req->qiov.size = 0;
    req->in_len = 0;
    req->next = NULL;
    req->mr_next = NULL;
    return req;
}

void virtio_blk_free_request(VirtIOBlockReq *req)
{
    if (req) {
        g_free(req);
    }
}

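/* Default request completion: store the status byte in the request's
 * in-header, push the used element back onto the virtqueue and notify the
 * guest.  Completion is routed through the s->complete_request hook so that
 * an alternative implementation (e.g. for dataplane) can be installed.
 */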
static void virtio_blk_complete_request(VirtIOBlockReq *req,
                                        unsigned char status)
{
    VirtIOBlock *s = req->dev;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    trace_virtio_blk_req_complete(req, status);

    stb_p(&req->in->status, status);
    virtqueue_push(s->vq, &req->elem, req->in_len);
    virtio_notify(vdev, s->vq);
}

static void virtio_blk_req_complete(VirtIOBlockReq *req, unsigned char status)
{
    req->dev->complete_request(req, status);
}

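/* Apply the configured rerror/werror policy to a failed request.  On
 * BLOCK_ERROR_ACTION_STOP the request is parked on s->rq so it can be
 * retried when the VM resumes; on BLOCK_ERROR_ACTION_REPORT it is completed
 * with VIRTIO_BLK_S_IOERR.  Returns zero only for
 * BLOCK_ERROR_ACTION_IGNORE, in which case the caller still owns the
 * request and completes it as if it had succeeded.
 */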
static int virtio_blk_handle_rw_error(VirtIOBlockReq *req, int error,
    bool is_read)
{
    BlockErrorAction action = blk_get_error_action(req->dev->blk,
                                                   is_read, error);
    VirtIOBlock *s = req->dev;

    if (action == BLOCK_ERROR_ACTION_STOP) {
        req->next = s->rq;
        s->rq = req;
    } else if (action == BLOCK_ERROR_ACTION_REPORT) {
        virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
        block_acct_failed(blk_get_stats(s->blk), &req->acct);
        virtio_blk_free_request(req);
    }

    blk_error_action(s->blk, action, is_read, error);
    return action != BLOCK_ERROR_ACTION_IGNORE;
}

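/* Completion callback for reads and writes.  opaque is the head of a chain
 * of merged requests linked through mr_next (see submit_requests); each
 * request in the chain is completed, or handed to the error policy,
 * individually.
 */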
static void virtio_blk_rw_complete(void *opaque, int ret)
{
    VirtIOBlockReq *next = opaque;

    while (next) {
        VirtIOBlockReq *req = next;
        next = req->mr_next;
        trace_virtio_blk_rw_complete(req, ret);

        if (req->qiov.nalloc != -1) {
            /* If nalloc is != -1 req->qiov is a local copy of the original
             * external iovec. It was allocated in submit_requests to be
             * able to merge requests. */
            qemu_iovec_destroy(&req->qiov);
        }

        if (ret) {
            int p = virtio_ldl_p(VIRTIO_DEVICE(req->dev), &req->out.type);
            bool is_read = !(p & VIRTIO_BLK_T_OUT);
            /* Note that memory may be dirtied on read failure.  If the
             * virtio request is not completed here, as is the case for
             * BLOCK_ERROR_ACTION_STOP, the memory may not be copied
             * correctly during live migration.  While this is ugly,
             * it is acceptable because the device is free to write to
             * the memory until the request is completed (which will
             * happen on the other side of the migration).
             */
            if (virtio_blk_handle_rw_error(req, -ret, is_read)) {
                continue;
            }
        }

        virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
        block_acct_done(blk_get_stats(req->dev->blk), &req->acct);
        virtio_blk_free_request(req);
    }
}

static void virtio_blk_flush_complete(void *opaque, int ret)
{
    VirtIOBlockReq *req = opaque;

    if (ret) {
        if (virtio_blk_handle_rw_error(req, -ret, 0)) {
            return;
        }
    }

    virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
    block_acct_done(blk_get_stats(req->dev->blk), &req->acct);
    virtio_blk_free_request(req);
}

#ifdef __linux__

typedef struct {
    VirtIOBlockReq *req;
    struct sg_io_hdr hdr;
} VirtIOBlockIoctlReq;

static void virtio_blk_ioctl_complete(void *opaque, int status)
{
    VirtIOBlockIoctlReq *ioctl_req = opaque;
    VirtIOBlockReq *req = ioctl_req->req;
    VirtIODevice *vdev = VIRTIO_DEVICE(req->dev);
    struct virtio_scsi_inhdr *scsi;
    struct sg_io_hdr *hdr;

    scsi = (void *)req->elem.in_sg[req->elem.in_num - 2].iov_base;

    if (status) {
        status = VIRTIO_BLK_S_UNSUPP;
        virtio_stl_p(vdev, &scsi->errors, 255);
        goto out;
    }

    hdr = &ioctl_req->hdr;
    /*
     * From SCSI-Generic-HOWTO: "Some lower level drivers (e.g. ide-scsi)
     * clear the masked_status field [hence status gets cleared too, see
     * block/scsi_ioctl.c] even when a CHECK_CONDITION or COMMAND_TERMINATED
     * status has occurred.  However they do set DRIVER_SENSE in driver_status
     * field. Also a (sb_len_wr > 0) indicates there is a sense buffer.
     */
    if (hdr->status == 0 && hdr->sb_len_wr > 0) {
        hdr->status = CHECK_CONDITION;
    }

    virtio_stl_p(vdev, &scsi->errors,
                 hdr->status | (hdr->msg_status << 8) |
                 (hdr->host_status << 16) | (hdr->driver_status << 24));
    virtio_stl_p(vdev, &scsi->residual, hdr->resid);
    virtio_stl_p(vdev, &scsi->sense_len, hdr->sb_len_wr);
    virtio_stl_p(vdev, &scsi->data_len, hdr->dxfer_len);

out:
    virtio_blk_req_complete(req, status);
    virtio_blk_free_request(req);
    g_free(ioctl_req);
}

#endif

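/* Pop the next available element from the virtqueue into a freshly
 * allocated request, or return NULL if the queue is empty.
 */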
static VirtIOBlockReq *virtio_blk_get_request(VirtIOBlock *s)
{
    VirtIOBlockReq *req = virtio_blk_alloc_request(s);

    if (!virtqueue_pop(s->vq, &req->elem)) {
        virtio_blk_free_request(req);
        return NULL;
    }

    return req;
}

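/* Handle a VIRTIO_BLK_T_SCSI_CMD request by translating it into an
 * asynchronous SG_IO ioctl on the backend (Linux hosts only).  Returns
 * -EINPROGRESS once the ioctl has been submitted, otherwise a
 * VIRTIO_BLK_S_* status for immediate completion by the caller.
 */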
static int virtio_blk_handle_scsi_req(VirtIOBlockReq *req)
{
    int status = VIRTIO_BLK_S_OK;
    struct virtio_scsi_inhdr *scsi = NULL;
    VirtIODevice *vdev = VIRTIO_DEVICE(req->dev);
    VirtQueueElement *elem = &req->elem;
    VirtIOBlock *blk = req->dev;

#ifdef __linux__
    int i;
    VirtIOBlockIoctlReq *ioctl_req;
    BlockAIOCB *acb;
#endif

    /*
     * We require at least one output segment each for the virtio_blk_outhdr
     * and the SCSI command block.
     *
     * We also at least require the virtio_blk_inhdr, the virtio_scsi_inhdr
     * and the sense buffer pointer in the input segments.
     */
    if (elem->out_num < 2 || elem->in_num < 3) {
        status = VIRTIO_BLK_S_IOERR;
        goto fail;
    }

    /*
     * The scsi inhdr is placed in the second-to-last input segment, just
     * before the regular inhdr.
     */
    scsi = (void *)elem->in_sg[elem->in_num - 2].iov_base;

    if (!blk->conf.scsi) {
        status = VIRTIO_BLK_S_UNSUPP;
        goto fail;
    }

    /*
     * No support for bidirectional commands yet.
     */
    if (elem->out_num > 2 && elem->in_num > 3) {
        status = VIRTIO_BLK_S_UNSUPP;
        goto fail;
    }

#ifdef __linux__
    ioctl_req = g_new0(VirtIOBlockIoctlReq, 1);
    ioctl_req->req = req;
    ioctl_req->hdr.interface_id = 'S';
    ioctl_req->hdr.cmd_len = elem->out_sg[1].iov_len;
    ioctl_req->hdr.cmdp = elem->out_sg[1].iov_base;
    ioctl_req->hdr.dxfer_len = 0;

    if (elem->out_num > 2) {
        /*
         * If there are more than the minimally required 2 output segments
         * there is write payload starting from the third iovec.
         */
        ioctl_req->hdr.dxfer_direction = SG_DXFER_TO_DEV;
        ioctl_req->hdr.iovec_count = elem->out_num - 2;

        for (i = 0; i < ioctl_req->hdr.iovec_count; i++) {
            ioctl_req->hdr.dxfer_len += elem->out_sg[i + 2].iov_len;
        }

        ioctl_req->hdr.dxferp = elem->out_sg + 2;

    } else if (elem->in_num > 3) {
        /*
         * If we have more than 3 input segments the guest wants to actually
         * read data.
         */
        ioctl_req->hdr.dxfer_direction = SG_DXFER_FROM_DEV;
        ioctl_req->hdr.iovec_count = elem->in_num - 3;
        for (i = 0; i < ioctl_req->hdr.iovec_count; i++) {
            ioctl_req->hdr.dxfer_len += elem->in_sg[i].iov_len;
        }

        ioctl_req->hdr.dxferp = elem->in_sg;
    } else {
        /*
         * Some SCSI commands don't actually transfer any data.
         */
        ioctl_req->hdr.dxfer_direction = SG_DXFER_NONE;
    }

    ioctl_req->hdr.sbp = elem->in_sg[elem->in_num - 3].iov_base;
    ioctl_req->hdr.mx_sb_len = elem->in_sg[elem->in_num - 3].iov_len;

    acb = blk_aio_ioctl(blk->blk, SG_IO, &ioctl_req->hdr,
                        virtio_blk_ioctl_complete, ioctl_req);
    if (!acb) {
        g_free(ioctl_req);
        status = VIRTIO_BLK_S_UNSUPP;
        goto fail;
    }
    return -EINPROGRESS;
#else
    abort();
#endif

fail:
    /* Just put anything nonzero so that the ioctl fails in the guest.  */
    if (scsi) {
        virtio_stl_p(vdev, &scsi->errors, 255);
    }
    return status;
}

static void virtio_blk_handle_scsi(VirtIOBlockReq *req)
{
    int status;

    status = virtio_blk_handle_scsi_req(req);
    if (status != -EINPROGRESS) {
        virtio_blk_req_complete(req, status);
        virtio_blk_free_request(req);
    }
}

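/* Issue one read or write for the run of num_reqs requests beginning at
 * mrb->reqs[start].  For a merged run (num_reqs > 1) the per-request iovecs
 * are concatenated into a locally allocated QEMUIOVector and the requests
 * are chained through mr_next so virtio_blk_rw_complete can complete each
 * of them; niov is the total iovec count of the run.
 */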
static inline void submit_requests(BlockBackend *blk, MultiReqBuffer *mrb,
                                   int start, int num_reqs, int niov)
{
    QEMUIOVector *qiov = &mrb->reqs[start]->qiov;
    int64_t sector_num = mrb->reqs[start]->sector_num;
    int nb_sectors = mrb->reqs[start]->qiov.size / BDRV_SECTOR_SIZE;
    bool is_write = mrb->is_write;

    if (num_reqs > 1) {
        int i;
        struct iovec *tmp_iov = qiov->iov;
        int tmp_niov = qiov->niov;

        /* mrb->reqs[start]->qiov was initialized from an external iovec, so
         * we can't modify it here. We need to initialize it locally and then
         * add the external iovecs. */
        qemu_iovec_init(qiov, niov);

        for (i = 0; i < tmp_niov; i++) {
            qemu_iovec_add(qiov, tmp_iov[i].iov_base, tmp_iov[i].iov_len);
        }

        for (i = start + 1; i < start + num_reqs; i++) {
            qemu_iovec_concat(qiov, &mrb->reqs[i]->qiov, 0,
                              mrb->reqs[i]->qiov.size);
            mrb->reqs[i - 1]->mr_next = mrb->reqs[i];
            nb_sectors += mrb->reqs[i]->qiov.size / BDRV_SECTOR_SIZE;
        }
        assert(nb_sectors == qiov->size / BDRV_SECTOR_SIZE);

        trace_virtio_blk_submit_multireq(mrb, start, num_reqs, sector_num,
                                         nb_sectors, is_write);
        block_acct_merge_done(blk_get_stats(blk),
                              is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ,
                              num_reqs - 1);
    }

    if (is_write) {
        blk_aio_writev(blk, sector_num, qiov, nb_sectors,
                       virtio_blk_rw_complete, mrb->reqs[start]);
    } else {
        blk_aio_readv(blk, sector_num, qiov, nb_sectors,
                      virtio_blk_rw_complete, mrb->reqs[start]);
    }
}

static int multireq_compare(const void *a, const void *b)
{
    const VirtIOBlockReq *req1 = *(VirtIOBlockReq **)a,
                         *req2 = *(VirtIOBlockReq **)b;

    /*
     * Note that we can't simply subtract sector_num1 from sector_num2
     * here as that could overflow the return value.
     */
    if (req1->sector_num > req2->sector_num) {
        return 1;
    } else if (req1->sector_num < req2->sector_num) {
        return -1;
    } else {
        return 0;
    }
}

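/* Flush the buffered requests to the backend: sort them by sector number,
 * then submit them in the longest possible runs that remain sequential and
 * stay within both IOV_MAX and the backend's maximum transfer length.
 */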
void virtio_blk_submit_multireq(BlockBackend *blk, MultiReqBuffer *mrb)
{
    int i = 0, start = 0, num_reqs = 0, niov = 0, nb_sectors = 0;
    int max_xfer_len = 0;
    int64_t sector_num = 0;

    if (mrb->num_reqs == 1) {
        submit_requests(blk, mrb, 0, 1, -1);
        mrb->num_reqs = 0;
        return;
    }

    max_xfer_len = blk_get_max_transfer_length(mrb->reqs[0]->dev->blk);
    max_xfer_len = MIN_NON_ZERO(max_xfer_len, BDRV_REQUEST_MAX_SECTORS);

    qsort(mrb->reqs, mrb->num_reqs, sizeof(*mrb->reqs),
          &multireq_compare);

    for (i = 0; i < mrb->num_reqs; i++) {
        VirtIOBlockReq *req = mrb->reqs[i];
        if (num_reqs > 0) {
            bool merge = true;

            /* merge would exceed maximum number of IOVs */
            if (niov + req->qiov.niov > IOV_MAX) {
                merge = false;
            }

            /* merge would exceed maximum transfer length of backend device */
            if (req->qiov.size / BDRV_SECTOR_SIZE + nb_sectors > max_xfer_len) {
                merge = false;
            }

            /* requests are not sequential */
            if (sector_num + nb_sectors != req->sector_num) {
                merge = false;
            }

            if (!merge) {
                submit_requests(blk, mrb, start, num_reqs, niov);
                num_reqs = 0;
            }
        }

        if (num_reqs == 0) {
            sector_num = req->sector_num;
            nb_sectors = niov = 0;
            start = i;
        }

        nb_sectors += req->qiov.size / BDRV_SECTOR_SIZE;
        niov += req->qiov.niov;
        num_reqs++;
    }

    submit_requests(blk, mrb, start, num_reqs, niov);
    mrb->num_reqs = 0;
}

static void virtio_blk_handle_flush(VirtIOBlockReq *req, MultiReqBuffer *mrb)
{
    block_acct_start(blk_get_stats(req->dev->blk), &req->acct, 0,
                     BLOCK_ACCT_FLUSH);

    /*
     * Make sure all outstanding writes are posted to the backing device.
     */
    if (mrb->is_write && mrb->num_reqs > 0) {
        virtio_blk_submit_multireq(req->dev->blk, mrb);
    }
    blk_aio_flush(req->dev->blk, virtio_blk_flush_complete, req);
}

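/* Reject requests that are misaligned, not a multiple of the logical block
 * size, larger than the block layer can handle, or that would run past the
 * end of the device.
 */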
static bool virtio_blk_sect_range_ok(VirtIOBlock *dev,
                                     uint64_t sector, size_t size)
{
    uint64_t nb_sectors = size >> BDRV_SECTOR_BITS;
    uint64_t total_sectors;

    if (nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
        return false;
    }
    if (sector & dev->sector_mask) {
        return false;
    }
    if (size % dev->conf.conf.logical_block_size) {
        return false;
    }
    blk_get_geometry(dev->blk, &total_sectors);
    if (sector > total_sectors || nb_sectors > total_sectors - sector) {
        return false;
    }
    return true;
}

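/* Parse a single request: copy the virtio_blk_outhdr out of the leading
 * output segments, locate the virtio_blk_inhdr at the tail of the input
 * segments, and dispatch on the request type.  Reads and writes are only
 * queued in mrb here; the caller submits them via
 * virtio_blk_submit_multireq.
 */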
void virtio_blk_handle_request(VirtIOBlockReq *req, MultiReqBuffer *mrb)
{
    uint32_t type;
    struct iovec *in_iov = req->elem.in_sg;
    struct iovec *iov = req->elem.out_sg;
    unsigned in_num = req->elem.in_num;
    unsigned out_num = req->elem.out_num;

    if (req->elem.out_num < 1 || req->elem.in_num < 1) {
        error_report("virtio-blk missing headers");
        exit(1);
    }

    if (unlikely(iov_to_buf(iov, out_num, 0, &req->out,
                            sizeof(req->out)) != sizeof(req->out))) {
        error_report("virtio-blk request outhdr too short");
        exit(1);
    }

    iov_discard_front(&iov, &out_num, sizeof(req->out));

    if (in_iov[in_num - 1].iov_len < sizeof(struct virtio_blk_inhdr)) {
        error_report("virtio-blk request inhdr too short");
        exit(1);
    }

    /* We always touch the last byte, so just see how big in_iov is.  */
    req->in_len = iov_size(in_iov, in_num);
    req->in = (void *)in_iov[in_num - 1].iov_base
              + in_iov[in_num - 1].iov_len
              - sizeof(struct virtio_blk_inhdr);
    iov_discard_back(in_iov, &in_num, sizeof(struct virtio_blk_inhdr));

    type = virtio_ldl_p(VIRTIO_DEVICE(req->dev), &req->out.type);

    /* VIRTIO_BLK_T_OUT defines the command direction. VIRTIO_BLK_T_BARRIER
     * is an optional flag. Although a guest should not send this flag if it
     * was not negotiated, we ignored it in the past, so keep ignoring it. */
    switch (type & ~(VIRTIO_BLK_T_OUT | VIRTIO_BLK_T_BARRIER)) {
    case VIRTIO_BLK_T_IN:
    {
        bool is_write = type & VIRTIO_BLK_T_OUT;
        req->sector_num = virtio_ldq_p(VIRTIO_DEVICE(req->dev),
                                       &req->out.sector);

        if (is_write) {
            qemu_iovec_init_external(&req->qiov, iov, out_num);
            trace_virtio_blk_handle_write(req, req->sector_num,
                                          req->qiov.size / BDRV_SECTOR_SIZE);
        } else {
            qemu_iovec_init_external(&req->qiov, in_iov, in_num);
            trace_virtio_blk_handle_read(req, req->sector_num,
                                         req->qiov.size / BDRV_SECTOR_SIZE);
        }

        if (!virtio_blk_sect_range_ok(req->dev, req->sector_num,
                                      req->qiov.size)) {
            virtio_blk_req_complete(req, VIRTIO_BLK_S_IOERR);
            block_acct_invalid(blk_get_stats(req->dev->blk),
                               is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ);
            virtio_blk_free_request(req);
            return;
        }

        block_acct_start(blk_get_stats(req->dev->blk),
                         &req->acct, req->qiov.size,
                         is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ);

        /* merge would exceed maximum number of requests or IO direction
         * changes */
        if (mrb->num_reqs > 0 && (mrb->num_reqs == VIRTIO_BLK_MAX_MERGE_REQS ||
                                  is_write != mrb->is_write ||
                                  !req->dev->conf.request_merging)) {
            virtio_blk_submit_multireq(req->dev->blk, mrb);
        }

        assert(mrb->num_reqs < VIRTIO_BLK_MAX_MERGE_REQS);
        mrb->reqs[mrb->num_reqs++] = req;
        mrb->is_write = is_write;
        break;
    }
    case VIRTIO_BLK_T_FLUSH:
        virtio_blk_handle_flush(req, mrb);
        break;
    case VIRTIO_BLK_T_SCSI_CMD:
        virtio_blk_handle_scsi(req);
        break;
    case VIRTIO_BLK_T_GET_ID:
    {
        VirtIOBlock *s = req->dev;

        /*
         * NB: per existing s/n string convention the string is
         * terminated by '\0' only when shorter than buffer.
         */
        const char *serial = s->conf.serial ? s->conf.serial : "";
        size_t size = MIN(strlen(serial) + 1,
                          MIN(iov_size(in_iov, in_num),
                              VIRTIO_BLK_ID_BYTES));
        iov_from_buf(in_iov, in_num, 0, serial, size);
        virtio_blk_req_complete(req, VIRTIO_BLK_S_OK);
        virtio_blk_free_request(req);
        break;
    }
    default:
        virtio_blk_req_complete(req, VIRTIO_BLK_S_UNSUPP);
        virtio_blk_free_request(req);
    }
}

static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);
    VirtIOBlockReq *req;
    MultiReqBuffer mrb = {};

    /* Some guests kick before setting VIRTIO_CONFIG_S_DRIVER_OK so start
     * dataplane here instead of waiting for .set_status().
     */
    if (s->dataplane) {
        virtio_blk_data_plane_start(s->dataplane);
        return;
    }

    blk_io_plug(s->blk);

    while ((req = virtio_blk_get_request(s))) {
        virtio_blk_handle_request(req, &mrb);
    }

    if (mrb.num_reqs) {
        virtio_blk_submit_multireq(s->blk, &mrb);
    }

    blk_io_unplug(s->blk);
}

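/* Bottom half that re-submits the requests parked on s->rq by the
 * rerror/werror=stop policy once the VM starts running again.
 */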
static void virtio_blk_dma_restart_bh(void *opaque)
{
    VirtIOBlock *s = opaque;
    VirtIOBlockReq *req = s->rq;
    MultiReqBuffer mrb = {};

    qemu_bh_delete(s->bh);
    s->bh = NULL;

    s->rq = NULL;

    while (req) {
        VirtIOBlockReq *next = req->next;
        virtio_blk_handle_request(req, &mrb);
        req = next;
    }

    if (mrb.num_reqs) {
        virtio_blk_submit_multireq(s->blk, &mrb);
    }
}

static void virtio_blk_dma_restart_cb(void *opaque, int running,
                                      RunState state)
{
    VirtIOBlock *s = opaque;

    if (!running) {
        return;
    }

    if (!s->bh) {
        s->bh = aio_bh_new(blk_get_aio_context(s->conf.conf.blk),
                           virtio_blk_dma_restart_bh, s);
        qemu_bh_schedule(s->bh);
    }
}

static void virtio_blk_reset(VirtIODevice *vdev)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);
    AioContext *ctx;

    /*
     * This should cancel pending requests, but can't do nicely until there
     * are per-device request lists.
     */
    ctx = blk_get_aio_context(s->blk);
    aio_context_acquire(ctx);
    blk_drain(s->blk);

    if (s->dataplane) {
        virtio_blk_data_plane_stop(s->dataplane);
    }
    aio_context_release(ctx);

    blk_set_enable_write_cache(s->blk, s->original_wce);
}

/* coalesce internal state, copy to pci i/o region 0
 */
static void virtio_blk_update_config(VirtIODevice *vdev, uint8_t *config)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);
    BlockConf *conf = &s->conf.conf;
    struct virtio_blk_config blkcfg;
    uint64_t capacity;
    int blk_size = conf->logical_block_size;

    blk_get_geometry(s->blk, &capacity);
    memset(&blkcfg, 0, sizeof(blkcfg));
    virtio_stq_p(vdev, &blkcfg.capacity, capacity);
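    /* 128 matches the virtqueue size set in virtio_blk_device_realize; the
     * two subtracted descriptors are presumably reserved for the request
     * header and the status byte. */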
    virtio_stl_p(vdev, &blkcfg.seg_max, 128 - 2);
    virtio_stw_p(vdev, &blkcfg.geometry.cylinders, conf->cyls);
    virtio_stl_p(vdev, &blkcfg.blk_size, blk_size);
    virtio_stw_p(vdev, &blkcfg.min_io_size, conf->min_io_size / blk_size);
    virtio_stw_p(vdev, &blkcfg.opt_io_size, conf->opt_io_size / blk_size);
    blkcfg.geometry.heads = conf->heads;
    /*
     * We must ensure that the block device capacity is a multiple of
     * the logical block size. If that is not the case, let's use
     * sector_mask to adapt the geometry to have a correct picture.
     * For those devices where the capacity is ok for the given geometry
     * we don't touch the sector value of the geometry, since some devices
     * (like s390 dasd) need a specific value. Here the capacity is already
     * cyls*heads*secs*blk_size and the sector value is not block size
     * divided by 512 - instead it is the amount of blk_size blocks
     * per track (cylinder).
     */
    if (blk_getlength(s->blk) / conf->heads / conf->secs % blk_size) {
        blkcfg.geometry.sectors = conf->secs & ~s->sector_mask;
    } else {
        blkcfg.geometry.sectors = conf->secs;
    }
    blkcfg.size_max = 0;
    blkcfg.physical_block_exp = get_physical_block_exp(conf);
    blkcfg.alignment_offset = 0;
    blkcfg.wce = blk_enable_write_cache(s->blk);
    memcpy(config, &blkcfg, sizeof(struct virtio_blk_config));
}

static void virtio_blk_set_config(VirtIODevice *vdev, const uint8_t *config)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);
    struct virtio_blk_config blkcfg;

    memcpy(&blkcfg, config, sizeof(blkcfg));

    aio_context_acquire(blk_get_aio_context(s->blk));
    blk_set_enable_write_cache(s->blk, blkcfg.wce != 0);
    aio_context_release(blk_get_aio_context(s->blk));
}

static uint64_t virtio_blk_get_features(VirtIODevice *vdev, uint64_t features,
                                        Error **errp)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);

    virtio_add_feature(&features, VIRTIO_BLK_F_SEG_MAX);
    virtio_add_feature(&features, VIRTIO_BLK_F_GEOMETRY);
    virtio_add_feature(&features, VIRTIO_BLK_F_TOPOLOGY);
    virtio_add_feature(&features, VIRTIO_BLK_F_BLK_SIZE);
    if (virtio_has_feature(features, VIRTIO_F_VERSION_1)) {
        if (s->conf.scsi) {
            error_setg(errp, "Please set scsi=off for virtio-blk devices in order to use virtio 1.0");
            return 0;
        }
    } else {
        virtio_clear_feature(&features, VIRTIO_F_ANY_LAYOUT);
        virtio_add_feature(&features, VIRTIO_BLK_F_SCSI);
    }

    if (s->conf.config_wce) {
        virtio_add_feature(&features, VIRTIO_BLK_F_CONFIG_WCE);
    }
    if (blk_enable_write_cache(s->blk)) {
        virtio_add_feature(&features, VIRTIO_BLK_F_WCE);
    }
    if (blk_is_read_only(s->blk)) {
        virtio_add_feature(&features, VIRTIO_BLK_F_RO);
    }

    return features;
}

static void virtio_blk_set_status(VirtIODevice *vdev, uint8_t status)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);

    if (s->dataplane && !(status & (VIRTIO_CONFIG_S_DRIVER |
                                    VIRTIO_CONFIG_S_DRIVER_OK))) {
        virtio_blk_data_plane_stop(s->dataplane);
    }

    if (!(status & VIRTIO_CONFIG_S_DRIVER_OK)) {
        return;
    }

    /* A guest that supports VIRTIO_BLK_F_CONFIG_WCE must be able to send
     * cache flushes.  Thus, the "auto writethrough" behavior is never
     * necessary for guests that support the VIRTIO_BLK_F_CONFIG_WCE feature.
     * Leaving it enabled would break the following sequence:
     *
     *     Guest started with "-drive cache=writethrough"
     *     Guest sets status to 0
     *     Guest sets DRIVER bit in status field
     *     Guest reads host features (WCE=0, CONFIG_WCE=1)
     *     Guest writes guest features (WCE=0, CONFIG_WCE=1)
     *     Guest writes 1 to the WCE configuration field (writeback mode)
     *     Guest sets DRIVER_OK bit in status field
     *
     * s->blk would erroneously be placed in writethrough mode.
     */
    if (!virtio_vdev_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE)) {
        aio_context_acquire(blk_get_aio_context(s->blk));
        blk_set_enable_write_cache(s->blk,
                                   virtio_vdev_has_feature(vdev,
                                                           VIRTIO_BLK_F_WCE));
        aio_context_release(blk_get_aio_context(s->blk));
    }
}

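/* Stop dataplane before the generic virtio state is saved so that the
 * device is quiesced on the main thread for migration.
 */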
static void virtio_blk_save(QEMUFile *f, void *opaque)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
    VirtIOBlock *s = VIRTIO_BLK(vdev);

    if (s->dataplane) {
        virtio_blk_data_plane_stop(s->dataplane);
    }

    virtio_save(vdev, f);
}

static void virtio_blk_save_device(VirtIODevice *vdev, QEMUFile *f)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);
    VirtIOBlockReq *req = s->rq;

    while (req) {
        qemu_put_sbyte(f, 1);
        qemu_put_buffer(f, (unsigned char *)&req->elem,
                        sizeof(VirtQueueElement));
        req = req->next;
    }
    qemu_put_sbyte(f, 0);
}

static int virtio_blk_load(QEMUFile *f, void *opaque, int version_id)
{
    VirtIOBlock *s = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    if (version_id != 2) {
        return -EINVAL;
    }

    return virtio_load(vdev, f, version_id);
}

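/* Restore the list of in-flight requests saved by virtio_blk_save_device
 * and remap the guest memory regions referenced by each element.
 */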
static int virtio_blk_load_device(VirtIODevice *vdev, QEMUFile *f,
                                  int version_id)
{
    VirtIOBlock *s = VIRTIO_BLK(vdev);

    while (qemu_get_sbyte(f)) {
        VirtIOBlockReq *req = virtio_blk_alloc_request(s);
        qemu_get_buffer(f, (unsigned char *)&req->elem,
                        sizeof(VirtQueueElement));
        req->next = s->rq;
        s->rq = req;

        virtqueue_map(&req->elem);
    }

    return 0;
}

static void virtio_blk_resize(void *opaque)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(opaque);

    virtio_notify_config(vdev);
}

static const BlockDevOps virtio_block_ops = {
    .resize_cb = virtio_blk_resize,
};

/* Disable dataplane thread during live migration since it does not
 * update the dirty memory bitmap yet.
 */
static void virtio_blk_migration_state_changed(Notifier *notifier, void *data)
{
    VirtIOBlock *s = container_of(notifier, VirtIOBlock,
                                  migration_state_notifier);
    MigrationState *mig = data;
    Error *err = NULL;

    if (migration_in_setup(mig)) {
        if (!s->dataplane) {
            return;
        }
        virtio_blk_data_plane_destroy(s->dataplane);
        s->dataplane = NULL;
    } else if (migration_has_finished(mig) ||
               migration_has_failed(mig)) {
        if (s->dataplane) {
            return;
        }
        blk_drain_all(); /* complete in-flight non-dataplane requests */
        virtio_blk_data_plane_create(VIRTIO_DEVICE(s), &s->conf,
                                     &s->dataplane, &err);
        if (err != NULL) {
            error_report_err(err);
        }
    }
}

static void virtio_blk_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOBlock *s = VIRTIO_BLK(dev);
    VirtIOBlkConf *conf = &s->conf;
    Error *err = NULL;
    static int virtio_blk_id;

    if (!conf->conf.blk) {
        error_setg(errp, "drive property not set");
        return;
    }
    if (!blk_is_inserted(conf->conf.blk)) {
        error_setg(errp, "Device needs media, but drive is empty");
        return;
    }

    blkconf_serial(&conf->conf, &conf->serial);
    s->original_wce = blk_enable_write_cache(conf->conf.blk);
    blkconf_geometry(&conf->conf, NULL, 65535, 255, 255, &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }
    blkconf_blocksizes(&conf->conf);

    virtio_init(vdev, "virtio-blk", VIRTIO_ID_BLOCK,
                sizeof(struct virtio_blk_config));

    s->blk = conf->conf.blk;
    s->rq = NULL;
    s->sector_mask = (s->conf.conf.logical_block_size / BDRV_SECTOR_SIZE) - 1;

    s->vq = virtio_add_queue(vdev, 128, virtio_blk_handle_output);
    s->complete_request = virtio_blk_complete_request;
    virtio_blk_data_plane_create(vdev, conf, &s->dataplane, &err);
    if (err != NULL) {
        error_propagate(errp, err);
        virtio_cleanup(vdev);
        return;
    }
    s->migration_state_notifier.notify = virtio_blk_migration_state_changed;
    add_migration_state_change_notifier(&s->migration_state_notifier);

    s->change = qemu_add_vm_change_state_handler(virtio_blk_dma_restart_cb, s);
    register_savevm(dev, "virtio-blk", virtio_blk_id++, 2,
                    virtio_blk_save, virtio_blk_load, s);
    blk_set_dev_ops(s->blk, &virtio_block_ops, s);
    blk_set_guest_block_size(s->blk, s->conf.conf.logical_block_size);

    blk_iostatus_enable(s->blk);
}

static void virtio_blk_device_unrealize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOBlock *s = VIRTIO_BLK(dev);

    remove_migration_state_change_notifier(&s->migration_state_notifier);
    virtio_blk_data_plane_destroy(s->dataplane);
    s->dataplane = NULL;
    qemu_del_vm_change_state_handler(s->change);
    unregister_savevm(dev, "virtio-blk", s);
    blockdev_mark_auto_del(s->blk);
    virtio_cleanup(vdev);
}

static void virtio_blk_instance_init(Object *obj)
{
    VirtIOBlock *s = VIRTIO_BLK(obj);

    object_property_add_link(obj, "iothread", TYPE_IOTHREAD,
                             (Object **)&s->conf.iothread,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE, NULL);
    device_add_bootindex_property(obj, &s->conf.conf.bootindex,
                                  "bootindex", "/disk@0,0",
                                  DEVICE(obj), NULL);
}

static Property virtio_blk_properties[] = {
    DEFINE_BLOCK_PROPERTIES(VirtIOBlock, conf.conf),
    DEFINE_BLOCK_CHS_PROPERTIES(VirtIOBlock, conf.conf),
    DEFINE_PROP_STRING("serial", VirtIOBlock, conf.serial),
    DEFINE_PROP_BIT("config-wce", VirtIOBlock, conf.config_wce, 0, true),
#ifdef __linux__
    DEFINE_PROP_BIT("scsi", VirtIOBlock, conf.scsi, 0, false),
#endif
    DEFINE_PROP_BIT("request-merging", VirtIOBlock, conf.request_merging, 0,
                    true),
    DEFINE_PROP_BIT("x-data-plane", VirtIOBlock, conf.data_plane, 0, false),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_blk_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    dc->props = virtio_blk_properties;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    vdc->realize = virtio_blk_device_realize;
    vdc->unrealize = virtio_blk_device_unrealize;
    vdc->get_config = virtio_blk_update_config;
    vdc->set_config = virtio_blk_set_config;
    vdc->get_features = virtio_blk_get_features;
    vdc->set_status = virtio_blk_set_status;
    vdc->reset = virtio_blk_reset;
    vdc->save = virtio_blk_save_device;
    vdc->load = virtio_blk_load_device;
}

static const TypeInfo virtio_device_info = {
    .name = TYPE_VIRTIO_BLK,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOBlock),
    .instance_init = virtio_blk_instance_init,
    .class_init = virtio_blk_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_device_info);
}

type_init(virtio_register_types)