xref: /openbmc/qemu/block/io.c (revision fe1a9cbc)
1 /*
2  * Block layer I/O functions
3  *
4  * Copyright (c) 2003 Fabrice Bellard
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to deal
8  * in the Software without restriction, including without limitation the rights
9  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10  * copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22  * THE SOFTWARE.
23  */
24 
25 #include "qemu/osdep.h"
26 #include "trace.h"
27 #include "sysemu/block-backend.h"
28 #include "block/blockjob.h"
29 #include "block/block_int.h"
30 #include "block/throttle-groups.h"
31 #include "qemu/error-report.h"
32 
33 #define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */
34 
35 static BlockAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
36         int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
37         BlockCompletionFunc *cb, void *opaque);
38 static BlockAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
39         int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
40         BlockCompletionFunc *cb, void *opaque);
41 static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
42                                          int64_t sector_num, int nb_sectors,
43                                          QEMUIOVector *iov);
44 static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
45                                          int64_t sector_num, int nb_sectors,
46                                          QEMUIOVector *iov);
47 static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
48     int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
49     BdrvRequestFlags flags);
50 static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs,
51     int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
52     BdrvRequestFlags flags);
53 static BlockAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
54                                          int64_t sector_num,
55                                          QEMUIOVector *qiov,
56                                          int nb_sectors,
57                                          BdrvRequestFlags flags,
58                                          BlockCompletionFunc *cb,
59                                          void *opaque,
60                                          bool is_write);
61 static void coroutine_fn bdrv_co_do_rw(void *opaque);
62 static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
63     int64_t sector_num, int nb_sectors, BdrvRequestFlags flags);
64 
65 /* throttling disk I/O limits */
66 void bdrv_set_io_limits(BlockDriverState *bs,
67                         ThrottleConfig *cfg)
68 {
69     int i;
70 
71     throttle_group_config(bs, cfg);
72 
73     for (i = 0; i < 2; i++) {
74         qemu_co_enter_next(&bs->throttled_reqs[i]);
75     }
76 }
77 
78 /* This function drains all the throttled I/O requests */
79 static bool bdrv_start_throttled_reqs(BlockDriverState *bs)
80 {
81     bool drained = false;
82     bool enabled = bs->io_limits_enabled;
83     int i;
84 
85     bs->io_limits_enabled = false;
86 
87     for (i = 0; i < 2; i++) {
88         while (qemu_co_enter_next(&bs->throttled_reqs[i])) {
89             drained = true;
90         }
91     }
92 
93     bs->io_limits_enabled = enabled;
94 
95     return drained;
96 }
97 
98 void bdrv_io_limits_disable(BlockDriverState *bs)
99 {
100     bs->io_limits_enabled = false;
101     bdrv_start_throttled_reqs(bs);
102     throttle_group_unregister_bs(bs);
103 }
104 
105 /* should be called before bdrv_set_io_limits if a limit is set */
106 void bdrv_io_limits_enable(BlockDriverState *bs, const char *group)
107 {
108     assert(!bs->io_limits_enabled);
109     throttle_group_register_bs(bs, group);
110     bs->io_limits_enabled = true;
111 }
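
/* Example (hypothetical caller, for illustration): enabling throttling means
 * joining a group first and configuring the limits second, matching the
 * ordering documented above.  The group name and config are the caller's own:
 *
 *     ThrottleConfig cfg;
 *     ...                                   fill in cfg first
 *     bdrv_io_limits_enable(bs, "group0");  register with the throttle group
 *     bdrv_set_io_limits(bs, &cfg);         then apply the limits
 */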
112 
113 void bdrv_io_limits_update_group(BlockDriverState *bs, const char *group)
114 {
115     /* this bs is not part of any group */
116     if (!bs->throttle_state) {
117         return;
118     }
119 
120     /* this bs is already part of the same group as the one we want */
121     if (!g_strcmp0(throttle_group_get_name(bs), group)) {
122         return;
123     }
124 
125     /* need to change the group this bs belongs to */
126     bdrv_io_limits_disable(bs);
127     bdrv_io_limits_enable(bs, group);
128 }
129 
130 void bdrv_setup_io_funcs(BlockDriver *bdrv)
131 {
132     /* Block drivers without coroutine functions need emulation */
133     if (!bdrv->bdrv_co_readv) {
134         bdrv->bdrv_co_readv = bdrv_co_readv_em;
135         bdrv->bdrv_co_writev = bdrv_co_writev_em;
136 
137         /* bdrv_co_readv_em()/bdrv_co_writev_em() work in terms of aio, so if
138          * the block driver lacks aio we need to emulate that too.
139          */
140         if (!bdrv->bdrv_aio_readv) {
141             /* add AIO emulation layer */
142             bdrv->bdrv_aio_readv = bdrv_aio_readv_em;
143             bdrv->bdrv_aio_writev = bdrv_aio_writev_em;
144         }
145     }
146 }
147 
148 void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
149 {
150     BlockDriver *drv = bs->drv;
151     Error *local_err = NULL;
152 
153     memset(&bs->bl, 0, sizeof(bs->bl));
154 
155     if (!drv) {
156         return;
157     }
158 
159     /* Take some limits from the children as a default */
160     if (bs->file) {
161         bdrv_refresh_limits(bs->file->bs, &local_err);
162         if (local_err) {
163             error_propagate(errp, local_err);
164             return;
165         }
166         bs->bl.opt_transfer_length = bs->file->bs->bl.opt_transfer_length;
167         bs->bl.max_transfer_length = bs->file->bs->bl.max_transfer_length;
168         bs->bl.min_mem_alignment = bs->file->bs->bl.min_mem_alignment;
169         bs->bl.opt_mem_alignment = bs->file->bs->bl.opt_mem_alignment;
170         bs->bl.max_iov = bs->file->bs->bl.max_iov;
171     } else {
172         bs->bl.min_mem_alignment = 512;
173         bs->bl.opt_mem_alignment = getpagesize();
174 
175         /* Safe default since most protocols use readv()/writev()/etc */
176         bs->bl.max_iov = IOV_MAX;
177     }
178 
179     if (bs->backing) {
180         bdrv_refresh_limits(bs->backing->bs, &local_err);
181         if (local_err) {
182             error_propagate(errp, local_err);
183             return;
184         }
185         bs->bl.opt_transfer_length =
186             MAX(bs->bl.opt_transfer_length,
187                 bs->backing->bs->bl.opt_transfer_length);
188         bs->bl.max_transfer_length =
189             MIN_NON_ZERO(bs->bl.max_transfer_length,
190                          bs->backing->bs->bl.max_transfer_length);
191         bs->bl.opt_mem_alignment =
192             MAX(bs->bl.opt_mem_alignment,
193                 bs->backing->bs->bl.opt_mem_alignment);
194         bs->bl.min_mem_alignment =
195             MAX(bs->bl.min_mem_alignment,
196                 bs->backing->bs->bl.min_mem_alignment);
197         bs->bl.max_iov =
198             MIN(bs->bl.max_iov,
199                 bs->backing->bs->bl.max_iov);
200     }
201 
202     /* Then let the driver override it */
203     if (drv->bdrv_refresh_limits) {
204         drv->bdrv_refresh_limits(bs, errp);
205     }
206 }
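
/* Worked example (illustrative numbers only): if bs->file reports
 * opt_mem_alignment = 512 and the backing file reports 4096, the merged value
 * above is MAX(512, 4096) = 4096.  Conversely, if only the backing file
 * reports a max_transfer_length, MIN_NON_ZERO() keeps that value instead of
 * clamping the merged limit to zero.
 */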
207 
208 /**
209  * The copy-on-read flag is actually a reference count, so multiple users may
210  * use the feature without worrying about clobbering its previous state.
211  * Copy-on-read stays enabled until all users have disabled it again.
212  */
213 void bdrv_enable_copy_on_read(BlockDriverState *bs)
214 {
215     bs->copy_on_read++;
216 }
217 
218 void bdrv_disable_copy_on_read(BlockDriverState *bs)
219 {
220     assert(bs->copy_on_read > 0);
221     bs->copy_on_read--;
222 }
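
/* Example (hypothetical users, for illustration): every enable must be
 * balanced by exactly one disable, so two independent users can overlap:
 *
 *     bdrv_enable_copy_on_read(bs);     user A
 *     bdrv_enable_copy_on_read(bs);     user B
 *     bdrv_disable_copy_on_read(bs);    user A done, CoR still enabled
 *     bdrv_disable_copy_on_read(bs);    user B done, CoR disabled again
 */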
223 
224 /* Check if any requests are in-flight (including throttled requests) */
225 bool bdrv_requests_pending(BlockDriverState *bs)
226 {
227     BdrvChild *child;
228 
229     if (!QLIST_EMPTY(&bs->tracked_requests)) {
230         return true;
231     }
232     if (!qemu_co_queue_empty(&bs->throttled_reqs[0])) {
233         return true;
234     }
235     if (!qemu_co_queue_empty(&bs->throttled_reqs[1])) {
236         return true;
237     }
238 
239     QLIST_FOREACH(child, &bs->children, next) {
240         if (bdrv_requests_pending(child->bs)) {
241             return true;
242         }
243     }
244 
245     return false;
246 }
247 
248 static void bdrv_drain_recurse(BlockDriverState *bs)
249 {
250     BdrvChild *child;
251 
252     if (bs->drv && bs->drv->bdrv_drain) {
253         bs->drv->bdrv_drain(bs);
254     }
255     QLIST_FOREACH(child, &bs->children, next) {
256         bdrv_drain_recurse(child->bs);
257     }
258 }
259 
260 /*
261  * Wait for pending requests to complete on a single BlockDriverState subtree,
262  * and suspend the block driver's internal I/O until the next request arrives.
263  *
264  * Note that unlike bdrv_drain_all(), the caller must hold the BlockDriverState
265  * AioContext.
266  *
267  * Only this BlockDriverState's AioContext is run, so in-flight requests must
268  * not depend on events in other AioContexts.  In that case, use
269  * bdrv_drain_all() instead.
270  */
271 void bdrv_drain(BlockDriverState *bs)
272 {
273     bool busy = true;
274 
275     bdrv_drain_recurse(bs);
276     while (busy) {
277         /* Keep iterating */
278          bdrv_flush_io_queue(bs);
279          busy = bdrv_requests_pending(bs);
280          busy |= aio_poll(bdrv_get_aio_context(bs), busy);
281     }
282 }
283 
284 /*
285  * Wait for pending requests to complete across all BlockDriverStates
286  *
287  * This function does not flush data to disk, use bdrv_flush_all() for that
288  * after calling this function.
289  */
290 void bdrv_drain_all(void)
291 {
292     /* Always run first iteration so any pending completion BHs run */
293     bool busy = true;
294     BlockDriverState *bs = NULL;
295     GSList *aio_ctxs = NULL, *ctx;
296 
297     while ((bs = bdrv_next(bs))) {
298         AioContext *aio_context = bdrv_get_aio_context(bs);
299 
300         aio_context_acquire(aio_context);
301         if (bs->job) {
302             block_job_pause(bs->job);
303         }
304         bdrv_drain_recurse(bs);
305         aio_context_release(aio_context);
306 
307         if (!g_slist_find(aio_ctxs, aio_context)) {
308             aio_ctxs = g_slist_prepend(aio_ctxs, aio_context);
309         }
310     }
311 
312     /* Note that completion of an asynchronous I/O operation can trigger any
313      * number of other I/O operations on other devices---for example a
314      * coroutine can submit an I/O request to another device in response to
315  * request completion.  Therefore we must keep looping until there is no
316  * more activity, rather than simply draining each device independently.
317      */
318     while (busy) {
319         busy = false;
320 
321         for (ctx = aio_ctxs; ctx != NULL; ctx = ctx->next) {
322             AioContext *aio_context = ctx->data;
323             bs = NULL;
324 
325             aio_context_acquire(aio_context);
326             while ((bs = bdrv_next(bs))) {
327                 if (aio_context == bdrv_get_aio_context(bs)) {
328                     bdrv_flush_io_queue(bs);
329                     if (bdrv_requests_pending(bs)) {
330                         busy = true;
331                         aio_poll(aio_context, busy);
332                     }
333                 }
334             }
335             busy |= aio_poll(aio_context, false);
336             aio_context_release(aio_context);
337         }
338     }
339 
340     bs = NULL;
341     while ((bs = bdrv_next(bs))) {
342         AioContext *aio_context = bdrv_get_aio_context(bs);
343 
344         aio_context_acquire(aio_context);
345         if (bs->job) {
346             block_job_resume(bs->job);
347         }
348         aio_context_release(aio_context);
349     }
350     g_slist_free(aio_ctxs);
351 }
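
/* Example (hypothetical caller, for illustration): as noted above, draining
 * does not flush, so a caller that needs the data on disk combines both:
 *
 *     bdrv_drain_all();
 *     bdrv_flush_all();
 */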
352 
353 /**
354  * Remove an active request from the tracked requests list
355  *
356  * This function should be called when a tracked request is completing.
357  */
358 static void tracked_request_end(BdrvTrackedRequest *req)
359 {
360     if (req->serialising) {
361         req->bs->serialising_in_flight--;
362     }
363 
364     QLIST_REMOVE(req, list);
365     qemu_co_queue_restart_all(&req->wait_queue);
366 }
367 
368 /**
369  * Add an active request to the tracked requests list
370  */
371 static void tracked_request_begin(BdrvTrackedRequest *req,
372                                   BlockDriverState *bs,
373                                   int64_t offset,
374                                   unsigned int bytes,
375                                   enum BdrvTrackedRequestType type)
376 {
377     *req = (BdrvTrackedRequest){
378         .bs = bs,
379         .offset         = offset,
380         .bytes          = bytes,
381         .type           = type,
382         .co             = qemu_coroutine_self(),
383         .serialising    = false,
384         .overlap_offset = offset,
385         .overlap_bytes  = bytes,
386     };
387 
388     qemu_co_queue_init(&req->wait_queue);
389 
390     QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
391 }
392 
393 static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
394 {
395     int64_t overlap_offset = req->offset & ~(align - 1);
396     unsigned int overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
397                                - overlap_offset;
398 
399     if (!req->serialising) {
400         req->bs->serialising_in_flight++;
401         req->serialising = true;
402     }
403 
404     req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
405     req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
406 }
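
/* Worked example (illustrative numbers only): with align = 4096, a request at
 * offset 5120 with 1024 bytes gets overlap_offset = 4096 and
 * overlap_bytes = ROUND_UP(5120 + 1024, 4096) - 4096 = 4096, i.e. the whole
 * 4 KiB block containing the request is serialised.
 */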
407 
408 /**
409  * Round a region to cluster boundaries
410  */
411 void bdrv_round_to_clusters(BlockDriverState *bs,
412                             int64_t sector_num, int nb_sectors,
413                             int64_t *cluster_sector_num,
414                             int *cluster_nb_sectors)
415 {
416     BlockDriverInfo bdi;
417 
418     if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
419         *cluster_sector_num = sector_num;
420         *cluster_nb_sectors = nb_sectors;
421     } else {
422         int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE;
423         *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
424         *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
425                                             nb_sectors, c);
426     }
427 }
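
/* Worked example (illustrative numbers only): with a 64 KiB cluster size,
 * c = 65536 / 512 = 128 sectors, so a request at sector_num = 130 with
 * nb_sectors = 10 is rounded to cluster_sector_num = 128 and
 * cluster_nb_sectors = 128, i.e. exactly one cluster.
 */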
428 
429 static int bdrv_get_cluster_size(BlockDriverState *bs)
430 {
431     BlockDriverInfo bdi;
432     int ret;
433 
434     ret = bdrv_get_info(bs, &bdi);
435     if (ret < 0 || bdi.cluster_size == 0) {
436         return bs->request_alignment;
437     } else {
438         return bdi.cluster_size;
439     }
440 }
441 
442 static bool tracked_request_overlaps(BdrvTrackedRequest *req,
443                                      int64_t offset, unsigned int bytes)
444 {
445     /*        aaaa   bbbb */
446     if (offset >= req->overlap_offset + req->overlap_bytes) {
447         return false;
448     }
449     /* bbbb   aaaa        */
450     if (req->overlap_offset >= offset + bytes) {
451         return false;
452     }
453     return true;
454 }
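
/* Worked example (illustrative numbers only): a tracked request covering
 * bytes [4096, 8192) does not overlap a request at [8192, 12288) because the
 * ranges merely touch, but it does overlap a request at [6144, 10240).
 */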
455 
456 static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
457 {
458     BlockDriverState *bs = self->bs;
459     BdrvTrackedRequest *req;
460     bool retry;
461     bool waited = false;
462 
463     if (!bs->serialising_in_flight) {
464         return false;
465     }
466 
467     do {
468         retry = false;
469         QLIST_FOREACH(req, &bs->tracked_requests, list) {
470             if (req == self || (!req->serialising && !self->serialising)) {
471                 continue;
472             }
473             if (tracked_request_overlaps(req, self->overlap_offset,
474                                          self->overlap_bytes))
475             {
476                 /* Hitting this means there was a reentrant request, for
477                  * example, a block driver issuing nested requests.  This must
478                  * never happen since it means deadlock.
479                  */
480                 assert(qemu_coroutine_self() != req->co);
481 
482                 /* If the request is already (indirectly) waiting for us, or
483                  * will wait for us as soon as it wakes up, then just go on
484                  * (instead of producing a deadlock in the former case). */
485                 if (!req->waiting_for) {
486                     self->waiting_for = req;
487                     qemu_co_queue_wait(&req->wait_queue);
488                     self->waiting_for = NULL;
489                     retry = true;
490                     waited = true;
491                     break;
492                 }
493             }
494         }
495     } while (retry);
496 
497     return waited;
498 }
499 
500 static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
501                                    size_t size)
502 {
503     if (size > BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS) {
504         return -EIO;
505     }
506 
507     if (!bdrv_is_inserted(bs)) {
508         return -ENOMEDIUM;
509     }
510 
511     if (offset < 0) {
512         return -EIO;
513     }
514 
515     return 0;
516 }
517 
518 static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
519                               int nb_sectors)
520 {
521     if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
522         return -EIO;
523     }
524 
525     return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE,
526                                    nb_sectors * BDRV_SECTOR_SIZE);
527 }
528 
529 typedef struct RwCo {
530     BlockDriverState *bs;
531     int64_t offset;
532     QEMUIOVector *qiov;
533     bool is_write;
534     int ret;
535     BdrvRequestFlags flags;
536 } RwCo;
537 
538 static void coroutine_fn bdrv_rw_co_entry(void *opaque)
539 {
540     RwCo *rwco = opaque;
541 
542     if (!rwco->is_write) {
543         rwco->ret = bdrv_co_do_preadv(rwco->bs, rwco->offset,
544                                       rwco->qiov->size, rwco->qiov,
545                                       rwco->flags);
546     } else {
547         rwco->ret = bdrv_co_do_pwritev(rwco->bs, rwco->offset,
548                                        rwco->qiov->size, rwco->qiov,
549                                        rwco->flags);
550     }
551 }
552 
553 /*
554  * Process a vectored synchronous request using coroutines
555  */
556 static int bdrv_prwv_co(BlockDriverState *bs, int64_t offset,
557                         QEMUIOVector *qiov, bool is_write,
558                         BdrvRequestFlags flags)
559 {
560     Coroutine *co;
561     RwCo rwco = {
562         .bs = bs,
563         .offset = offset,
564         .qiov = qiov,
565         .is_write = is_write,
566         .ret = NOT_DONE,
567         .flags = flags,
568     };
569 
570     /**
571      * In a synchronous call context, when the vcpu is blocked, the throttling
572      * timer will not fire, so I/O throttling has to be disabled here if it
573      * has been enabled.
574      */
575     if (bs->io_limits_enabled) {
576         fprintf(stderr, "Disabling I/O throttling on '%s' due "
577                         "to synchronous I/O.\n", bdrv_get_device_name(bs));
578         bdrv_io_limits_disable(bs);
579     }
580 
581     if (qemu_in_coroutine()) {
582         /* Fast-path if already in coroutine context */
583         bdrv_rw_co_entry(&rwco);
584     } else {
585         AioContext *aio_context = bdrv_get_aio_context(bs);
586 
587         co = qemu_coroutine_create(bdrv_rw_co_entry);
588         qemu_coroutine_enter(co, &rwco);
589         while (rwco.ret == NOT_DONE) {
590             aio_poll(aio_context, true);
591         }
592     }
593     return rwco.ret;
594 }
595 
596 /*
597  * Process a synchronous request using coroutines
598  */
599 static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf,
600                       int nb_sectors, bool is_write, BdrvRequestFlags flags)
601 {
602     QEMUIOVector qiov;
603     struct iovec iov = {
604         .iov_base = (void *)buf,
605         .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
606     };
607 
608     if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
609         return -EINVAL;
610     }
611 
612     qemu_iovec_init_external(&qiov, &iov, 1);
613     return bdrv_prwv_co(bs, sector_num << BDRV_SECTOR_BITS,
614                         &qiov, is_write, flags);
615 }
616 
617 /* return < 0 if error. See bdrv_write() for the return codes */
618 int bdrv_read(BlockDriverState *bs, int64_t sector_num,
619               uint8_t *buf, int nb_sectors)
620 {
621     return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false, 0);
622 }
623 
624 /* Just like bdrv_read(), but with I/O throttling temporarily disabled */
625 int bdrv_read_unthrottled(BlockDriverState *bs, int64_t sector_num,
626                           uint8_t *buf, int nb_sectors)
627 {
628     bool enabled;
629     int ret;
630 
631     enabled = bs->io_limits_enabled;
632     bs->io_limits_enabled = false;
633     ret = bdrv_read(bs, sector_num, buf, nb_sectors);
634     bs->io_limits_enabled = enabled;
635     return ret;
636 }
637 
638 /* Return < 0 if error. Important errors are:
639   -EIO         generic I/O error (may happen for all errors)
640   -ENOMEDIUM   No media inserted.
641   -EINVAL      Invalid sector number or nb_sectors
642   -EACCES      Trying to write to a read-only device
643 */
644 int bdrv_write(BlockDriverState *bs, int64_t sector_num,
645                const uint8_t *buf, int nb_sectors)
646 {
647     return bdrv_rw_co(bs, sector_num, (uint8_t *)buf, nb_sectors, true, 0);
648 }
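
/* Example (hypothetical caller, for illustration): the synchronous wrappers
 * transfer whole 512-byte sectors, e.g. reading the first sector and checking
 * the error codes documented above:
 *
 *     uint8_t buf[BDRV_SECTOR_SIZE];
 *     int ret = bdrv_read(bs, 0, buf, 1);
 *     if (ret < 0) {
 *         error_report("read failed: %s", strerror(-ret));
 *     }
 */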
649 
650 int bdrv_write_zeroes(BlockDriverState *bs, int64_t sector_num,
651                       int nb_sectors, BdrvRequestFlags flags)
652 {
653     return bdrv_rw_co(bs, sector_num, NULL, nb_sectors, true,
654                       BDRV_REQ_ZERO_WRITE | flags);
655 }
656 
657 /*
658  * Completely zero out a block device with the help of bdrv_write_zeroes.
659  * The operation is sped up by checking the block status and only writing
660  * zeroes to sectors that do not already read back as zeroes. Optional
661  * flags are passed through to bdrv_write_zeroes (e.g. BDRV_REQ_MAY_UNMAP).
662  *
663  * Returns < 0 on error, 0 on success. For error codes see bdrv_write().
664  */
665 int bdrv_make_zero(BlockDriverState *bs, BdrvRequestFlags flags)
666 {
667     int64_t target_sectors, ret, nb_sectors, sector_num = 0;
668     BlockDriverState *file;
669     int n;
670 
671     target_sectors = bdrv_nb_sectors(bs);
672     if (target_sectors < 0) {
673         return target_sectors;
674     }
675 
676     for (;;) {
677         nb_sectors = MIN(target_sectors - sector_num, BDRV_REQUEST_MAX_SECTORS);
678         if (nb_sectors <= 0) {
679             return 0;
680         }
681         ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &n, &file);
682         if (ret < 0) {
683             error_report("error getting block status at sector %" PRId64 ": %s",
684                          sector_num, strerror(-ret));
685             return ret;
686         }
687         if (ret & BDRV_BLOCK_ZERO) {
688             sector_num += n;
689             continue;
690         }
691         ret = bdrv_write_zeroes(bs, sector_num, n, flags);
692         if (ret < 0) {
693             error_report("error writing zeroes at sector %" PRId64 ": %s",
694                          sector_num, strerror(-ret));
695             return ret;
696         }
697         sector_num += n;
698     }
699 }
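
/* Example (hypothetical caller, for illustration):
 *
 *     ret = bdrv_make_zero(bs, BDRV_REQ_MAY_UNMAP);
 *
 * zeroes the whole device, skipping ranges that already read as zeroes and
 * allowing the driver to unmap blocks instead of writing explicit zeroes.
 */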
700 
701 int bdrv_pread(BlockDriverState *bs, int64_t offset, void *buf, int bytes)
702 {
703     QEMUIOVector qiov;
704     struct iovec iov = {
705         .iov_base = (void *)buf,
706         .iov_len = bytes,
707     };
708     int ret;
709 
710     if (bytes < 0) {
711         return -EINVAL;
712     }
713 
714     qemu_iovec_init_external(&qiov, &iov, 1);
715     ret = bdrv_prwv_co(bs, offset, &qiov, false, 0);
716     if (ret < 0) {
717         return ret;
718     }
719 
720     return bytes;
721 }
722 
723 int bdrv_pwritev(BlockDriverState *bs, int64_t offset, QEMUIOVector *qiov)
724 {
725     int ret;
726 
727     ret = bdrv_prwv_co(bs, offset, qiov, true, 0);
728     if (ret < 0) {
729         return ret;
730     }
731 
732     return qiov->size;
733 }
734 
735 int bdrv_pwrite(BlockDriverState *bs, int64_t offset,
736                 const void *buf, int bytes)
737 {
738     QEMUIOVector qiov;
739     struct iovec iov = {
740         .iov_base   = (void *) buf,
741         .iov_len    = bytes,
742     };
743 
744     if (bytes < 0) {
745         return -EINVAL;
746     }
747 
748     qemu_iovec_init_external(&qiov, &iov, 1);
749     return bdrv_pwritev(bs, offset, &qiov);
750 }
751 
752 /*
753  * Writes to the file and ensures that no writes are reordered across this
754  * request (acts as a barrier)
755  *
756  * Returns 0 on success, -errno in error cases.
757  */
758 int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
759     const void *buf, int count)
760 {
761     int ret;
762 
763     ret = bdrv_pwrite(bs, offset, buf, count);
764     if (ret < 0) {
765         return ret;
766     }
767 
768     /* No flush needed for cache modes that already do it */
769     if (bs->enable_write_cache) {
770         bdrv_flush(bs);
771     }
772 
773     return 0;
774 }
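
/* Example (hypothetical caller, for illustration): metadata updates that must
 * not be reordered with later writes can use the flushing variant, where
 * 'header_offset' and 'header' are the caller's own data:
 *
 *     ret = bdrv_pwrite_sync(bs, header_offset, &header, sizeof(header));
 */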
775 
776 static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
777         int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
778 {
779     /* Perform I/O through a temporary buffer so that users who scribble over
780      * their read buffer while the operation is in progress do not end up
781      * modifying the image file.  This is critical for zero-copy guest I/O
782      * where anything might happen inside guest memory.
783      */
784     void *bounce_buffer;
785 
786     BlockDriver *drv = bs->drv;
787     struct iovec iov;
788     QEMUIOVector bounce_qiov;
789     int64_t cluster_sector_num;
790     int cluster_nb_sectors;
791     size_t skip_bytes;
792     int ret;
793 
794  * Cover the entire cluster so no additional backing file I/O is required
795  * when allocating a cluster in the image file.
796      */
797     bdrv_round_to_clusters(bs, sector_num, nb_sectors,
798                            &cluster_sector_num, &cluster_nb_sectors);
799 
800     trace_bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors,
801                                    cluster_sector_num, cluster_nb_sectors);
802 
803     iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE;
804     iov.iov_base = bounce_buffer = qemu_try_blockalign(bs, iov.iov_len);
805     if (bounce_buffer == NULL) {
806         ret = -ENOMEM;
807         goto err;
808     }
809 
810     qemu_iovec_init_external(&bounce_qiov, &iov, 1);
811 
812     ret = drv->bdrv_co_readv(bs, cluster_sector_num, cluster_nb_sectors,
813                              &bounce_qiov);
814     if (ret < 0) {
815         goto err;
816     }
817 
818     if (drv->bdrv_co_write_zeroes &&
819         buffer_is_zero(bounce_buffer, iov.iov_len)) {
820         ret = bdrv_co_do_write_zeroes(bs, cluster_sector_num,
821                                       cluster_nb_sectors, 0);
822     } else {
823         /* This does not change the data on the disk, so it is not necessary
824          * to flush even in cache=writethrough mode.
825          */
826         ret = drv->bdrv_co_writev(bs, cluster_sector_num, cluster_nb_sectors,
827                                   &bounce_qiov);
828     }
829 
830     if (ret < 0) {
831         /* It might be okay to ignore write errors for guest requests.  If this
832          * is a deliberate copy-on-read then we don't want to ignore the error.
833          * Simply report it in all cases.
834          */
835         goto err;
836     }
837 
838     skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE;
839     qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes,
840                         nb_sectors * BDRV_SECTOR_SIZE);
841 
842 err:
843     qemu_vfree(bounce_buffer);
844     return ret;
845 }
846 
847 /*
848  * Forwards an already correctly aligned request to the BlockDriver. This
849  * handles copy on read and zeroing after EOF; any other features must be
850  * implemented by the caller.
851  */
852 static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs,
853     BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
854     int64_t align, QEMUIOVector *qiov, int flags)
855 {
856     BlockDriver *drv = bs->drv;
857     int ret;
858 
859     int64_t sector_num = offset >> BDRV_SECTOR_BITS;
860     unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS;
861 
862     assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
863     assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
864     assert(!qiov || bytes == qiov->size);
865 
866     /* Handle Copy on Read and associated serialisation */
867     if (flags & BDRV_REQ_COPY_ON_READ) {
868         /* If we touch the same cluster it counts as an overlap.  This
869          * guarantees that allocating writes will be serialized and not race
870          * with each other for the same cluster.  For example, in copy-on-read
871          * it ensures that the CoR read and write operations are atomic and
872          * guest writes cannot interleave between them. */
873         mark_request_serialising(req, bdrv_get_cluster_size(bs));
874     }
875 
876     if (!(flags & BDRV_REQ_NO_SERIALISING)) {
877         wait_serialising_requests(req);
878     }
879 
880     if (flags & BDRV_REQ_COPY_ON_READ) {
881         int pnum;
882 
883         ret = bdrv_is_allocated(bs, sector_num, nb_sectors, &pnum);
884         if (ret < 0) {
885             goto out;
886         }
887 
888         if (!ret || pnum != nb_sectors) {
889             ret = bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, qiov);
890             goto out;
891         }
892     }
893 
894     /* Forward the request to the BlockDriver */
895     if (!bs->zero_beyond_eof) {
896         ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
897     } else {
898         /* Read zeros after EOF */
899         int64_t total_sectors, max_nb_sectors;
900 
901         total_sectors = bdrv_nb_sectors(bs);
902         if (total_sectors < 0) {
903             ret = total_sectors;
904             goto out;
905         }
906 
907         max_nb_sectors = ROUND_UP(MAX(0, total_sectors - sector_num),
908                                   align >> BDRV_SECTOR_BITS);
909         if (nb_sectors < max_nb_sectors) {
910             ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
911         } else if (max_nb_sectors > 0) {
912             QEMUIOVector local_qiov;
913 
914             qemu_iovec_init(&local_qiov, qiov->niov);
915             qemu_iovec_concat(&local_qiov, qiov, 0,
916                               max_nb_sectors * BDRV_SECTOR_SIZE);
917 
918             ret = drv->bdrv_co_readv(bs, sector_num, max_nb_sectors,
919                                      &local_qiov);
920 
921             qemu_iovec_destroy(&local_qiov);
922         } else {
923             ret = 0;
924         }
925 
926         /* Reading beyond end of file is supposed to produce zeroes */
927         if (ret == 0 && total_sectors < sector_num + nb_sectors) {
928             uint64_t offset = MAX(0, total_sectors - sector_num);
929             uint64_t bytes = (sector_num + nb_sectors - offset) *
930                               BDRV_SECTOR_SIZE;
931             qemu_iovec_memset(qiov, offset * BDRV_SECTOR_SIZE, 0, bytes);
932         }
933     }
934 
935 out:
936     return ret;
937 }
938 
939 /*
940  * Handle a read request in coroutine context
941  */
942 static int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
943     int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
944     BdrvRequestFlags flags)
945 {
946     BlockDriver *drv = bs->drv;
947     BdrvTrackedRequest req;
948 
949     /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
950     uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
951     uint8_t *head_buf = NULL;
952     uint8_t *tail_buf = NULL;
953     QEMUIOVector local_qiov;
954     bool use_local_qiov = false;
955     int ret;
956 
957     if (!drv) {
958         return -ENOMEDIUM;
959     }
960 
961     ret = bdrv_check_byte_request(bs, offset, bytes);
962     if (ret < 0) {
963         return ret;
964     }
965 
966     /* No copy-on-read for non-serialising reads (data about to be overwritten) */
967     if (bs->copy_on_read && !(flags & BDRV_REQ_NO_SERIALISING)) {
968         flags |= BDRV_REQ_COPY_ON_READ;
969     }
970 
971     /* throttling disk I/O */
972     if (bs->io_limits_enabled) {
973         throttle_group_co_io_limits_intercept(bs, bytes, false);
974     }
975 
976     /* Align read if necessary by padding qiov */
977     if (offset & (align - 1)) {
978         head_buf = qemu_blockalign(bs, align);
979         qemu_iovec_init(&local_qiov, qiov->niov + 2);
980         qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
981         qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
982         use_local_qiov = true;
983 
984         bytes += offset & (align - 1);
985         offset = offset & ~(align - 1);
986     }
987 
988     if ((offset + bytes) & (align - 1)) {
989         if (!use_local_qiov) {
990             qemu_iovec_init(&local_qiov, qiov->niov + 1);
991             qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
992             use_local_qiov = true;
993         }
994         tail_buf = qemu_blockalign(bs, align);
995         qemu_iovec_add(&local_qiov, tail_buf,
996                        align - ((offset + bytes) & (align - 1)));
997 
998         bytes = ROUND_UP(bytes, align);
999     }
1000 
1001     tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
1002     ret = bdrv_aligned_preadv(bs, &req, offset, bytes, align,
1003                               use_local_qiov ? &local_qiov : qiov,
1004                               flags);
1005     tracked_request_end(&req);
1006 
1007     if (use_local_qiov) {
1008         qemu_iovec_destroy(&local_qiov);
1009         qemu_vfree(head_buf);
1010         qemu_vfree(tail_buf);
1011     }
1012 
1013     return ret;
1014 }
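
/* Worked example (illustrative numbers only): with request_alignment = 4096, a
 * 1024-byte read at offset 512 is widened to one aligned 4096-byte read at
 * offset 0: 512 bytes of head padding and 2560 bytes of tail padding are added
 * around the caller's qiov and discarded once the request completes.
 */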
1015 
1016 static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
1017     int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
1018     BdrvRequestFlags flags)
1019 {
1020     if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
1021         return -EINVAL;
1022     }
1023 
1024     return bdrv_co_do_preadv(bs, sector_num << BDRV_SECTOR_BITS,
1025                              nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
1026 }
1027 
1028 int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
1029     int nb_sectors, QEMUIOVector *qiov)
1030 {
1031     trace_bdrv_co_readv(bs, sector_num, nb_sectors);
1032 
1033     return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, 0);
1034 }
1035 
1036 int coroutine_fn bdrv_co_readv_no_serialising(BlockDriverState *bs,
1037     int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
1038 {
1039     trace_bdrv_co_readv_no_serialising(bs, sector_num, nb_sectors);
1040 
1041     return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov,
1042                             BDRV_REQ_NO_SERIALISING);
1043 }
1044 
1045 int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
1046     int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
1047 {
1048     trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors);
1049 
1050     return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov,
1051                             BDRV_REQ_COPY_ON_READ);
1052 }
1053 
1054 #define MAX_WRITE_ZEROES_BOUNCE_BUFFER 32768
1055 
1056 static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
1057     int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
1058 {
1059     BlockDriver *drv = bs->drv;
1060     QEMUIOVector qiov;
1061     struct iovec iov = {0};
1062     int ret = 0;
1063 
1064     int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_write_zeroes,
1065                                         BDRV_REQUEST_MAX_SECTORS);
1066 
1067     while (nb_sectors > 0 && !ret) {
1068         int num = nb_sectors;
1069 
1070         /* Align request.  Block drivers can expect the "bulk" of the request
1071          * to be aligned.
1072          */
1073         if (bs->bl.write_zeroes_alignment
1074             && num > bs->bl.write_zeroes_alignment) {
1075             if (sector_num % bs->bl.write_zeroes_alignment != 0) {
1076                 /* Make a small request up to the first aligned sector.  */
1077                 num = bs->bl.write_zeroes_alignment;
1078                 num -= sector_num % bs->bl.write_zeroes_alignment;
1079             } else if ((sector_num + num) % bs->bl.write_zeroes_alignment != 0) {
1080                 /* Shorten the request to the last aligned sector.  num cannot
1081                  * underflow because num > bs->bl.write_zeroes_alignment.
1082                  */
1083                 num -= (sector_num + num) % bs->bl.write_zeroes_alignment;
1084             }
1085         }
1086 
1087         /* limit request size */
1088         if (num > max_write_zeroes) {
1089             num = max_write_zeroes;
1090         }
1091 
1092         ret = -ENOTSUP;
1093         /* First try the efficient write zeroes operation */
1094         if (drv->bdrv_co_write_zeroes) {
1095             ret = drv->bdrv_co_write_zeroes(bs, sector_num, num, flags);
1096         }
1097 
1098         if (ret == -ENOTSUP) {
1099             /* Fall back to bounce buffer if write zeroes is unsupported */
1100             int max_xfer_len = MIN_NON_ZERO(bs->bl.max_transfer_length,
1101                                             MAX_WRITE_ZEROES_BOUNCE_BUFFER);
1102             num = MIN(num, max_xfer_len);
1103             iov.iov_len = num * BDRV_SECTOR_SIZE;
1104             if (iov.iov_base == NULL) {
1105                 iov.iov_base = qemu_try_blockalign(bs, num * BDRV_SECTOR_SIZE);
1106                 if (iov.iov_base == NULL) {
1107                     ret = -ENOMEM;
1108                     goto fail;
1109                 }
1110                 memset(iov.iov_base, 0, num * BDRV_SECTOR_SIZE);
1111             }
1112             qemu_iovec_init_external(&qiov, &iov, 1);
1113 
1114             ret = drv->bdrv_co_writev(bs, sector_num, num, &qiov);
1115 
1116             /* Keep the bounce buffer around if it is big enough for
1117              * all future requests.
1118              */
1119             if (num < max_xfer_len) {
1120                 qemu_vfree(iov.iov_base);
1121                 iov.iov_base = NULL;
1122             }
1123         }
1124 
1125         sector_num += num;
1126         nb_sectors -= num;
1127     }
1128 
1129 fail:
1130     qemu_vfree(iov.iov_base);
1131     return ret;
1132 }
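
/* Worked example (illustrative numbers only): with write_zeroes_alignment = 8,
 * the loop above splits a request for sectors [5, 25) into three chunks:
 * 3 sectors up to the first aligned sector (5..7), 16 aligned sectors (8..23),
 * and one final unaligned sector (24).
 */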
1133 
1134 /*
1135  * Forwards an already correctly aligned write request to the BlockDriver.
1136  */
1137 static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs,
1138     BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
1139     QEMUIOVector *qiov, int flags)
1140 {
1141     BlockDriver *drv = bs->drv;
1142     bool waited;
1143     int ret;
1144 
1145     int64_t sector_num = offset >> BDRV_SECTOR_BITS;
1146     unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS;
1147 
1148     assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
1149     assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
1150     assert(!qiov || bytes == qiov->size);
1151 
1152     waited = wait_serialising_requests(req);
1153     assert(!waited || !req->serialising);
1154     assert(req->overlap_offset <= offset);
1155     assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);
1156 
1157     ret = notifier_with_return_list_notify(&bs->before_write_notifiers, req);
1158 
1159     if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
1160         !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_write_zeroes &&
1161         qemu_iovec_is_zero(qiov)) {
1162         flags |= BDRV_REQ_ZERO_WRITE;
1163         if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
1164             flags |= BDRV_REQ_MAY_UNMAP;
1165         }
1166     }
1167 
1168     if (ret < 0) {
1169         /* Do nothing, write notifier decided to fail this request */
1170     } else if (flags & BDRV_REQ_ZERO_WRITE) {
1171         bdrv_debug_event(bs, BLKDBG_PWRITEV_ZERO);
1172         ret = bdrv_co_do_write_zeroes(bs, sector_num, nb_sectors, flags);
1173     } else {
1174         bdrv_debug_event(bs, BLKDBG_PWRITEV);
1175         ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
1176     }
1177     bdrv_debug_event(bs, BLKDBG_PWRITEV_DONE);
1178 
1179     if (ret == 0 && !bs->enable_write_cache) {
1180         ret = bdrv_co_flush(bs);
1181     }
1182 
1183     bdrv_set_dirty(bs, sector_num, nb_sectors);
1184 
1185     if (bs->wr_highest_offset < offset + bytes) {
1186         bs->wr_highest_offset = offset + bytes;
1187     }
1188 
1189     if (ret >= 0) {
1190         bs->total_sectors = MAX(bs->total_sectors, sector_num + nb_sectors);
1191     }
1192 
1193     return ret;
1194 }
1195 
1196 static int coroutine_fn bdrv_co_do_zero_pwritev(BlockDriverState *bs,
1197                                                 int64_t offset,
1198                                                 unsigned int bytes,
1199                                                 BdrvRequestFlags flags,
1200                                                 BdrvTrackedRequest *req)
1201 {
1202     uint8_t *buf = NULL;
1203     QEMUIOVector local_qiov;
1204     struct iovec iov;
1205     uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
1206     unsigned int head_padding_bytes, tail_padding_bytes;
1207     int ret = 0;
1208 
1209     head_padding_bytes = offset & (align - 1);
1210     tail_padding_bytes = align - ((offset + bytes) & (align - 1));
1211 
1212 
1213     assert(flags & BDRV_REQ_ZERO_WRITE);
1214     if (head_padding_bytes || tail_padding_bytes) {
1215         buf = qemu_blockalign(bs, align);
1216         iov = (struct iovec) {
1217             .iov_base   = buf,
1218             .iov_len    = align,
1219         };
1220         qemu_iovec_init_external(&local_qiov, &iov, 1);
1221     }
1222     if (head_padding_bytes) {
1223         uint64_t zero_bytes = MIN(bytes, align - head_padding_bytes);
1224 
1225         /* RMW the unaligned part before head. */
1226         mark_request_serialising(req, align);
1227         wait_serialising_requests(req);
1228         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
1229         ret = bdrv_aligned_preadv(bs, req, offset & ~(align - 1), align,
1230                                   align, &local_qiov, 0);
1231         if (ret < 0) {
1232             goto fail;
1233         }
1234         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
1235 
1236         memset(buf + head_padding_bytes, 0, zero_bytes);
1237         ret = bdrv_aligned_pwritev(bs, req, offset & ~(align - 1), align,
1238                                    &local_qiov,
1239                                    flags & ~BDRV_REQ_ZERO_WRITE);
1240         if (ret < 0) {
1241             goto fail;
1242         }
1243         offset += zero_bytes;
1244         bytes -= zero_bytes;
1245     }
1246 
1247     assert(!bytes || (offset & (align - 1)) == 0);
1248     if (bytes >= align) {
1249         /* Write the aligned part in the middle. */
1250         uint64_t aligned_bytes = bytes & ~(align - 1);
1251         ret = bdrv_aligned_pwritev(bs, req, offset, aligned_bytes,
1252                                    NULL, flags);
1253         if (ret < 0) {
1254             goto fail;
1255         }
1256         bytes -= aligned_bytes;
1257         offset += aligned_bytes;
1258     }
1259 
1260     assert(!bytes || (offset & (align - 1)) == 0);
1261     if (bytes) {
1262         assert(align == tail_padding_bytes + bytes);
1263         /* RMW the unaligned part after tail. */
1264         mark_request_serialising(req, align);
1265         wait_serialising_requests(req);
1266         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
1267         ret = bdrv_aligned_preadv(bs, req, offset, align,
1268                                   align, &local_qiov, 0);
1269         if (ret < 0) {
1270             goto fail;
1271         }
1272         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
1273 
1274         memset(buf, 0, bytes);
1275         ret = bdrv_aligned_pwritev(bs, req, offset, align,
1276                                    &local_qiov, flags & ~BDRV_REQ_ZERO_WRITE);
1277     }
1278 fail:
1279     qemu_vfree(buf);
1280     return ret;
1281 
1282 }
1283 
1284 /*
1285  * Handle a write request in coroutine context
1286  */
1287 static int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs,
1288     int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
1289     BdrvRequestFlags flags)
1290 {
1291     BdrvTrackedRequest req;
1292     /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
1293     uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
1294     uint8_t *head_buf = NULL;
1295     uint8_t *tail_buf = NULL;
1296     QEMUIOVector local_qiov;
1297     bool use_local_qiov = false;
1298     int ret;
1299 
1300     if (!bs->drv) {
1301         return -ENOMEDIUM;
1302     }
1303     if (bs->read_only) {
1304         return -EPERM;
1305     }
1306     assert(!(bs->open_flags & BDRV_O_INACTIVE));
1307 
1308     ret = bdrv_check_byte_request(bs, offset, bytes);
1309     if (ret < 0) {
1310         return ret;
1311     }
1312 
1313     /* throttling disk I/O */
1314     if (bs->io_limits_enabled) {
1315         throttle_group_co_io_limits_intercept(bs, bytes, true);
1316     }
1317 
1318     /*
1319      * Align write if necessary by performing a read-modify-write cycle.
1320      * Pad qiov with the read parts and be sure to have a tracked request not
1321      * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle.
1322      */
1323     tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE);
1324 
1325     if (!qiov) {
1326         ret = bdrv_co_do_zero_pwritev(bs, offset, bytes, flags, &req);
1327         goto out;
1328     }
1329 
1330     if (offset & (align - 1)) {
1331         QEMUIOVector head_qiov;
1332         struct iovec head_iov;
1333 
1334         mark_request_serialising(&req, align);
1335         wait_serialising_requests(&req);
1336 
1337         head_buf = qemu_blockalign(bs, align);
1338         head_iov = (struct iovec) {
1339             .iov_base   = head_buf,
1340             .iov_len    = align,
1341         };
1342         qemu_iovec_init_external(&head_qiov, &head_iov, 1);
1343 
1344         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
1345         ret = bdrv_aligned_preadv(bs, &req, offset & ~(align - 1), align,
1346                                   align, &head_qiov, 0);
1347         if (ret < 0) {
1348             goto fail;
1349         }
1350         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
1351 
1352         qemu_iovec_init(&local_qiov, qiov->niov + 2);
1353         qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
1354         qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
1355         use_local_qiov = true;
1356 
1357         bytes += offset & (align - 1);
1358         offset = offset & ~(align - 1);
1359     }
1360 
1361     if ((offset + bytes) & (align - 1)) {
1362         QEMUIOVector tail_qiov;
1363         struct iovec tail_iov;
1364         size_t tail_bytes;
1365         bool waited;
1366 
1367         mark_request_serialising(&req, align);
1368         waited = wait_serialising_requests(&req);
1369         assert(!waited || !use_local_qiov);
1370 
1371         tail_buf = qemu_blockalign(bs, align);
1372         tail_iov = (struct iovec) {
1373             .iov_base   = tail_buf,
1374             .iov_len    = align,
1375         };
1376         qemu_iovec_init_external(&tail_qiov, &tail_iov, 1);
1377 
1378         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
1379         ret = bdrv_aligned_preadv(bs, &req, (offset + bytes) & ~(align - 1), align,
1380                                   align, &tail_qiov, 0);
1381         if (ret < 0) {
1382             goto fail;
1383         }
1384         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
1385 
1386         if (!use_local_qiov) {
1387             qemu_iovec_init(&local_qiov, qiov->niov + 1);
1388             qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
1389             use_local_qiov = true;
1390         }
1391 
1392         tail_bytes = (offset + bytes) & (align - 1);
1393         qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes);
1394 
1395         bytes = ROUND_UP(bytes, align);
1396     }
1397 
1398     ret = bdrv_aligned_pwritev(bs, &req, offset, bytes,
1399                                use_local_qiov ? &local_qiov : qiov,
1400                                flags);
1401 
1402 fail:
1403 
1404     if (use_local_qiov) {
1405         qemu_iovec_destroy(&local_qiov);
1406     }
1407     qemu_vfree(head_buf);
1408     qemu_vfree(tail_buf);
1409 out:
1410     tracked_request_end(&req);
1411     return ret;
1412 }
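
/* Worked example (illustrative numbers only): with request_alignment = 4096, a
 * 1024-byte write at offset 512 triggers a read-modify-write cycle: the
 * request is marked serialising so overlapping writes wait, the surrounding
 * 4096-byte block is read back for the head and tail padding, and a single
 * aligned 4096-byte write is issued.
 */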
1413 
1414 static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
1415     int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
1416     BdrvRequestFlags flags)
1417 {
1418     if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
1419         return -EINVAL;
1420     }
1421 
1422     return bdrv_co_do_pwritev(bs, sector_num << BDRV_SECTOR_BITS,
1423                               nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
1424 }
1425 
1426 int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
1427     int nb_sectors, QEMUIOVector *qiov)
1428 {
1429     trace_bdrv_co_writev(bs, sector_num, nb_sectors);
1430 
1431     return bdrv_co_do_writev(bs, sector_num, nb_sectors, qiov, 0);
1432 }
1433 
1434 int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs,
1435                                       int64_t sector_num, int nb_sectors,
1436                                       BdrvRequestFlags flags)
1437 {
1438     trace_bdrv_co_write_zeroes(bs, sector_num, nb_sectors, flags);
1439 
1440     if (!(bs->open_flags & BDRV_O_UNMAP)) {
1441         flags &= ~BDRV_REQ_MAY_UNMAP;
1442     }
1443 
1444     return bdrv_co_do_writev(bs, sector_num, nb_sectors, NULL,
1445                              BDRV_REQ_ZERO_WRITE | flags);
1446 }
1447 
1448 typedef struct BdrvCoGetBlockStatusData {
1449     BlockDriverState *bs;
1450     BlockDriverState *base;
1451     BlockDriverState **file;
1452     int64_t sector_num;
1453     int nb_sectors;
1454     int *pnum;
1455     int64_t ret;
1456     bool done;
1457 } BdrvCoGetBlockStatusData;
1458 
1459 /*
1460  * Returns the allocation status of the specified sectors.
1461  * Drivers not implementing the functionality are assumed to not support
1462  * backing files, hence all their sectors are reported as allocated.
1463  *
1464  * If 'sector_num' is beyond the end of the disk image the return value is 0
1465  * and 'pnum' is set to 0.
1466  *
1467  * 'pnum' is set to the number of sectors (including and immediately following
1468  * the specified sector) that are known to be in the same
1469  * allocated/unallocated state.
1470  *
1471  * 'nb_sectors' is the max value 'pnum' should be set to.  If nb_sectors goes
1472  * beyond the end of the disk image it will be clamped.
1473  *
1474  * If the returned value is positive and the BDRV_BLOCK_OFFSET_VALID bit is
1475  * set, 'file' points to the BDS in which the sector range is allocated.
1476  */
1477 static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs,
1478                                                      int64_t sector_num,
1479                                                      int nb_sectors, int *pnum,
1480                                                      BlockDriverState **file)
1481 {
1482     int64_t total_sectors;
1483     int64_t n;
1484     int64_t ret, ret2;
1485 
1486     total_sectors = bdrv_nb_sectors(bs);
1487     if (total_sectors < 0) {
1488         return total_sectors;
1489     }
1490 
1491     if (sector_num >= total_sectors) {
1492         *pnum = 0;
1493         return 0;
1494     }
1495 
1496     n = total_sectors - sector_num;
1497     if (n < nb_sectors) {
1498         nb_sectors = n;
1499     }
1500 
1501     if (!bs->drv->bdrv_co_get_block_status) {
1502         *pnum = nb_sectors;
1503         ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
1504         if (bs->drv->protocol_name) {
1505             ret |= BDRV_BLOCK_OFFSET_VALID | (sector_num * BDRV_SECTOR_SIZE);
1506         }
1507         return ret;
1508     }
1509 
1510     *file = NULL;
1511     ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum,
1512                                             file);
1513     if (ret < 0) {
1514         *pnum = 0;
1515         return ret;
1516     }
1517 
1518     if (ret & BDRV_BLOCK_RAW) {
1519         assert(ret & BDRV_BLOCK_OFFSET_VALID);
1520         return bdrv_get_block_status(bs->file->bs, ret >> BDRV_SECTOR_BITS,
1521                                      *pnum, pnum, file);
1522     }
1523 
1524     if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
1525         ret |= BDRV_BLOCK_ALLOCATED;
1526     } else {
1527         if (bdrv_unallocated_blocks_are_zero(bs)) {
1528             ret |= BDRV_BLOCK_ZERO;
1529         } else if (bs->backing) {
1530             BlockDriverState *bs2 = bs->backing->bs;
1531             int64_t nb_sectors2 = bdrv_nb_sectors(bs2);
1532             if (nb_sectors2 >= 0 && sector_num >= nb_sectors2) {
1533                 ret |= BDRV_BLOCK_ZERO;
1534             }
1535         }
1536     }
1537 
1538     if (*file && *file != bs &&
1539         (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
1540         (ret & BDRV_BLOCK_OFFSET_VALID)) {
1541         BlockDriverState *file2;
1542         int file_pnum;
1543 
1544         ret2 = bdrv_co_get_block_status(*file, ret >> BDRV_SECTOR_BITS,
1545                                         *pnum, &file_pnum, &file2);
1546         if (ret2 >= 0) {
1547             /* Ignore errors.  This is just providing extra information, it
1548              * is useful but not necessary.
1549              */
1550             if (!file_pnum) {
1551                 /* !file_pnum indicates an offset at or beyond the EOF; it is
1552                  * perfectly valid for the format block driver to point to such
1553                  * offsets, so catch it and mark everything as zero */
1554                 ret |= BDRV_BLOCK_ZERO;
1555             } else {
1556                 /* Limit request to the range reported by the protocol driver */
1557                 *pnum = file_pnum;
1558                 ret |= (ret2 & BDRV_BLOCK_ZERO);
1559             }
1560         }
1561     }
1562 
1563     return ret;
1564 }
1565 
1566 static int64_t coroutine_fn bdrv_co_get_block_status_above(BlockDriverState *bs,
1567         BlockDriverState *base,
1568         int64_t sector_num,
1569         int nb_sectors,
1570         int *pnum,
1571         BlockDriverState **file)
1572 {
1573     BlockDriverState *p;
1574     int64_t ret = 0;
1575 
1576     assert(bs != base);
1577     for (p = bs; p != base; p = backing_bs(p)) {
1578         ret = bdrv_co_get_block_status(p, sector_num, nb_sectors, pnum, file);
1579         if (ret < 0 || ret & BDRV_BLOCK_ALLOCATED) {
1580             break;
1581         }
1582         /* [sector_num, pnum] unallocated on this layer, which could be only
1583          * the first part of [sector_num, nb_sectors].  */
1584         nb_sectors = MIN(nb_sectors, *pnum);
1585     }
1586     return ret;
1587 }
1588 
1589 /* Coroutine wrapper for bdrv_get_block_status_above() */
1590 static void coroutine_fn bdrv_get_block_status_above_co_entry(void *opaque)
1591 {
1592     BdrvCoGetBlockStatusData *data = opaque;
1593 
1594     data->ret = bdrv_co_get_block_status_above(data->bs, data->base,
1595                                                data->sector_num,
1596                                                data->nb_sectors,
1597                                                data->pnum,
1598                                                data->file);
1599     data->done = true;
1600 }
1601 
1602 /*
1603  * Synchronous wrapper around bdrv_co_get_block_status_above().
1604  *
1605  * See bdrv_co_get_block_status_above() for details.
1606  */
1607 int64_t bdrv_get_block_status_above(BlockDriverState *bs,
1608                                     BlockDriverState *base,
1609                                     int64_t sector_num,
1610                                     int nb_sectors, int *pnum,
1611                                     BlockDriverState **file)
1612 {
1613     Coroutine *co;
1614     BdrvCoGetBlockStatusData data = {
1615         .bs = bs,
1616         .base = base,
1617         .file = file,
1618         .sector_num = sector_num,
1619         .nb_sectors = nb_sectors,
1620         .pnum = pnum,
1621         .done = false,
1622     };
1623 
1624     if (qemu_in_coroutine()) {
1625         /* Fast-path if already in coroutine context */
1626         bdrv_get_block_status_above_co_entry(&data);
1627     } else {
1628         AioContext *aio_context = bdrv_get_aio_context(bs);
1629 
1630         co = qemu_coroutine_create(bdrv_get_block_status_above_co_entry);
1631         qemu_coroutine_enter(co, &data);
1632         while (!data.done) {
1633             aio_poll(aio_context, true);
1634         }
1635     }
1636     return data.ret;
1637 }
1638 
1639 int64_t bdrv_get_block_status(BlockDriverState *bs,
1640                               int64_t sector_num,
1641                               int nb_sectors, int *pnum,
1642                               BlockDriverState **file)
1643 {
1644     return bdrv_get_block_status_above(bs, backing_bs(bs),
1645                                        sector_num, nb_sectors, pnum, file);
1646 }
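
/* Illustrative sketch (not part of the original source): how a hypothetical
 * caller might query the status of the first few sectors and interpret the
 * returned BDRV_BLOCK_* flags.  Variable names and values are made up.
 *
 *     int pnum;
 *     BlockDriverState *file;
 *     int64_t ret = bdrv_get_block_status(bs, 0, 16, &pnum, &file);
 *     if (ret < 0) {
 *         ... error ...
 *     } else if (ret & BDRV_BLOCK_ZERO) {
 *         ... the first pnum sectors read as zeroes ...
 *     } else if (ret & BDRV_BLOCK_DATA) {
 *         ... the first pnum sectors contain data in this layer ...
 *     }
 */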
1647 
1648 int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num,
1649                                    int nb_sectors, int *pnum)
1650 {
1651     BlockDriverState *file;
1652     int64_t ret = bdrv_get_block_status(bs, sector_num, nb_sectors, pnum,
1653                                         &file);
1654     if (ret < 0) {
1655         return ret;
1656     }
1657     return !!(ret & BDRV_BLOCK_ALLOCATED);
1658 }
1659 
1660 /*
1661  * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
1662  *
1663  * Return 1 if the given sector is allocated in any image between
1664  * BASE and TOP (inclusive).  BASE can be NULL to check if the given
1665  * sector is allocated in any image of the chain.  Return 0 otherwise,
1666  * or a negative errno on failure.
1667  *
1668  * 'pnum' is set to the number of sectors (including and immediately following
1669  *  the specified sector) that are known to be in the same
1670  *  allocated/unallocated state.
1671  */
1672 int bdrv_is_allocated_above(BlockDriverState *top,
1673                             BlockDriverState *base,
1674                             int64_t sector_num,
1675                             int nb_sectors, int *pnum)
1676 {
1677     BlockDriverState *intermediate;
1678     int ret, n = nb_sectors;
1679 
1680     intermediate = top;
1681     while (intermediate && intermediate != base) {
1682         int pnum_inter;
1683         ret = bdrv_is_allocated(intermediate, sector_num, nb_sectors,
1684                                 &pnum_inter);
1685         if (ret < 0) {
1686             return ret;
1687         } else if (ret) {
1688             *pnum = pnum_inter;
1689             return 1;
1690         }
1691 
1692         /*
1693          * [sector_num, nb_sectors] is unallocated on top but intermediate
1694          * might have
1695          *
1696          * [sector_num+x, nb_sectors] allocated.
1697          */
1698         if (n > pnum_inter &&
1699             (intermediate == top ||
1700              sector_num + pnum_inter < intermediate->total_sectors)) {
1701             n = pnum_inter;
1702         }
1703 
1704         intermediate = backing_bs(intermediate);
1705     }
1706 
1707     *pnum = n;
1708     return 0;
1709 }
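
/* Illustrative sketch (not part of the original source): a hypothetical caller
 * using bdrv_is_allocated_above() to decide whether a sector range has to be
 * copied from the layers above 'base' (for example during a commit-like job).
 *
 *     int pnum;
 *     int ret = bdrv_is_allocated_above(top, base, sector_num, nb_sectors,
 *                                       &pnum);
 *     if (ret < 0) {
 *         ... error ...
 *     } else if (ret) {
 *         ... the first pnum sectors are allocated somewhere above base ...
 *     } else {
 *         ... the first pnum sectors are not allocated above base ...
 *     }
 */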
1710 
1711 int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,
1712                           const uint8_t *buf, int nb_sectors)
1713 {
1714     BlockDriver *drv = bs->drv;
1715     int ret;
1716 
1717     if (!drv) {
1718         return -ENOMEDIUM;
1719     }
1720     if (!drv->bdrv_write_compressed) {
1721         return -ENOTSUP;
1722     }
1723     ret = bdrv_check_request(bs, sector_num, nb_sectors);
1724     if (ret < 0) {
1725         return ret;
1726     }
1727 
1728     assert(QLIST_EMPTY(&bs->dirty_bitmaps));
1729 
1730     return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors);
1731 }
1732 
1733 int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
1734                       int64_t pos, int size)
1735 {
1736     QEMUIOVector qiov;
1737     struct iovec iov = {
1738         .iov_base   = (void *) buf,
1739         .iov_len    = size,
1740     };
1741 
1742     qemu_iovec_init_external(&qiov, &iov, 1);
1743     return bdrv_writev_vmstate(bs, &qiov, pos);
1744 }
1745 
1746 int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
1747 {
1748     BlockDriver *drv = bs->drv;
1749 
1750     if (!drv) {
1751         return -ENOMEDIUM;
1752     } else if (drv->bdrv_save_vmstate) {
1753         return drv->bdrv_save_vmstate(bs, qiov, pos);
1754     } else if (bs->file) {
1755         return bdrv_writev_vmstate(bs->file->bs, qiov, pos);
1756     }
1757 
1758     return -ENOTSUP;
1759 }
1760 
1761 int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
1762                       int64_t pos, int size)
1763 {
1764     BlockDriver *drv = bs->drv;
1765     if (!drv)
1766         return -ENOMEDIUM;
1767     if (drv->bdrv_load_vmstate)
1768         return drv->bdrv_load_vmstate(bs, buf, pos, size);
1769     if (bs->file)
1770         return bdrv_load_vmstate(bs->file->bs, buf, pos, size);
1771     return -ENOTSUP;
1772 }
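
/* Illustrative sketch (not part of the original source): saving and later
 * loading a small, hypothetical state blob through the vmstate helpers above.
 * The buffer, its size and the offset are made up for illustration.
 *
 *     uint8_t blob[128];
 *     ...
 *     if (bdrv_save_vmstate(bs, blob, 0, sizeof(blob)) < 0) {
 *         ... handle error ...
 *     }
 *     ...
 *     if (bdrv_load_vmstate(bs, blob, 0, sizeof(blob)) < 0) {
 *         ... handle error ...
 *     }
 */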
1773 
1774 /**************************************************************/
1775 /* async I/Os */
1776 
1777 BlockAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
1778                            QEMUIOVector *qiov, int nb_sectors,
1779                            BlockCompletionFunc *cb, void *opaque)
1780 {
1781     trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque);
1782 
1783     return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
1784                                  cb, opaque, false);
1785 }
1786 
1787 BlockAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
1788                             QEMUIOVector *qiov, int nb_sectors,
1789                             BlockCompletionFunc *cb, void *opaque)
1790 {
1791     trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque);
1792 
1793     return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
1794                                  cb, opaque, true);
1795 }
1796 
1797 BlockAIOCB *bdrv_aio_write_zeroes(BlockDriverState *bs,
1798         int64_t sector_num, int nb_sectors, BdrvRequestFlags flags,
1799         BlockCompletionFunc *cb, void *opaque)
1800 {
1801     trace_bdrv_aio_write_zeroes(bs, sector_num, nb_sectors, flags, opaque);
1802 
1803     return bdrv_co_aio_rw_vector(bs, sector_num, NULL, nb_sectors,
1804                                  BDRV_REQ_ZERO_WRITE | flags,
1805                                  cb, opaque, true);
1806 }
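
/* Illustrative sketch (not part of the original source): issuing an
 * asynchronous read with a completion callback.  The callback and its opaque
 * pointer are hypothetical.
 *
 *     static void my_read_cb(void *opaque, int ret)
 *     {
 *         ... ret < 0 means the read failed ...
 *     }
 *
 *     BlockAIOCB *acb = bdrv_aio_readv(bs, sector_num, &qiov, nb_sectors,
 *                                      my_read_cb, my_opaque);
 *     ... my_read_cb() runs later from the BlockDriverState's AioContext and
 *         never before bdrv_aio_readv() itself has returned ...
 */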
1807 
1808 
1809 typedef struct MultiwriteCB {
1810     int error;
1811     int num_requests;
1812     int num_callbacks;
1813     struct {
1814         BlockCompletionFunc *cb;
1815         void *opaque;
1816         QEMUIOVector *free_qiov;
1817     } callbacks[];
1818 } MultiwriteCB;
1819 
1820 static void multiwrite_user_cb(MultiwriteCB *mcb)
1821 {
1822     int i;
1823 
1824     for (i = 0; i < mcb->num_callbacks; i++) {
1825         mcb->callbacks[i].cb(mcb->callbacks[i].opaque, mcb->error);
1826         if (mcb->callbacks[i].free_qiov) {
1827             qemu_iovec_destroy(mcb->callbacks[i].free_qiov);
1828         }
1829         g_free(mcb->callbacks[i].free_qiov);
1830     }
1831 }
1832 
1833 static void multiwrite_cb(void *opaque, int ret)
1834 {
1835     MultiwriteCB *mcb = opaque;
1836 
1837     trace_multiwrite_cb(mcb, ret);
1838 
1839     if (ret < 0 && !mcb->error) {
1840         mcb->error = ret;
1841     }
1842 
1843     mcb->num_requests--;
1844     if (mcb->num_requests == 0) {
1845         multiwrite_user_cb(mcb);
1846         g_free(mcb);
1847     }
1848 }
1849 
1850 static int multiwrite_req_compare(const void *a, const void *b)
1851 {
1852     const BlockRequest *req1 = a, *req2 = b;
1853 
1854     /*
1855      * Note that we can't simply subtract req2->sector from req1->sector
1856      * here as that could overflow the return value.
1857      */
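    /* For example (purely illustrative): with req1->sector == (1LL << 32) and
     * req2->sector == 0, the true difference (1LL << 32) could truncate to 0
     * in the int return value and the requests would wrongly compare equal.
     */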
1858     if (req1->sector > req2->sector) {
1859         return 1;
1860     } else if (req1->sector < req2->sector) {
1861         return -1;
1862     } else {
1863         return 0;
1864     }
1865 }
1866 
1867 /*
1868  * Takes a bunch of requests and tries to merge them. Returns the number of
1869  * requests that remain after merging.
1870  */
1871 static int multiwrite_merge(BlockDriverState *bs, BlockRequest *reqs,
1872     int num_reqs, MultiwriteCB *mcb)
1873 {
1874     int i, outidx;
1875 
1876     // Sort requests by start sector
1877     qsort(reqs, num_reqs, sizeof(*reqs), &multiwrite_req_compare);
1878 
1879     // Check if adjacent requests touch the same clusters. If so, combine them,
1880     // filling up gaps with zero sectors.
1881     outidx = 0;
1882     for (i = 1; i < num_reqs; i++) {
1883         int merge = 0;
1884         int64_t oldreq_last = reqs[outidx].sector + reqs[outidx].nb_sectors;
1885 
1886         // Handle exactly sequential writes and overlapping writes.
1887         if (reqs[i].sector <= oldreq_last) {
1888             merge = 1;
1889         }
1890 
1891         if (reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1 >
1892             bs->bl.max_iov) {
1893             merge = 0;
1894         }
1895 
1896         if (bs->bl.max_transfer_length && reqs[outidx].nb_sectors +
1897             reqs[i].nb_sectors > bs->bl.max_transfer_length) {
1898             merge = 0;
1899         }
1900 
1901         if (merge) {
1902             size_t size;
1903             QEMUIOVector *qiov = g_malloc0(sizeof(*qiov));
1904             qemu_iovec_init(qiov,
1905                 reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1);
1906 
1907             // Add the first request to the merged one. If the requests are
1908             // overlapping, drop the last sectors of the first request.
1909             size = (reqs[i].sector - reqs[outidx].sector) << 9;
1910             qemu_iovec_concat(qiov, reqs[outidx].qiov, 0, size);
1911 
1912             // We shouldn't need to add any zeros between the two requests
1913             assert(reqs[i].sector <= oldreq_last);
1914 
1915             // Add the second request
1916             qemu_iovec_concat(qiov, reqs[i].qiov, 0, reqs[i].qiov->size);
1917 
1918             // Add tail of first request, if necessary
1919             if (qiov->size < reqs[outidx].qiov->size) {
1920                 qemu_iovec_concat(qiov, reqs[outidx].qiov, qiov->size,
1921                                   reqs[outidx].qiov->size - qiov->size);
1922             }
1923 
1924             reqs[outidx].nb_sectors = qiov->size >> 9;
1925             reqs[outidx].qiov = qiov;
1926 
1927             mcb->callbacks[i].free_qiov = reqs[outidx].qiov;
1928         } else {
1929             outidx++;
1930             reqs[outidx].sector     = reqs[i].sector;
1931             reqs[outidx].nb_sectors = reqs[i].nb_sectors;
1932             reqs[outidx].qiov       = reqs[i].qiov;
1933         }
1934     }
1935 
1936     if (bs->blk) {
1937         block_acct_merge_done(blk_get_stats(bs->blk), BLOCK_ACCT_WRITE,
1938                               num_reqs - outidx - 1);
1939     }
1940 
1941     return outidx + 1;
1942 }
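
/* Worked example (illustrative, not part of the original source): two requests
 * covering sectors [0, 8) and [4, 12) are sorted, found to overlap (4 <= 8),
 * and merged into one request for sectors [0, 12): the first 4 sectors of the
 * first qiov, then all 8 sectors of the second, concatenated into a new qiov.
 */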
1943 
1944 /*
1945  * Submit multiple AIO write requests at once.
1946  *
1947  * On success, the function returns 0 and all requests in the reqs array have
1948  * been submitted. In the error case this function returns -1, and any of the
1949  * requests may or may not have been submitted yet. In particular, this means
1950  * that the callback will be called for some of the requests but not for
1951  * others. The caller must check the error field of the BlockRequest to decide
1952  * which callbacks to wait for (if error != 0, no callback will be called).
1953  *
1954  * The implementation may modify the contents of the reqs array, e.g. to merge
1955  * requests. However, the fields opaque and error are left unmodified as they
1956  * are used to signal failure for a single request to the caller.
1957  */
1958 int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs)
1959 {
1960     MultiwriteCB *mcb;
1961     int i;
1962 
1963     /* don't submit writes if we don't have a medium */
1964     if (bs->drv == NULL) {
1965         for (i = 0; i < num_reqs; i++) {
1966             reqs[i].error = -ENOMEDIUM;
1967         }
1968         return -1;
1969     }
1970 
1971     if (num_reqs == 0) {
1972         return 0;
1973     }
1974 
1975     // Create MultiwriteCB structure
1976     mcb = g_malloc0(sizeof(*mcb) + num_reqs * sizeof(*mcb->callbacks));
1977     mcb->num_requests = 0;
1978     mcb->num_callbacks = num_reqs;
1979 
1980     for (i = 0; i < num_reqs; i++) {
1981         mcb->callbacks[i].cb = reqs[i].cb;
1982         mcb->callbacks[i].opaque = reqs[i].opaque;
1983     }
1984 
1985     // Check for mergeable requests
1986     num_reqs = multiwrite_merge(bs, reqs, num_reqs, mcb);
1987 
1988     trace_bdrv_aio_multiwrite(mcb, mcb->num_callbacks, num_reqs);
1989 
1990     /* Run the aio requests. */
1991     mcb->num_requests = num_reqs;
1992     for (i = 0; i < num_reqs; i++) {
1993         bdrv_co_aio_rw_vector(bs, reqs[i].sector, reqs[i].qiov,
1994                               reqs[i].nb_sectors, reqs[i].flags,
1995                               multiwrite_cb, mcb,
1996                               true);
1997     }
1998 
1999     return 0;
2000 }
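
/* Illustrative sketch (not part of the original source): a hypothetical caller
 * batching two writes through bdrv_aio_multiwrite().  The callback, opaque
 * pointers and qiovs are made up for illustration.
 *
 *     BlockRequest reqs[2] = {
 *         { .sector = 0,  .nb_sectors = 8, .qiov = &qiov0,
 *           .cb = my_write_cb, .opaque = req0_opaque },
 *         { .sector = 16, .nb_sectors = 8, .qiov = &qiov1,
 *           .cb = my_write_cb, .opaque = req1_opaque },
 *     };
 *
 *     if (bdrv_aio_multiwrite(bs, reqs, 2) < 0) {
 *         ... check reqs[i].error to see which callbacks will still be run ...
 *     }
 */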
2001 
2002 void bdrv_aio_cancel(BlockAIOCB *acb)
2003 {
2004     qemu_aio_ref(acb);
2005     bdrv_aio_cancel_async(acb);
2006     while (acb->refcnt > 1) {
2007         if (acb->aiocb_info->get_aio_context) {
2008             aio_poll(acb->aiocb_info->get_aio_context(acb), true);
2009         } else if (acb->bs) {
2010             aio_poll(bdrv_get_aio_context(acb->bs), true);
2011         } else {
2012             abort();
2013         }
2014     }
2015     qemu_aio_unref(acb);
2016 }
2017 
2018 /* Async version of aio cancel. The caller is not blocked if the acb implements
2019  * cancel_async; otherwise we do nothing and let the request complete normally.
2020  * In either case the completion callback must be called. */
2021 void bdrv_aio_cancel_async(BlockAIOCB *acb)
2022 {
2023     if (acb->aiocb_info->cancel_async) {
2024         acb->aiocb_info->cancel_async(acb);
2025     }
2026 }
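
/* Illustrative sketch (not part of the original source): cancelling an
 * in-flight request.  bdrv_aio_cancel() blocks until the request has
 * completed and its callback has run, while bdrv_aio_cancel_async() merely
 * requests cancellation and returns immediately.
 *
 *     BlockAIOCB *acb = bdrv_aio_readv(bs, sector_num, &qiov, nb_sectors,
 *                                      my_read_cb, my_opaque);
 *     ...
 *     bdrv_aio_cancel(acb);  // my_read_cb() has run by the time this returns
 */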
2027 
2028 /**************************************************************/
2029 /* async block device emulation */
2030 
2031 typedef struct BlockAIOCBSync {
2032     BlockAIOCB common;
2033     QEMUBH *bh;
2034     int ret;
2035     /* vector translation state */
2036     QEMUIOVector *qiov;
2037     uint8_t *bounce;
2038     int is_write;
2039 } BlockAIOCBSync;
2040 
2041 static const AIOCBInfo bdrv_em_aiocb_info = {
2042     .aiocb_size         = sizeof(BlockAIOCBSync),
2043 };
2044 
2045 static void bdrv_aio_bh_cb(void *opaque)
2046 {
2047     BlockAIOCBSync *acb = opaque;
2048 
2049     if (!acb->is_write && acb->ret >= 0) {
2050         qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
2051     }
2052     qemu_vfree(acb->bounce);
2053     acb->common.cb(acb->common.opaque, acb->ret);
2054     qemu_bh_delete(acb->bh);
2055     acb->bh = NULL;
2056     qemu_aio_unref(acb);
2057 }
2058 
2059 static BlockAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs,
2060                                       int64_t sector_num,
2061                                       QEMUIOVector *qiov,
2062                                       int nb_sectors,
2063                                       BlockCompletionFunc *cb,
2064                                       void *opaque,
2065                                       int is_write)
2066 
2067 {
2068     BlockAIOCBSync *acb;
2069 
2070     acb = qemu_aio_get(&bdrv_em_aiocb_info, bs, cb, opaque);
2071     acb->is_write = is_write;
2072     acb->qiov = qiov;
2073     acb->bounce = qemu_try_blockalign(bs, qiov->size);
2074     acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_aio_bh_cb, acb);
2075 
2076     if (acb->bounce == NULL) {
2077         acb->ret = -ENOMEM;
2078     } else if (is_write) {
2079         qemu_iovec_to_buf(acb->qiov, 0, acb->bounce, qiov->size);
2080         acb->ret = bs->drv->bdrv_write(bs, sector_num, acb->bounce, nb_sectors);
2081     } else {
2082         acb->ret = bs->drv->bdrv_read(bs, sector_num, acb->bounce, nb_sectors);
2083     }
2084 
2085     qemu_bh_schedule(acb->bh);
2086 
2087     return &acb->common;
2088 }
2089 
2090 static BlockAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
2091         int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
2092         BlockCompletionFunc *cb, void *opaque)
2093 {
2094     return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
2095 }
2096 
2097 static BlockAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
2098         int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
2099         BlockCompletionFunc *cb, void *opaque)
2100 {
2101     return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 1);
2102 }
2103 
2104 
2105 typedef struct BlockAIOCBCoroutine {
2106     BlockAIOCB common;
2107     BlockRequest req;
2108     bool is_write;
2109     bool need_bh;
2110     bool *done;
2111     QEMUBH* bh;
2112 } BlockAIOCBCoroutine;
2113 
2114 static const AIOCBInfo bdrv_em_co_aiocb_info = {
2115     .aiocb_size         = sizeof(BlockAIOCBCoroutine),
2116 };
2117 
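/* Completion handling for the coroutine-based AIOCBs below: the submitter sets
 * need_bh before entering the coroutine.  If the coroutine finishes before the
 * submitter regains control, bdrv_co_complete() sees need_bh still set and
 * does nothing; bdrv_co_maybe_schedule_bh() then defers completion to a bottom
 * half, so the caller's callback never runs before the submitting bdrv_aio_*()
 * function has returned its BlockAIOCB.  If the coroutine is still in flight,
 * need_bh is simply cleared and the callback is invoked directly from
 * bdrv_co_complete() when the request finishes.
 */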
2118 static void bdrv_co_complete(BlockAIOCBCoroutine *acb)
2119 {
2120     if (!acb->need_bh) {
2121         acb->common.cb(acb->common.opaque, acb->req.error);
2122         qemu_aio_unref(acb);
2123     }
2124 }
2125 
2126 static void bdrv_co_em_bh(void *opaque)
2127 {
2128     BlockAIOCBCoroutine *acb = opaque;
2129 
2130     assert(!acb->need_bh);
2131     qemu_bh_delete(acb->bh);
2132     bdrv_co_complete(acb);
2133 }
2134 
2135 static void bdrv_co_maybe_schedule_bh(BlockAIOCBCoroutine *acb)
2136 {
2137     acb->need_bh = false;
2138     if (acb->req.error != -EINPROGRESS) {
2139         BlockDriverState *bs = acb->common.bs;
2140 
2141         acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
2142         qemu_bh_schedule(acb->bh);
2143     }
2144 }
2145 
2146 /* Invoke bdrv_co_do_readv/bdrv_co_do_writev */
2147 static void coroutine_fn bdrv_co_do_rw(void *opaque)
2148 {
2149     BlockAIOCBCoroutine *acb = opaque;
2150     BlockDriverState *bs = acb->common.bs;
2151 
2152     if (!acb->is_write) {
2153         acb->req.error = bdrv_co_do_readv(bs, acb->req.sector,
2154             acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
2155     } else {
2156         acb->req.error = bdrv_co_do_writev(bs, acb->req.sector,
2157             acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
2158     }
2159 
2160     bdrv_co_complete(acb);
2161 }
2162 
2163 static BlockAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
2164                                          int64_t sector_num,
2165                                          QEMUIOVector *qiov,
2166                                          int nb_sectors,
2167                                          BdrvRequestFlags flags,
2168                                          BlockCompletionFunc *cb,
2169                                          void *opaque,
2170                                          bool is_write)
2171 {
2172     Coroutine *co;
2173     BlockAIOCBCoroutine *acb;
2174 
2175     acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
2176     acb->need_bh = true;
2177     acb->req.error = -EINPROGRESS;
2178     acb->req.sector = sector_num;
2179     acb->req.nb_sectors = nb_sectors;
2180     acb->req.qiov = qiov;
2181     acb->req.flags = flags;
2182     acb->is_write = is_write;
2183 
2184     co = qemu_coroutine_create(bdrv_co_do_rw);
2185     qemu_coroutine_enter(co, acb);
2186 
2187     bdrv_co_maybe_schedule_bh(acb);
2188     return &acb->common;
2189 }
2190 
2191 static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque)
2192 {
2193     BlockAIOCBCoroutine *acb = opaque;
2194     BlockDriverState *bs = acb->common.bs;
2195 
2196     acb->req.error = bdrv_co_flush(bs);
2197     bdrv_co_complete(acb);
2198 }
2199 
2200 BlockAIOCB *bdrv_aio_flush(BlockDriverState *bs,
2201         BlockCompletionFunc *cb, void *opaque)
2202 {
2203     trace_bdrv_aio_flush(bs, opaque);
2204 
2205     Coroutine *co;
2206     BlockAIOCBCoroutine *acb;
2207 
2208     acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
2209     acb->need_bh = true;
2210     acb->req.error = -EINPROGRESS;
2211 
2212     co = qemu_coroutine_create(bdrv_aio_flush_co_entry);
2213     qemu_coroutine_enter(co, acb);
2214 
2215     bdrv_co_maybe_schedule_bh(acb);
2216     return &acb->common;
2217 }
2218 
2219 static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque)
2220 {
2221     BlockAIOCBCoroutine *acb = opaque;
2222     BlockDriverState *bs = acb->common.bs;
2223 
2224     acb->req.error = bdrv_co_discard(bs, acb->req.sector, acb->req.nb_sectors);
2225     bdrv_co_complete(acb);
2226 }
2227 
2228 BlockAIOCB *bdrv_aio_discard(BlockDriverState *bs,
2229         int64_t sector_num, int nb_sectors,
2230         BlockCompletionFunc *cb, void *opaque)
2231 {
2232     Coroutine *co;
2233     BlockAIOCBCoroutine *acb;
2234 
2235     trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque);
2236 
2237     acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
2238     acb->need_bh = true;
2239     acb->req.error = -EINPROGRESS;
2240     acb->req.sector = sector_num;
2241     acb->req.nb_sectors = nb_sectors;
2242     co = qemu_coroutine_create(bdrv_aio_discard_co_entry);
2243     qemu_coroutine_enter(co, acb);
2244 
2245     bdrv_co_maybe_schedule_bh(acb);
2246     return &acb->common;
2247 }
2248 
2249 void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
2250                    BlockCompletionFunc *cb, void *opaque)
2251 {
2252     BlockAIOCB *acb;
2253 
2254     acb = g_malloc(aiocb_info->aiocb_size);
2255     acb->aiocb_info = aiocb_info;
2256     acb->bs = bs;
2257     acb->cb = cb;
2258     acb->opaque = opaque;
2259     acb->refcnt = 1;
2260     return acb;
2261 }
2262 
2263 void qemu_aio_ref(void *p)
2264 {
2265     BlockAIOCB *acb = p;
2266     acb->refcnt++;
2267 }
2268 
2269 void qemu_aio_unref(void *p)
2270 {
2271     BlockAIOCB *acb = p;
2272     assert(acb->refcnt > 0);
2273     if (--acb->refcnt == 0) {
2274         g_free(acb);
2275     }
2276 }
2277 
2278 /**************************************************************/
2279 /* Coroutine block device emulation */
2280 
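/* Bridge between the AIO callback interface and coroutines: a coroutine fills
 * in its own handle, submits a driver bdrv_aio_* request with
 * bdrv_co_io_em_complete() as the completion callback, and yields.  The
 * callback stores the result and re-enters the coroutine, which then picks up
 * co.ret.  This pattern is used by bdrv_co_io_em(), bdrv_co_flush() and
 * bdrv_co_discard() below.
 */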
2281 typedef struct CoroutineIOCompletion {
2282     Coroutine *coroutine;
2283     int ret;
2284 } CoroutineIOCompletion;
2285 
2286 static void bdrv_co_io_em_complete(void *opaque, int ret)
2287 {
2288     CoroutineIOCompletion *co = opaque;
2289 
2290     co->ret = ret;
2291     qemu_coroutine_enter(co->coroutine, NULL);
2292 }
2293 
2294 static int coroutine_fn bdrv_co_io_em(BlockDriverState *bs, int64_t sector_num,
2295                                       int nb_sectors, QEMUIOVector *iov,
2296                                       bool is_write)
2297 {
2298     CoroutineIOCompletion co = {
2299         .coroutine = qemu_coroutine_self(),
2300     };
2301     BlockAIOCB *acb;
2302 
2303     if (is_write) {
2304         acb = bs->drv->bdrv_aio_writev(bs, sector_num, iov, nb_sectors,
2305                                        bdrv_co_io_em_complete, &co);
2306     } else {
2307         acb = bs->drv->bdrv_aio_readv(bs, sector_num, iov, nb_sectors,
2308                                       bdrv_co_io_em_complete, &co);
2309     }
2310 
2311     trace_bdrv_co_io_em(bs, sector_num, nb_sectors, is_write, acb);
2312     if (!acb) {
2313         return -EIO;
2314     }
2315     qemu_coroutine_yield();
2316 
2317     return co.ret;
2318 }
2319 
2320 static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
2321                                          int64_t sector_num, int nb_sectors,
2322                                          QEMUIOVector *iov)
2323 {
2324     return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, false);
2325 }
2326 
2327 static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
2328                                          int64_t sector_num, int nb_sectors,
2329                                          QEMUIOVector *iov)
2330 {
2331     return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, true);
2332 }
2333 
2334 static void coroutine_fn bdrv_flush_co_entry(void *opaque)
2335 {
2336     RwCo *rwco = opaque;
2337 
2338     rwco->ret = bdrv_co_flush(rwco->bs);
2339 }
2340 
2341 int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
2342 {
2343     int ret;
2344     BdrvTrackedRequest req;
2345 
2346     if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs) ||
2347         bdrv_is_sg(bs)) {
2348         return 0;
2349     }
2350 
2351     tracked_request_begin(&req, bs, 0, 0, BDRV_TRACKED_FLUSH);
2352     /* Write back cached data to the OS even with cache=unsafe */
2353     BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS);
2354     if (bs->drv->bdrv_co_flush_to_os) {
2355         ret = bs->drv->bdrv_co_flush_to_os(bs);
2356         if (ret < 0) {
2357             goto out;
2358         }
2359     }
2360 
2361     /* But don't actually force it to the disk with cache=unsafe */
2362     if (bs->open_flags & BDRV_O_NO_FLUSH) {
2363         goto flush_parent;
2364     }
2365 
2366     BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK);
2367     if (bs->drv->bdrv_co_flush_to_disk) {
2368         ret = bs->drv->bdrv_co_flush_to_disk(bs);
2369     } else if (bs->drv->bdrv_aio_flush) {
2370         BlockAIOCB *acb;
2371         CoroutineIOCompletion co = {
2372             .coroutine = qemu_coroutine_self(),
2373         };
2374 
2375         acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
2376         if (acb == NULL) {
2377             ret = -EIO;
2378         } else {
2379             qemu_coroutine_yield();
2380             ret = co.ret;
2381         }
2382     } else {
2383         /*
2384          * Some block drivers always operate in either writethrough or unsafe
2385          * mode and therefore don't support bdrv_flush. Usually qemu doesn't
2386          * know how the server works (because the behaviour is hardcoded or
2387          * depends on server-side configuration), so we can't ensure that
2388          * everything is safe on disk. Returning an error doesn't work because
2389          * that would break guests even if the server operates in writethrough
2390          * mode.
2391          *
2392          * Let's hope the user knows what they're doing.
2393          */
2394         ret = 0;
2395     }
2396     if (ret < 0) {
2397         goto out;
2398     }
2399 
2400     /* Now flush the underlying protocol.  It will also have BDRV_O_NO_FLUSH
2401      * set in the case of cache=unsafe, so there are no useless flushes.
2402      */
2403 flush_parent:
2404     ret = bs->file ? bdrv_co_flush(bs->file->bs) : 0;
2405 out:
2406     tracked_request_end(&req);
2407     return ret;
2408 }
2409 
2410 int bdrv_flush(BlockDriverState *bs)
2411 {
2412     Coroutine *co;
2413     RwCo rwco = {
2414         .bs = bs,
2415         .ret = NOT_DONE,
2416     };
2417 
2418     if (qemu_in_coroutine()) {
2419         /* Fast-path if already in coroutine context */
2420         bdrv_flush_co_entry(&rwco);
2421     } else {
2422         AioContext *aio_context = bdrv_get_aio_context(bs);
2423 
2424         co = qemu_coroutine_create(bdrv_flush_co_entry);
2425         qemu_coroutine_enter(co, &rwco);
2426         while (rwco.ret == NOT_DONE) {
2427             aio_poll(aio_context, true);
2428         }
2429     }
2430 
2431     return rwco.ret;
2432 }
2433 
2434 typedef struct DiscardCo {
2435     BlockDriverState *bs;
2436     int64_t sector_num;
2437     int nb_sectors;
2438     int ret;
2439 } DiscardCo;
2440 static void coroutine_fn bdrv_discard_co_entry(void *opaque)
2441 {
2442     DiscardCo *rwco = opaque;
2443 
2444     rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors);
2445 }
2446 
2447 int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
2448                                  int nb_sectors)
2449 {
2450     BdrvTrackedRequest req;
2451     int max_discard, ret;
2452 
2453     if (!bs->drv) {
2454         return -ENOMEDIUM;
2455     }
2456 
2457     ret = bdrv_check_request(bs, sector_num, nb_sectors);
2458     if (ret < 0) {
2459         return ret;
2460     } else if (bs->read_only) {
2461         return -EPERM;
2462     }
2463     assert(!(bs->open_flags & BDRV_O_INACTIVE));
2464 
2465     /* Do nothing if disabled.  */
2466     if (!(bs->open_flags & BDRV_O_UNMAP)) {
2467         return 0;
2468     }
2469 
2470     if (!bs->drv->bdrv_co_discard && !bs->drv->bdrv_aio_discard) {
2471         return 0;
2472     }
2473 
2474     tracked_request_begin(&req, bs, sector_num, nb_sectors,
2475                           BDRV_TRACKED_DISCARD);
2476     bdrv_set_dirty(bs, sector_num, nb_sectors);
2477 
2478     max_discard = MIN_NON_ZERO(bs->bl.max_discard, BDRV_REQUEST_MAX_SECTORS);
2479     while (nb_sectors > 0) {
2480         int ret;
2481         int num = nb_sectors;
2482 
2483         /* align request */
2484         if (bs->bl.discard_alignment &&
2485             num >= bs->bl.discard_alignment &&
2486             sector_num % bs->bl.discard_alignment) {
2487             if (num > bs->bl.discard_alignment) {
2488                 num = bs->bl.discard_alignment;
2489             }
2490             num -= sector_num % bs->bl.discard_alignment;
2491         }
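
        /* Worked example (illustrative): with bs->bl.discard_alignment == 8,
         * sector_num == 10 and nb_sectors == 100, num is first clamped to 8
         * and then reduced by 10 % 8 == 2, so this iteration discards the six
         * sectors 10..15 and the next iteration starts aligned at sector 16.
         */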
2492 
2493         /* limit request size */
2494         if (num > max_discard) {
2495             num = max_discard;
2496         }
2497 
2498         if (bs->drv->bdrv_co_discard) {
2499             ret = bs->drv->bdrv_co_discard(bs, sector_num, num);
2500         } else {
2501             BlockAIOCB *acb;
2502             CoroutineIOCompletion co = {
2503                 .coroutine = qemu_coroutine_self(),
2504             };
2505 
2506             acb = bs->drv->bdrv_aio_discard(bs, sector_num, num,
2507                                             bdrv_co_io_em_complete, &co);
2508             if (acb == NULL) {
2509                 ret = -EIO;
2510                 goto out;
2511             } else {
2512                 qemu_coroutine_yield();
2513                 ret = co.ret;
2514             }
2515         }
2516         if (ret && ret != -ENOTSUP) {
2517             goto out;
2518         }
2519 
2520         sector_num += num;
2521         nb_sectors -= num;
2522     }
2523     ret = 0;
2524 out:
2525     tracked_request_end(&req);
2526     return ret;
2527 }
2528 
2529 int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
2530 {
2531     Coroutine *co;
2532     DiscardCo rwco = {
2533         .bs = bs,
2534         .sector_num = sector_num,
2535         .nb_sectors = nb_sectors,
2536         .ret = NOT_DONE,
2537     };
2538 
2539     if (qemu_in_coroutine()) {
2540         /* Fast-path if already in coroutine context */
2541         bdrv_discard_co_entry(&rwco);
2542     } else {
2543         AioContext *aio_context = bdrv_get_aio_context(bs);
2544 
2545         co = qemu_coroutine_create(bdrv_discard_co_entry);
2546         qemu_coroutine_enter(co, &rwco);
2547         while (rwco.ret == NOT_DONE) {
2548             aio_poll(aio_context, true);
2549         }
2550     }
2551 
2552     return rwco.ret;
2553 }
2554 
2555 typedef struct {
2556     CoroutineIOCompletion *co;
2557     QEMUBH *bh;
2558 } BdrvIoctlCompletionData;
2559 
2560 static void bdrv_ioctl_bh_cb(void *opaque)
2561 {
2562     BdrvIoctlCompletionData *data = opaque;
2563 
2564     bdrv_co_io_em_complete(data->co, -ENOTSUP);
2565     qemu_bh_delete(data->bh);
2566 }
2567 
2568 static int bdrv_co_do_ioctl(BlockDriverState *bs, int req, void *buf)
2569 {
2570     BlockDriver *drv = bs->drv;
2571     BdrvTrackedRequest tracked_req;
2572     CoroutineIOCompletion co = {
2573         .coroutine = qemu_coroutine_self(),
2574     };
2575     BlockAIOCB *acb;
2576 
2577     tracked_request_begin(&tracked_req, bs, 0, 0, BDRV_TRACKED_IOCTL);
2578     if (!drv || !drv->bdrv_aio_ioctl) {
2579         co.ret = -ENOTSUP;
2580         goto out;
2581     }
2582 
2583     acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co);
2584     if (!acb) {
2585         BdrvIoctlCompletionData *data = g_new(BdrvIoctlCompletionData, 1);
2586         data->bh = aio_bh_new(bdrv_get_aio_context(bs),
2587                                 bdrv_ioctl_bh_cb, data);
2588         data->co = &co;
2589         qemu_bh_schedule(data->bh);
2590     }
2591     qemu_coroutine_yield();
2592 out:
2593     tracked_request_end(&tracked_req);
2594     return co.ret;
2595 }
2596 
2597 typedef struct {
2598     BlockDriverState *bs;
2599     int req;
2600     void *buf;
2601     int ret;
2602 } BdrvIoctlCoData;
2603 
2604 static void coroutine_fn bdrv_co_ioctl_entry(void *opaque)
2605 {
2606     BdrvIoctlCoData *data = opaque;
2607     data->ret = bdrv_co_do_ioctl(data->bs, data->req, data->buf);
2608 }
2609 
2610 /* needed for generic scsi interface */
2611 int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
2612 {
2613     BdrvIoctlCoData data = {
2614         .bs = bs,
2615         .req = req,
2616         .buf = buf,
2617         .ret = -EINPROGRESS,
2618     };
2619 
2620     if (qemu_in_coroutine()) {
2621         /* Fast-path if already in coroutine context */
2622         bdrv_co_ioctl_entry(&data);
2623     } else {
2624         Coroutine *co = qemu_coroutine_create(bdrv_co_ioctl_entry);
2625 
2626         qemu_coroutine_enter(co, &data);
2627         while (data.ret == -EINPROGRESS) {
2628             aio_poll(bdrv_get_aio_context(bs), true);
2629         }
2630     }
2631     return data.ret;
2632 }
2633 
2634 static void coroutine_fn bdrv_co_aio_ioctl_entry(void *opaque)
2635 {
2636     BlockAIOCBCoroutine *acb = opaque;
2637     acb->req.error = bdrv_co_do_ioctl(acb->common.bs,
2638                                       acb->req.req, acb->req.buf);
2639     bdrv_co_complete(acb);
2640 }
2641 
2642 BlockAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
2643         unsigned long int req, void *buf,
2644         BlockCompletionFunc *cb, void *opaque)
2645 {
2646     BlockAIOCBCoroutine *acb = qemu_aio_get(&bdrv_em_co_aiocb_info,
2647                                             bs, cb, opaque);
2648     Coroutine *co;
2649 
2650     acb->need_bh = true;
2651     acb->req.error = -EINPROGRESS;
2652     acb->req.req = req;
2653     acb->req.buf = buf;
2654     co = qemu_coroutine_create(bdrv_co_aio_ioctl_entry);
2655     qemu_coroutine_enter(co, acb);
2656 
2657     bdrv_co_maybe_schedule_bh(acb);
2658     return &acb->common;
2659 }
2660 
2661 void *qemu_blockalign(BlockDriverState *bs, size_t size)
2662 {
2663     return qemu_memalign(bdrv_opt_mem_align(bs), size);
2664 }
2665 
2666 void *qemu_blockalign0(BlockDriverState *bs, size_t size)
2667 {
2668     return memset(qemu_blockalign(bs, size), 0, size);
2669 }
2670 
2671 void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
2672 {
2673     size_t align = bdrv_opt_mem_align(bs);
2674 
2675     /* Ensure that NULL is never returned on success */
2676     assert(align > 0);
2677     if (size == 0) {
2678         size = align;
2679     }
2680 
2681     return qemu_try_memalign(align, size);
2682 }
2683 
2684 void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
2685 {
2686     void *mem = qemu_try_blockalign(bs, size);
2687 
2688     if (mem) {
2689         memset(mem, 0, size);
2690     }
2691 
2692     return mem;
2693 }
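
/* Illustrative sketch (not part of the original source): allocating a
 * suitably aligned bounce buffer for bs and releasing it again.  The size is
 * made up for illustration.
 *
 *     uint8_t *bounce = qemu_try_blockalign(bs, 64 * 1024);
 *     if (bounce == NULL) {
 *         return -ENOMEM;
 *     }
 *     ...
 *     qemu_vfree(bounce);
 */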
2694 
2695 /*
2696  * Check if all memory in this vector is sector aligned.
2697  */
2698 bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
2699 {
2700     int i;
2701     size_t alignment = bdrv_min_mem_align(bs);
2702 
2703     for (i = 0; i < qiov->niov; i++) {
2704         if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
2705             return false;
2706         }
2707         if (qiov->iov[i].iov_len % alignment) {
2708             return false;
2709         }
2710     }
2711 
2712     return true;
2713 }
2714 
2715 void bdrv_add_before_write_notifier(BlockDriverState *bs,
2716                                     NotifierWithReturn *notifier)
2717 {
2718     notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
2719 }
2720 
2721 void bdrv_io_plug(BlockDriverState *bs)
2722 {
2723     BlockDriver *drv = bs->drv;
2724     if (drv && drv->bdrv_io_plug) {
2725         drv->bdrv_io_plug(bs);
2726     } else if (bs->file) {
2727         bdrv_io_plug(bs->file->bs);
2728     }
2729 }
2730 
2731 void bdrv_io_unplug(BlockDriverState *bs)
2732 {
2733     BlockDriver *drv = bs->drv;
2734     if (drv && drv->bdrv_io_unplug) {
2735         drv->bdrv_io_unplug(bs);
2736     } else if (bs->file) {
2737         bdrv_io_unplug(bs->file->bs);
2738     }
2739 }
2740 
2741 void bdrv_flush_io_queue(BlockDriverState *bs)
2742 {
2743     BlockDriver *drv = bs->drv;
2744     if (drv && drv->bdrv_flush_io_queue) {
2745         drv->bdrv_flush_io_queue(bs);
2746     } else if (bs->file) {
2747         bdrv_flush_io_queue(bs->file->bs);
2748     }
2749     bdrv_start_throttled_reqs(bs);
2750 }
2751 
2752 void bdrv_drained_begin(BlockDriverState *bs)
2753 {
2754     if (!bs->quiesce_counter++) {
2755         aio_disable_external(bdrv_get_aio_context(bs));
2756     }
2757     bdrv_drain(bs);
2758 }
2759 
2760 void bdrv_drained_end(BlockDriverState *bs)
2761 {
2762     assert(bs->quiesce_counter > 0);
2763     if (--bs->quiesce_counter > 0) {
2764         return;
2765     }
2766     aio_enable_external(bdrv_get_aio_context(bs));
2767 }
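
/* Illustrative sketch (not part of the original source): the usual pairing of
 * the two functions above around an operation that must not race with new
 * external I/O on bs.
 *
 *     bdrv_drained_begin(bs);
 *     ... no new external requests are accepted and previously in-flight
 *         requests have been drained; safe to manipulate bs here ...
 *     bdrv_drained_end(bs);
 */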
2768