xref: /openbmc/qemu/block/io.c (revision f348b6d1a53e5271cf1c9f9acc4646b4b98c1771)
1 /*
2  * Block layer I/O functions
3  *
4  * Copyright (c) 2003 Fabrice Bellard
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to deal
8  * in the Software without restriction, including without limitation the rights
9  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10  * copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22  * THE SOFTWARE.
23  */
24 
25 #include "qemu/osdep.h"
26 #include "trace.h"
27 #include "sysemu/block-backend.h"
28 #include "block/blockjob.h"
29 #include "block/block_int.h"
30 #include "block/throttle-groups.h"
31 #include "qemu/cutils.h"
32 #include "qapi/error.h"
33 #include "qemu/error-report.h"
34 
35 #define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */
36 
37 static BlockAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
38         int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
39         BlockCompletionFunc *cb, void *opaque);
40 static BlockAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
41         int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
42         BlockCompletionFunc *cb, void *opaque);
43 static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
44                                          int64_t sector_num, int nb_sectors,
45                                          QEMUIOVector *iov);
46 static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
47                                          int64_t sector_num, int nb_sectors,
48                                          QEMUIOVector *iov);
49 static BlockAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
50                                          int64_t sector_num,
51                                          QEMUIOVector *qiov,
52                                          int nb_sectors,
53                                          BdrvRequestFlags flags,
54                                          BlockCompletionFunc *cb,
55                                          void *opaque,
56                                          bool is_write);
57 static void coroutine_fn bdrv_co_do_rw(void *opaque);
58 static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
59     int64_t sector_num, int nb_sectors, BdrvRequestFlags flags);
60 
61 /* throttling disk I/O limits */
62 void bdrv_set_io_limits(BlockDriverState *bs,
63                         ThrottleConfig *cfg)
64 {
65     int i;
66 
67     throttle_group_config(bs, cfg);
68 
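    /* bs keeps two throttled request queues, one per direction (index 0 for
     * reads, index 1 for writes); wake the head of each so queued requests are
     * re-evaluated against the new configuration. */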
69     for (i = 0; i < 2; i++) {
70         qemu_co_enter_next(&bs->throttled_reqs[i]);
71     }
72 }
73 
74 /* this function drains all the throttled I/Os */
75 static bool bdrv_start_throttled_reqs(BlockDriverState *bs)
76 {
77     bool drained = false;
78     bool enabled = bs->io_limits_enabled;
79     int i;
80 
81     bs->io_limits_enabled = false;
82 
83     for (i = 0; i < 2; i++) {
84         while (qemu_co_enter_next(&bs->throttled_reqs[i])) {
85             drained = true;
86         }
87     }
88 
89     bs->io_limits_enabled = enabled;
90 
91     return drained;
92 }
93 
94 void bdrv_io_limits_disable(BlockDriverState *bs)
95 {
96     bs->io_limits_enabled = false;
97     bdrv_start_throttled_reqs(bs);
98     throttle_group_unregister_bs(bs);
99 }
100 
101 /* should be called before bdrv_set_io_limits if a limit is set */
102 void bdrv_io_limits_enable(BlockDriverState *bs, const char *group)
103 {
104     assert(!bs->io_limits_enabled);
105     throttle_group_register_bs(bs, group);
106     bs->io_limits_enabled = true;
107 }
108 
109 void bdrv_io_limits_update_group(BlockDriverState *bs, const char *group)
110 {
111     /* this bs is not part of any group */
112     if (!bs->throttle_state) {
113         return;
114     }
115 
116     /* this bs is already part of the same group as the one we want */
117     if (!g_strcmp0(throttle_group_get_name(bs), group)) {
118         return;
119     }
120 
121     /* need to change the group this bs belongs to */
122     bdrv_io_limits_disable(bs);
123     bdrv_io_limits_enable(bs, group);
124 }
125 
126 void bdrv_setup_io_funcs(BlockDriver *bdrv)
127 {
128     /* Block drivers without coroutine functions need emulation */
129     if (!bdrv->bdrv_co_readv) {
130         bdrv->bdrv_co_readv = bdrv_co_readv_em;
131         bdrv->bdrv_co_writev = bdrv_co_writev_em;
132 
133         /* bdrv_co_readv_em()/bdrv_co_writev_em() work in terms of aio, so if
134          * the block driver lacks aio we need to emulate that too.
135          */
136         if (!bdrv->bdrv_aio_readv) {
137             /* add AIO emulation layer */
138             bdrv->bdrv_aio_readv = bdrv_aio_readv_em;
139             bdrv->bdrv_aio_writev = bdrv_aio_writev_em;
140         }
141     }
142 }
143 
144 void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
145 {
146     BlockDriver *drv = bs->drv;
147     Error *local_err = NULL;
148 
149     memset(&bs->bl, 0, sizeof(bs->bl));
150 
151     if (!drv) {
152         return;
153     }
154 
155     /* Take some limits from the children as a default */
156     if (bs->file) {
157         bdrv_refresh_limits(bs->file->bs, &local_err);
158         if (local_err) {
159             error_propagate(errp, local_err);
160             return;
161         }
162         bs->bl.opt_transfer_length = bs->file->bs->bl.opt_transfer_length;
163         bs->bl.max_transfer_length = bs->file->bs->bl.max_transfer_length;
164         bs->bl.min_mem_alignment = bs->file->bs->bl.min_mem_alignment;
165         bs->bl.opt_mem_alignment = bs->file->bs->bl.opt_mem_alignment;
166         bs->bl.max_iov = bs->file->bs->bl.max_iov;
167     } else {
168         bs->bl.min_mem_alignment = 512;
169         bs->bl.opt_mem_alignment = getpagesize();
170 
171         /* Safe default since most protocols use readv()/writev()/etc */
172         bs->bl.max_iov = IOV_MAX;
173     }
174 
175     if (bs->backing) {
176         bdrv_refresh_limits(bs->backing->bs, &local_err);
177         if (local_err) {
178             error_propagate(errp, local_err);
179             return;
180         }
181         bs->bl.opt_transfer_length =
182             MAX(bs->bl.opt_transfer_length,
183                 bs->backing->bs->bl.opt_transfer_length);
184         bs->bl.max_transfer_length =
185             MIN_NON_ZERO(bs->bl.max_transfer_length,
186                          bs->backing->bs->bl.max_transfer_length);
187         bs->bl.opt_mem_alignment =
188             MAX(bs->bl.opt_mem_alignment,
189                 bs->backing->bs->bl.opt_mem_alignment);
190         bs->bl.min_mem_alignment =
191             MAX(bs->bl.min_mem_alignment,
192                 bs->backing->bs->bl.min_mem_alignment);
193         bs->bl.max_iov =
194             MIN(bs->bl.max_iov,
195                 bs->backing->bs->bl.max_iov);
196     }
197 
198     /* Then let the driver override it */
199     if (drv->bdrv_refresh_limits) {
200         drv->bdrv_refresh_limits(bs, errp);
201     }
202 }
203 
204 /**
205  * The copy-on-read flag is actually a reference count, so multiple users may
206  * enable the feature without worrying about clobbering its previous state.
207  * Copy-on-read stays enabled until every user has disabled it again.
208  */
209 void bdrv_enable_copy_on_read(BlockDriverState *bs)
210 {
211     bs->copy_on_read++;
212 }
213 
214 void bdrv_disable_copy_on_read(BlockDriverState *bs)
215 {
216     assert(bs->copy_on_read > 0);
217     bs->copy_on_read--;
218 }
219 
220 /* Check if any requests are in-flight (including throttled requests) */
221 bool bdrv_requests_pending(BlockDriverState *bs)
222 {
223     BdrvChild *child;
224 
225     if (!QLIST_EMPTY(&bs->tracked_requests)) {
226         return true;
227     }
228     if (!qemu_co_queue_empty(&bs->throttled_reqs[0])) {
229         return true;
230     }
231     if (!qemu_co_queue_empty(&bs->throttled_reqs[1])) {
232         return true;
233     }
234 
235     QLIST_FOREACH(child, &bs->children, next) {
236         if (bdrv_requests_pending(child->bs)) {
237             return true;
238         }
239     }
240 
241     return false;
242 }
243 
244 static void bdrv_drain_recurse(BlockDriverState *bs)
245 {
246     BdrvChild *child;
247 
248     if (bs->drv && bs->drv->bdrv_drain) {
249         bs->drv->bdrv_drain(bs);
250     }
251     QLIST_FOREACH(child, &bs->children, next) {
252         bdrv_drain_recurse(child->bs);
253     }
254 }
255 
256 /*
257  * Wait for pending requests to complete on a single BlockDriverState subtree,
258  * and suspend the block driver's internal I/O until the next request arrives.
259  *
260  * Note that unlike bdrv_drain_all(), the caller must hold the BlockDriverState's
261  * AioContext.
262  *
263  * Only this BlockDriverState's AioContext is run, so in-flight requests must
264  * not depend on events in other AioContexts.  In that case, use
265  * bdrv_drain_all() instead.
266  */
267 void bdrv_drain(BlockDriverState *bs)
268 {
269     bool busy = true;
270 
271     bdrv_drain_recurse(bs);
272     while (busy) {
273         /* Keep iterating */
274          bdrv_flush_io_queue(bs);
275          busy = bdrv_requests_pending(bs);
276          busy |= aio_poll(bdrv_get_aio_context(bs), busy);
277     }
278 }
279 
280 /*
281  * Wait for pending requests to complete across all BlockDriverStates
282  *
283  * This function does not flush data to disk, use bdrv_flush_all() for that
284  * after calling this function.
285  */
286 void bdrv_drain_all(void)
287 {
288     /* Always run first iteration so any pending completion BHs run */
289     bool busy = true;
290     BlockDriverState *bs = NULL;
291     GSList *aio_ctxs = NULL, *ctx;
292 
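    /* First pass: pause block jobs and quiesce driver-internal I/O on every
     * BDS, collecting the set of AioContexts that will need polling below. */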
293     while ((bs = bdrv_next(bs))) {
294         AioContext *aio_context = bdrv_get_aio_context(bs);
295 
296         aio_context_acquire(aio_context);
297         if (bs->job) {
298             block_job_pause(bs->job);
299         }
300         bdrv_drain_recurse(bs);
301         aio_context_release(aio_context);
302 
303         if (!g_slist_find(aio_ctxs, aio_context)) {
304             aio_ctxs = g_slist_prepend(aio_ctxs, aio_context);
305         }
306     }
307 
308     /* Note that completion of an asynchronous I/O operation can trigger any
309      * number of other I/O operations on other devices---for example a
310      * coroutine can submit an I/O request to another device in response to
311      * request completion.  Therefore we must keep looping until there was no
312      * more activity rather than simply draining each device independently.
313      */
314     while (busy) {
315         busy = false;
316 
317         for (ctx = aio_ctxs; ctx != NULL; ctx = ctx->next) {
318             AioContext *aio_context = ctx->data;
319             bs = NULL;
320 
321             aio_context_acquire(aio_context);
322             while ((bs = bdrv_next(bs))) {
323                 if (aio_context == bdrv_get_aio_context(bs)) {
324                     bdrv_flush_io_queue(bs);
325                     if (bdrv_requests_pending(bs)) {
326                         busy = true;
327                         aio_poll(aio_context, busy);
328                     }
329                 }
330             }
331             busy |= aio_poll(aio_context, false);
332             aio_context_release(aio_context);
333         }
334     }
335 
336     bs = NULL;
337     while ((bs = bdrv_next(bs))) {
338         AioContext *aio_context = bdrv_get_aio_context(bs);
339 
340         aio_context_acquire(aio_context);
341         if (bs->job) {
342             block_job_resume(bs->job);
343         }
344         aio_context_release(aio_context);
345     }
346     g_slist_free(aio_ctxs);
347 }
348 
349 /**
350  * Remove an active request from the tracked requests list
351  *
352  * This function should be called when a tracked request is completing.
353  */
354 static void tracked_request_end(BdrvTrackedRequest *req)
355 {
356     if (req->serialising) {
357         req->bs->serialising_in_flight--;
358     }
359 
360     QLIST_REMOVE(req, list);
361     qemu_co_queue_restart_all(&req->wait_queue);
362 }
363 
364 /**
365  * Add an active request to the tracked requests list
366  */
367 static void tracked_request_begin(BdrvTrackedRequest *req,
368                                   BlockDriverState *bs,
369                                   int64_t offset,
370                                   unsigned int bytes,
371                                   enum BdrvTrackedRequestType type)
372 {
373     *req = (BdrvTrackedRequest){
374         .bs = bs,
375         .offset         = offset,
376         .bytes          = bytes,
377         .type           = type,
378         .co             = qemu_coroutine_self(),
379         .serialising    = false,
380         .overlap_offset = offset,
381         .overlap_bytes  = bytes,
382     };
383 
384     qemu_co_queue_init(&req->wait_queue);
385 
386     QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
387 }
388 
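/**
 * Mark a request as serialising and widen its overlap window to @align
 * boundaries so that overlapping requests wait for it in
 * wait_serialising_requests().
 */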
389 static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
390 {
391     int64_t overlap_offset = req->offset & ~(align - 1);
392     unsigned int overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
393                                - overlap_offset;
394 
395     if (!req->serialising) {
396         req->bs->serialising_in_flight++;
397         req->serialising = true;
398     }
399 
400     req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
401     req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
402 }
403 
404 /**
405  * Round a region to cluster boundaries
406  */
407 void bdrv_round_to_clusters(BlockDriverState *bs,
408                             int64_t sector_num, int nb_sectors,
409                             int64_t *cluster_sector_num,
410                             int *cluster_nb_sectors)
411 {
412     BlockDriverInfo bdi;
413 
414     if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
415         *cluster_sector_num = sector_num;
416         *cluster_nb_sectors = nb_sectors;
417     } else {
418         int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE;
419         *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
420         *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
421                                             nb_sectors, c);
422     }
423 }
424 
425 static int bdrv_get_cluster_size(BlockDriverState *bs)
426 {
427     BlockDriverInfo bdi;
428     int ret;
429 
430     ret = bdrv_get_info(bs, &bdi);
431     if (ret < 0 || bdi.cluster_size == 0) {
432         return bs->request_alignment;
433     } else {
434         return bdi.cluster_size;
435     }
436 }
437 
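/* Return true if [offset, offset + bytes) intersects the overlap window of
 * the given request. */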
438 static bool tracked_request_overlaps(BdrvTrackedRequest *req,
439                                      int64_t offset, unsigned int bytes)
440 {
441     /*        aaaa   bbbb */
442     if (offset >= req->overlap_offset + req->overlap_bytes) {
443         return false;
444     }
445     /* bbbb   aaaa        */
446     if (req->overlap_offset >= offset + bytes) {
447         return false;
448     }
449     return true;
450 }
451 
452 static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
453 {
454     BlockDriverState *bs = self->bs;
455     BdrvTrackedRequest *req;
456     bool retry;
457     bool waited = false;
458 
459     if (!bs->serialising_in_flight) {
460         return false;
461     }
462 
463     do {
464         retry = false;
465         QLIST_FOREACH(req, &bs->tracked_requests, list) {
466             if (req == self || (!req->serialising && !self->serialising)) {
467                 continue;
468             }
469             if (tracked_request_overlaps(req, self->overlap_offset,
470                                          self->overlap_bytes))
471             {
472                 /* Hitting this means there was a reentrant request, for
473                  * example, a block driver issuing nested requests.  This must
474                  * never happen since it means deadlock.
475                  */
476                 assert(qemu_coroutine_self() != req->co);
477 
478                 /* If the request is already (indirectly) waiting for us, or
479                  * will wait for us as soon as it wakes up, then just go on
480                  * (instead of producing a deadlock in the former case). */
481                 if (!req->waiting_for) {
482                     self->waiting_for = req;
483                     qemu_co_queue_wait(&req->wait_queue);
484                     self->waiting_for = NULL;
485                     retry = true;
486                     waited = true;
487                     break;
488                 }
489             }
490         }
491     } while (retry);
492 
493     return waited;
494 }
495 
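/**
 * Basic sanity checks for a byte request: reject requests larger than the
 * maximum supported size, requests against a device with no medium inserted,
 * and negative offsets.
 */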
496 static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
497                                    size_t size)
498 {
499     if (size > BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS) {
500         return -EIO;
501     }
502 
503     if (!bdrv_is_inserted(bs)) {
504         return -ENOMEDIUM;
505     }
506 
507     if (offset < 0) {
508         return -EIO;
509     }
510 
511     return 0;
512 }
513 
514 static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
515                               int nb_sectors)
516 {
517     if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
518         return -EIO;
519     }
520 
521     return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE,
522                                    nb_sectors * BDRV_SECTOR_SIZE);
523 }
524 
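/* State shared between a synchronous caller and the coroutine that carries
 * out the request on its behalf (see bdrv_prwv_co()). */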
525 typedef struct RwCo {
526     BlockDriverState *bs;
527     int64_t offset;
528     QEMUIOVector *qiov;
529     bool is_write;
530     int ret;
531     BdrvRequestFlags flags;
532 } RwCo;
533 
534 static void coroutine_fn bdrv_rw_co_entry(void *opaque)
535 {
536     RwCo *rwco = opaque;
537 
538     if (!rwco->is_write) {
539         rwco->ret = bdrv_co_do_preadv(rwco->bs, rwco->offset,
540                                       rwco->qiov->size, rwco->qiov,
541                                       rwco->flags);
542     } else {
543         rwco->ret = bdrv_co_do_pwritev(rwco->bs, rwco->offset,
544                                        rwco->qiov->size, rwco->qiov,
545                                        rwco->flags);
546     }
547 }
548 
549 /*
550  * Process a vectored synchronous request using coroutines
551  */
552 static int bdrv_prwv_co(BlockDriverState *bs, int64_t offset,
553                         QEMUIOVector *qiov, bool is_write,
554                         BdrvRequestFlags flags)
555 {
556     Coroutine *co;
557     RwCo rwco = {
558         .bs = bs,
559         .offset = offset,
560         .qiov = qiov,
561         .is_write = is_write,
562         .ret = NOT_DONE,
563         .flags = flags,
564     };
565 
566     /**
567      * In a synchronous call context, when the vCPU is blocked, the throttling
568      * timer will not fire; so I/O throttling has to be disabled here if it has
569      * been enabled.
570      */
571     if (bs->io_limits_enabled) {
572         fprintf(stderr, "Disabling I/O throttling on '%s' due "
573                         "to synchronous I/O.\n", bdrv_get_device_name(bs));
574         bdrv_io_limits_disable(bs);
575     }
576 
577     if (qemu_in_coroutine()) {
578         /* Fast-path if already in coroutine context */
579         bdrv_rw_co_entry(&rwco);
580     } else {
581         AioContext *aio_context = bdrv_get_aio_context(bs);
582 
583         co = qemu_coroutine_create(bdrv_rw_co_entry);
584         qemu_coroutine_enter(co, &rwco);
585         while (rwco.ret == NOT_DONE) {
586             aio_poll(aio_context, true);
587         }
588     }
589     return rwco.ret;
590 }
591 
592 /*
593  * Process a synchronous request using coroutines
594  */
595 static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf,
596                       int nb_sectors, bool is_write, BdrvRequestFlags flags)
597 {
598     QEMUIOVector qiov;
599     struct iovec iov = {
600         .iov_base = (void *)buf,
601         .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
602     };
603 
604     if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
605         return -EINVAL;
606     }
607 
608     qemu_iovec_init_external(&qiov, &iov, 1);
609     return bdrv_prwv_co(bs, sector_num << BDRV_SECTOR_BITS,
610                         &qiov, is_write, flags);
611 }
612 
613 /* return < 0 if error. See bdrv_write() for the return codes */
614 int bdrv_read(BlockDriverState *bs, int64_t sector_num,
615               uint8_t *buf, int nb_sectors)
616 {
617     return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false, 0);
618 }
619 
620 /* Return < 0 if error. Important errors are:
621   -EIO         generic I/O error (may happen for all errors)
622   -ENOMEDIUM   No media inserted.
623   -EINVAL      Invalid sector number or nb_sectors
624   -EACCES      Trying to write a read-only device
625 */
626 int bdrv_write(BlockDriverState *bs, int64_t sector_num,
627                const uint8_t *buf, int nb_sectors)
628 {
629     return bdrv_rw_co(bs, sector_num, (uint8_t *)buf, nb_sectors, true, 0);
630 }
631 
632 int bdrv_write_zeroes(BlockDriverState *bs, int64_t sector_num,
633                       int nb_sectors, BdrvRequestFlags flags)
634 {
635     return bdrv_rw_co(bs, sector_num, NULL, nb_sectors, true,
636                       BDRV_REQ_ZERO_WRITE | flags);
637 }
638 
639 /*
640  * Completely zero out a block device with the help of bdrv_write_zeroes.
641  * The operation is sped up by checking the block status and only writing
642  * zeroes to the device if they currently do not return zeroes. Optional
643  * flags are passed through to bdrv_write_zeroes (e.g. BDRV_REQ_MAY_UNMAP).
644  *
645  * Returns < 0 on error, 0 on success. For error codes see bdrv_write().
646  */
647 int bdrv_make_zero(BlockDriverState *bs, BdrvRequestFlags flags)
648 {
649     int64_t target_sectors, ret, nb_sectors, sector_num = 0;
650     BlockDriverState *file;
651     int n;
652 
653     target_sectors = bdrv_nb_sectors(bs);
654     if (target_sectors < 0) {
655         return target_sectors;
656     }
657 
658     for (;;) {
659         nb_sectors = MIN(target_sectors - sector_num, BDRV_REQUEST_MAX_SECTORS);
660         if (nb_sectors <= 0) {
661             return 0;
662         }
663         ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &n, &file);
664         if (ret < 0) {
665             error_report("error getting block status at sector %" PRId64 ": %s",
666                          sector_num, strerror(-ret));
667             return ret;
668         }
669         if (ret & BDRV_BLOCK_ZERO) {
670             sector_num += n;
671             continue;
672         }
673         ret = bdrv_write_zeroes(bs, sector_num, n, flags);
674         if (ret < 0) {
675             error_report("error writing zeroes at sector %" PRId64 ": %s",
676                          sector_num, strerror(-ret));
677             return ret;
678         }
679         sector_num += n;
680     }
681 }
682 
683 int bdrv_pread(BlockDriverState *bs, int64_t offset, void *buf, int bytes)
684 {
685     QEMUIOVector qiov;
686     struct iovec iov = {
687         .iov_base = (void *)buf,
688         .iov_len = bytes,
689     };
690     int ret;
691 
692     if (bytes < 0) {
693         return -EINVAL;
694     }
695 
696     qemu_iovec_init_external(&qiov, &iov, 1);
697     ret = bdrv_prwv_co(bs, offset, &qiov, false, 0);
698     if (ret < 0) {
699         return ret;
700     }
701 
702     return bytes;
703 }
704 
705 int bdrv_pwritev(BlockDriverState *bs, int64_t offset, QEMUIOVector *qiov)
706 {
707     int ret;
708 
709     ret = bdrv_prwv_co(bs, offset, qiov, true, 0);
710     if (ret < 0) {
711         return ret;
712     }
713 
714     return qiov->size;
715 }
716 
717 int bdrv_pwrite(BlockDriverState *bs, int64_t offset,
718                 const void *buf, int bytes)
719 {
720     QEMUIOVector qiov;
721     struct iovec iov = {
722         .iov_base   = (void *) buf,
723         .iov_len    = bytes,
724     };
725 
726     if (bytes < 0) {
727         return -EINVAL;
728     }
729 
730     qemu_iovec_init_external(&qiov, &iov, 1);
731     return bdrv_pwritev(bs, offset, &qiov);
732 }
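
/* A minimal usage sketch (not from the original source) for the synchronous
 * byte-level helpers above, assuming 'bs' is an open BlockDriverState.  Both
 * helpers return the number of bytes transferred on success and a negative
 * errno on failure:
 *
 *     uint8_t header[512];
 *     int ret = bdrv_pread(bs, 0, header, sizeof(header));
 *     if (ret < 0) {
 *         return ret;
 *     }
 *     ret = bdrv_pwrite(bs, 0, header, sizeof(header));
 *     if (ret < 0) {
 *         return ret;
 *     }
 */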
733 
734 /*
735  * Writes to the file and ensures that no writes are reordered across this
736  * request (acts as a barrier)
737  *
738  * Returns 0 on success, -errno in error cases.
739  */
740 int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
741     const void *buf, int count)
742 {
743     int ret;
744 
745     ret = bdrv_pwrite(bs, offset, buf, count);
746     if (ret < 0) {
747         return ret;
748     }
749 
750     /* No flush needed for cache modes that already do it */
751     if (bs->enable_write_cache) {
752         bdrv_flush(bs);
753     }
754 
755     return 0;
756 }
757 
758 static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
759         int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
760 {
761     /* Perform I/O through a temporary buffer so that users who scribble over
762      * their read buffer while the operation is in progress do not end up
763      * modifying the image file.  This is critical for zero-copy guest I/O
764      * where anything might happen inside guest memory.
765      */
766     void *bounce_buffer;
767 
768     BlockDriver *drv = bs->drv;
769     struct iovec iov;
770     QEMUIOVector bounce_qiov;
771     int64_t cluster_sector_num;
772     int cluster_nb_sectors;
773     size_t skip_bytes;
774     int ret;
775 
776     /* Cover entire cluster so no additional backing file I/O is required when
777      * allocating a cluster in the image file.
778      */
779     bdrv_round_to_clusters(bs, sector_num, nb_sectors,
780                            &cluster_sector_num, &cluster_nb_sectors);
781 
782     trace_bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors,
783                                    cluster_sector_num, cluster_nb_sectors);
784 
785     iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE;
786     iov.iov_base = bounce_buffer = qemu_try_blockalign(bs, iov.iov_len);
787     if (bounce_buffer == NULL) {
788         ret = -ENOMEM;
789         goto err;
790     }
791 
792     qemu_iovec_init_external(&bounce_qiov, &iov, 1);
793 
794     ret = drv->bdrv_co_readv(bs, cluster_sector_num, cluster_nb_sectors,
795                              &bounce_qiov);
796     if (ret < 0) {
797         goto err;
798     }
799 
800     if (drv->bdrv_co_write_zeroes &&
801         buffer_is_zero(bounce_buffer, iov.iov_len)) {
802         ret = bdrv_co_do_write_zeroes(bs, cluster_sector_num,
803                                       cluster_nb_sectors, 0);
804     } else {
805         /* This does not change the data on the disk, so it is not necessary
806          * to flush even in cache=writethrough mode.
807          */
808         ret = drv->bdrv_co_writev(bs, cluster_sector_num, cluster_nb_sectors,
809                                   &bounce_qiov);
810     }
811 
812     if (ret < 0) {
813         /* It might be okay to ignore write errors for guest requests.  If this
814          * is a deliberate copy-on-read then we don't want to ignore the error.
815          * Simply report it in all cases.
816          */
817         goto err;
818     }
819 
820     skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE;
821     qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes,
822                         nb_sectors * BDRV_SECTOR_SIZE);
823 
824 err:
825     qemu_vfree(bounce_buffer);
826     return ret;
827 }
828 
829 /*
830  * Forwards an already correctly aligned request to the BlockDriver. This
831  * handles copy on read and zeroing after EOF; any other features must be
832  * implemented by the caller.
833  */
834 static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs,
835     BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
836     int64_t align, QEMUIOVector *qiov, int flags)
837 {
838     BlockDriver *drv = bs->drv;
839     int ret;
840 
841     int64_t sector_num = offset >> BDRV_SECTOR_BITS;
842     unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS;
843 
844     assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
845     assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
846     assert(!qiov || bytes == qiov->size);
847 
848     /* Handle Copy on Read and associated serialisation */
849     if (flags & BDRV_REQ_COPY_ON_READ) {
850         /* If we touch the same cluster it counts as an overlap.  This
851          * guarantees that allocating writes will be serialized and not race
852          * with each other for the same cluster.  For example, in copy-on-read
853          * it ensures that the CoR read and write operations are atomic and
854          * guest writes cannot interleave between them. */
855         mark_request_serialising(req, bdrv_get_cluster_size(bs));
856     }
857 
858     if (!(flags & BDRV_REQ_NO_SERIALISING)) {
859         wait_serialising_requests(req);
860     }
861 
862     if (flags & BDRV_REQ_COPY_ON_READ) {
863         int pnum;
864 
865         ret = bdrv_is_allocated(bs, sector_num, nb_sectors, &pnum);
866         if (ret < 0) {
867             goto out;
868         }
869 
870         if (!ret || pnum != nb_sectors) {
871             ret = bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, qiov);
872             goto out;
873         }
874     }
875 
876     /* Forward the request to the BlockDriver */
877     if (!bs->zero_beyond_eof) {
878         ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
879     } else {
880         /* Read zeros after EOF */
881         int64_t total_sectors, max_nb_sectors;
882 
883         total_sectors = bdrv_nb_sectors(bs);
884         if (total_sectors < 0) {
885             ret = total_sectors;
886             goto out;
887         }
888 
889         max_nb_sectors = ROUND_UP(MAX(0, total_sectors - sector_num),
890                                   align >> BDRV_SECTOR_BITS);
891         if (nb_sectors < max_nb_sectors) {
892             ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
893         } else if (max_nb_sectors > 0) {
894             QEMUIOVector local_qiov;
895 
896             qemu_iovec_init(&local_qiov, qiov->niov);
897             qemu_iovec_concat(&local_qiov, qiov, 0,
898                               max_nb_sectors * BDRV_SECTOR_SIZE);
899 
900             ret = drv->bdrv_co_readv(bs, sector_num, max_nb_sectors,
901                                      &local_qiov);
902 
903             qemu_iovec_destroy(&local_qiov);
904         } else {
905             ret = 0;
906         }
907 
908         /* Reading beyond end of file is supposed to produce zeroes */
909         if (ret == 0 && total_sectors < sector_num + nb_sectors) {
910             uint64_t offset = MAX(0, total_sectors - sector_num);
911             uint64_t bytes = (sector_num + nb_sectors - offset) *
912                               BDRV_SECTOR_SIZE;
913             qemu_iovec_memset(qiov, offset * BDRV_SECTOR_SIZE, 0, bytes);
914         }
915     }
916 
917 out:
918     return ret;
919 }
920 
921 /*
922  * Handle a read request in coroutine context
923  */
924 int coroutine_fn bdrv_co_do_preadv(BlockDriverState *bs,
925     int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
926     BdrvRequestFlags flags)
927 {
928     BlockDriver *drv = bs->drv;
929     BdrvTrackedRequest req;
930 
931     /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
932     uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
933     uint8_t *head_buf = NULL;
934     uint8_t *tail_buf = NULL;
935     QEMUIOVector local_qiov;
936     bool use_local_qiov = false;
937     int ret;
938 
939     if (!drv) {
940         return -ENOMEDIUM;
941     }
942 
943     ret = bdrv_check_byte_request(bs, offset, bytes);
944     if (ret < 0) {
945         return ret;
946     }
947 
948     /* Don't do copy-on-read if we are reading data ahead of a write operation */
949     if (bs->copy_on_read && !(flags & BDRV_REQ_NO_SERIALISING)) {
950         flags |= BDRV_REQ_COPY_ON_READ;
951     }
952 
953     /* throttling disk I/O */
954     if (bs->io_limits_enabled) {
955         throttle_group_co_io_limits_intercept(bs, bytes, false);
956     }
957 
958     /* Align read if necessary by padding qiov */
959     if (offset & (align - 1)) {
960         head_buf = qemu_blockalign(bs, align);
961         qemu_iovec_init(&local_qiov, qiov->niov + 2);
962         qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
963         qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
964         use_local_qiov = true;
965 
966         bytes += offset & (align - 1);
967         offset = offset & ~(align - 1);
968     }
969 
970     if ((offset + bytes) & (align - 1)) {
971         if (!use_local_qiov) {
972             qemu_iovec_init(&local_qiov, qiov->niov + 1);
973             qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
974             use_local_qiov = true;
975         }
976         tail_buf = qemu_blockalign(bs, align);
977         qemu_iovec_add(&local_qiov, tail_buf,
978                        align - ((offset + bytes) & (align - 1)));
979 
980         bytes = ROUND_UP(bytes, align);
981     }
982 
983     tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
984     ret = bdrv_aligned_preadv(bs, &req, offset, bytes, align,
985                               use_local_qiov ? &local_qiov : qiov,
986                               flags);
987     tracked_request_end(&req);
988 
989     if (use_local_qiov) {
990         qemu_iovec_destroy(&local_qiov);
991         qemu_vfree(head_buf);
992         qemu_vfree(tail_buf);
993     }
994 
995     return ret;
996 }
997 
998 static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
999     int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
1000     BdrvRequestFlags flags)
1001 {
1002     if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
1003         return -EINVAL;
1004     }
1005 
1006     return bdrv_co_do_preadv(bs, sector_num << BDRV_SECTOR_BITS,
1007                              nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
1008 }
1009 
1010 int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
1011     int nb_sectors, QEMUIOVector *qiov)
1012 {
1013     trace_bdrv_co_readv(bs, sector_num, nb_sectors);
1014 
1015     return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, 0);
1016 }
1017 
1018 int coroutine_fn bdrv_co_readv_no_serialising(BlockDriverState *bs,
1019     int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
1020 {
1021     trace_bdrv_co_readv_no_serialising(bs, sector_num, nb_sectors);
1022 
1023     return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov,
1024                             BDRV_REQ_NO_SERIALISING);
1025 }
1026 
1027 int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
1028     int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
1029 {
1030     trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors);
1031 
1032     return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov,
1033                             BDRV_REQ_COPY_ON_READ);
1034 }
1035 
1036 #define MAX_WRITE_ZEROES_BOUNCE_BUFFER 32768
1037 
1038 static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
1039     int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
1040 {
1041     BlockDriver *drv = bs->drv;
1042     QEMUIOVector qiov;
1043     struct iovec iov = {0};
1044     int ret = 0;
1045 
1046     int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_write_zeroes,
1047                                         BDRV_REQUEST_MAX_SECTORS);
1048 
1049     while (nb_sectors > 0 && !ret) {
1050         int num = nb_sectors;
1051 
1052         /* Align request.  Block drivers can expect the "bulk" of the request
1053          * to be aligned.
1054          */
1055         if (bs->bl.write_zeroes_alignment
1056             && num > bs->bl.write_zeroes_alignment) {
1057             if (sector_num % bs->bl.write_zeroes_alignment != 0) {
1058                 /* Make a small request up to the first aligned sector.  */
1059                 num = bs->bl.write_zeroes_alignment;
1060                 num -= sector_num % bs->bl.write_zeroes_alignment;
1061             } else if ((sector_num + num) % bs->bl.write_zeroes_alignment != 0) {
1062                 /* Shorten the request to the last aligned sector.  num cannot
1063                  * underflow because num > bs->bl.write_zeroes_alignment.
1064                  */
1065                 num -= (sector_num + num) % bs->bl.write_zeroes_alignment;
1066             }
1067         }
1068 
1069         /* limit request size */
1070         if (num > max_write_zeroes) {
1071             num = max_write_zeroes;
1072         }
1073 
1074         ret = -ENOTSUP;
1075         /* First try the efficient write zeroes operation */
1076         if (drv->bdrv_co_write_zeroes) {
1077             ret = drv->bdrv_co_write_zeroes(bs, sector_num, num, flags);
1078         }
1079 
1080         if (ret == -ENOTSUP) {
1081             /* Fall back to bounce buffer if write zeroes is unsupported */
1082             int max_xfer_len = MIN_NON_ZERO(bs->bl.max_transfer_length,
1083                                             MAX_WRITE_ZEROES_BOUNCE_BUFFER);
1084             num = MIN(num, max_xfer_len);
1085             iov.iov_len = num * BDRV_SECTOR_SIZE;
1086             if (iov.iov_base == NULL) {
1087                 iov.iov_base = qemu_try_blockalign(bs, num * BDRV_SECTOR_SIZE);
1088                 if (iov.iov_base == NULL) {
1089                     ret = -ENOMEM;
1090                     goto fail;
1091                 }
1092                 memset(iov.iov_base, 0, num * BDRV_SECTOR_SIZE);
1093             }
1094             qemu_iovec_init_external(&qiov, &iov, 1);
1095 
1096             ret = drv->bdrv_co_writev(bs, sector_num, num, &qiov);
1097 
1098             /* Keep the bounce buffer around if it is big enough for
1099              * all future requests.
1100              */
1101             if (num < max_xfer_len) {
1102                 qemu_vfree(iov.iov_base);
1103                 iov.iov_base = NULL;
1104             }
1105         }
1106 
1107         sector_num += num;
1108         nb_sectors -= num;
1109     }
1110 
1111 fail:
1112     qemu_vfree(iov.iov_base);
1113     return ret;
1114 }
1115 
1116 /*
1117  * Forwards an already correctly aligned write request to the BlockDriver.
1118  */
1119 static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs,
1120     BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
1121     QEMUIOVector *qiov, int flags)
1122 {
1123     BlockDriver *drv = bs->drv;
1124     bool waited;
1125     int ret;
1126 
1127     int64_t sector_num = offset >> BDRV_SECTOR_BITS;
1128     unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS;
1129 
1130     assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
1131     assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
1132     assert(!qiov || bytes == qiov->size);
1133 
1134     waited = wait_serialising_requests(req);
1135     assert(!waited || !req->serialising);
1136     assert(req->overlap_offset <= offset);
1137     assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);
1138 
1139     ret = notifier_with_return_list_notify(&bs->before_write_notifiers, req);
1140 
1141     if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
1142         !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_write_zeroes &&
1143         qemu_iovec_is_zero(qiov)) {
1144         flags |= BDRV_REQ_ZERO_WRITE;
1145         if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
1146             flags |= BDRV_REQ_MAY_UNMAP;
1147         }
1148     }
1149 
1150     if (ret < 0) {
1151         /* Do nothing, write notifier decided to fail this request */
1152     } else if (flags & BDRV_REQ_ZERO_WRITE) {
1153         bdrv_debug_event(bs, BLKDBG_PWRITEV_ZERO);
1154         ret = bdrv_co_do_write_zeroes(bs, sector_num, nb_sectors, flags);
1155     } else {
1156         bdrv_debug_event(bs, BLKDBG_PWRITEV);
1157         ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
1158     }
1159     bdrv_debug_event(bs, BLKDBG_PWRITEV_DONE);
1160 
1161     if (ret == 0 && !bs->enable_write_cache) {
1162         ret = bdrv_co_flush(bs);
1163     }
1164 
1165     bdrv_set_dirty(bs, sector_num, nb_sectors);
1166 
1167     if (bs->wr_highest_offset < offset + bytes) {
1168         bs->wr_highest_offset = offset + bytes;
1169     }
1170 
1171     if (ret >= 0) {
1172         bs->total_sectors = MAX(bs->total_sectors, sector_num + nb_sectors);
1173     }
1174 
1175     return ret;
1176 }
1177 
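/*
 * Zero out a byte range that need not be aligned: read-modify-write the
 * partial head and tail sectors and issue an aligned zero write for the
 * middle part.
 */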
1178 static int coroutine_fn bdrv_co_do_zero_pwritev(BlockDriverState *bs,
1179                                                 int64_t offset,
1180                                                 unsigned int bytes,
1181                                                 BdrvRequestFlags flags,
1182                                                 BdrvTrackedRequest *req)
1183 {
1184     uint8_t *buf = NULL;
1185     QEMUIOVector local_qiov;
1186     struct iovec iov;
1187     uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
1188     unsigned int head_padding_bytes, tail_padding_bytes;
1189     int ret = 0;
1190 
1191     head_padding_bytes = offset & (align - 1);
1192     tail_padding_bytes = align - ((offset + bytes) & (align - 1));
1193 
1194 
1195     assert(flags & BDRV_REQ_ZERO_WRITE);
1196     if (head_padding_bytes || tail_padding_bytes) {
1197         buf = qemu_blockalign(bs, align);
1198         iov = (struct iovec) {
1199             .iov_base   = buf,
1200             .iov_len    = align,
1201         };
1202         qemu_iovec_init_external(&local_qiov, &iov, 1);
1203     }
1204     if (head_padding_bytes) {
1205         uint64_t zero_bytes = MIN(bytes, align - head_padding_bytes);
1206 
1207         /* RMW the unaligned part before head. */
1208         mark_request_serialising(req, align);
1209         wait_serialising_requests(req);
1210         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
1211         ret = bdrv_aligned_preadv(bs, req, offset & ~(align - 1), align,
1212                                   align, &local_qiov, 0);
1213         if (ret < 0) {
1214             goto fail;
1215         }
1216         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
1217 
1218         memset(buf + head_padding_bytes, 0, zero_bytes);
1219         ret = bdrv_aligned_pwritev(bs, req, offset & ~(align - 1), align,
1220                                    &local_qiov,
1221                                    flags & ~BDRV_REQ_ZERO_WRITE);
1222         if (ret < 0) {
1223             goto fail;
1224         }
1225         offset += zero_bytes;
1226         bytes -= zero_bytes;
1227     }
1228 
1229     assert(!bytes || (offset & (align - 1)) == 0);
1230     if (bytes >= align) {
1231         /* Write the aligned part in the middle. */
1232         uint64_t aligned_bytes = bytes & ~(align - 1);
1233         ret = bdrv_aligned_pwritev(bs, req, offset, aligned_bytes,
1234                                    NULL, flags);
1235         if (ret < 0) {
1236             goto fail;
1237         }
1238         bytes -= aligned_bytes;
1239         offset += aligned_bytes;
1240     }
1241 
1242     assert(!bytes || (offset & (align - 1)) == 0);
1243     if (bytes) {
1244         assert(align == tail_padding_bytes + bytes);
1245         /* RMW the unaligned part after tail. */
1246         mark_request_serialising(req, align);
1247         wait_serialising_requests(req);
1248         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
1249         ret = bdrv_aligned_preadv(bs, req, offset, align,
1250                                   align, &local_qiov, 0);
1251         if (ret < 0) {
1252             goto fail;
1253         }
1254         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
1255 
1256         memset(buf, 0, bytes);
1257         ret = bdrv_aligned_pwritev(bs, req, offset, align,
1258                                    &local_qiov, flags & ~BDRV_REQ_ZERO_WRITE);
1259     }
1260 fail:
1261     qemu_vfree(buf);
1262     return ret;
1263 
1264 }
1265 
1266 /*
1267  * Handle a write request in coroutine context
1268  */
1269 int coroutine_fn bdrv_co_do_pwritev(BlockDriverState *bs,
1270     int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
1271     BdrvRequestFlags flags)
1272 {
1273     BdrvTrackedRequest req;
1274     /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
1275     uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
1276     uint8_t *head_buf = NULL;
1277     uint8_t *tail_buf = NULL;
1278     QEMUIOVector local_qiov;
1279     bool use_local_qiov = false;
1280     int ret;
1281 
1282     if (!bs->drv) {
1283         return -ENOMEDIUM;
1284     }
1285     if (bs->read_only) {
1286         return -EPERM;
1287     }
1288     assert(!(bs->open_flags & BDRV_O_INACTIVE));
1289 
1290     ret = bdrv_check_byte_request(bs, offset, bytes);
1291     if (ret < 0) {
1292         return ret;
1293     }
1294 
1295     /* throttling disk I/O */
1296     if (bs->io_limits_enabled) {
1297         throttle_group_co_io_limits_intercept(bs, bytes, true);
1298     }
1299 
1300     /*
1301      * Align write if necessary by performing a read-modify-write cycle.
1302      * Pad qiov with the read parts and be sure to have a tracked request not
1303      * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle.
1304      */
1305     tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE);
1306 
1307     if (!qiov) {
1308         ret = bdrv_co_do_zero_pwritev(bs, offset, bytes, flags, &req);
1309         goto out;
1310     }
1311 
1312     if (offset & (align - 1)) {
1313         QEMUIOVector head_qiov;
1314         struct iovec head_iov;
1315 
1316         mark_request_serialising(&req, align);
1317         wait_serialising_requests(&req);
1318 
1319         head_buf = qemu_blockalign(bs, align);
1320         head_iov = (struct iovec) {
1321             .iov_base   = head_buf,
1322             .iov_len    = align,
1323         };
1324         qemu_iovec_init_external(&head_qiov, &head_iov, 1);
1325 
1326         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
1327         ret = bdrv_aligned_preadv(bs, &req, offset & ~(align - 1), align,
1328                                   align, &head_qiov, 0);
1329         if (ret < 0) {
1330             goto fail;
1331         }
1332         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
1333 
1334         qemu_iovec_init(&local_qiov, qiov->niov + 2);
1335         qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
1336         qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
1337         use_local_qiov = true;
1338 
1339         bytes += offset & (align - 1);
1340         offset = offset & ~(align - 1);
1341     }
1342 
1343     if ((offset + bytes) & (align - 1)) {
1344         QEMUIOVector tail_qiov;
1345         struct iovec tail_iov;
1346         size_t tail_bytes;
1347         bool waited;
1348 
1349         mark_request_serialising(&req, align);
1350         waited = wait_serialising_requests(&req);
1351         assert(!waited || !use_local_qiov);
1352 
1353         tail_buf = qemu_blockalign(bs, align);
1354         tail_iov = (struct iovec) {
1355             .iov_base   = tail_buf,
1356             .iov_len    = align,
1357         };
1358         qemu_iovec_init_external(&tail_qiov, &tail_iov, 1);
1359 
1360         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
1361         ret = bdrv_aligned_preadv(bs, &req, (offset + bytes) & ~(align - 1), align,
1362                                   align, &tail_qiov, 0);
1363         if (ret < 0) {
1364             goto fail;
1365         }
1366         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
1367 
1368         if (!use_local_qiov) {
1369             qemu_iovec_init(&local_qiov, qiov->niov + 1);
1370             qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
1371             use_local_qiov = true;
1372         }
1373 
1374         tail_bytes = (offset + bytes) & (align - 1);
1375         qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes);
1376 
1377         bytes = ROUND_UP(bytes, align);
1378     }
1379 
1380     ret = bdrv_aligned_pwritev(bs, &req, offset, bytes,
1381                                use_local_qiov ? &local_qiov : qiov,
1382                                flags);
1383 
1384 fail:
1385 
1386     if (use_local_qiov) {
1387         qemu_iovec_destroy(&local_qiov);
1388     }
1389     qemu_vfree(head_buf);
1390     qemu_vfree(tail_buf);
1391 out:
1392     tracked_request_end(&req);
1393     return ret;
1394 }
1395 
1396 static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
1397     int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
1398     BdrvRequestFlags flags)
1399 {
1400     if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
1401         return -EINVAL;
1402     }
1403 
1404     return bdrv_co_do_pwritev(bs, sector_num << BDRV_SECTOR_BITS,
1405                               nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
1406 }
1407 
1408 int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
1409     int nb_sectors, QEMUIOVector *qiov)
1410 {
1411     trace_bdrv_co_writev(bs, sector_num, nb_sectors);
1412 
1413     return bdrv_co_do_writev(bs, sector_num, nb_sectors, qiov, 0);
1414 }
1415 
1416 int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs,
1417                                       int64_t sector_num, int nb_sectors,
1418                                       BdrvRequestFlags flags)
1419 {
1420     trace_bdrv_co_write_zeroes(bs, sector_num, nb_sectors, flags);
1421 
1422     if (!(bs->open_flags & BDRV_O_UNMAP)) {
1423         flags &= ~BDRV_REQ_MAY_UNMAP;
1424     }
1425 
1426     return bdrv_co_do_writev(bs, sector_num, nb_sectors, NULL,
1427                              BDRV_REQ_ZERO_WRITE | flags);
1428 }
1429 
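/* Parameters and result of the synchronous bdrv_get_block_status_above()
 * wrapper around the coroutine implementation below. */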
1430 typedef struct BdrvCoGetBlockStatusData {
1431     BlockDriverState *bs;
1432     BlockDriverState *base;
1433     BlockDriverState **file;
1434     int64_t sector_num;
1435     int nb_sectors;
1436     int *pnum;
1437     int64_t ret;
1438     bool done;
1439 } BdrvCoGetBlockStatusData;
1440 
1441 /*
1442  * Returns the allocation status of the specified sectors.
1443  * Drivers not implementing the functionality are assumed to not support
1444  * backing files, hence all their sectors are reported as allocated.
1445  *
1446  * If 'sector_num' is beyond the end of the disk image the return value is 0
1447  * and 'pnum' is set to 0.
1448  *
1449  * 'pnum' is set to the number of sectors (including and immediately following
1450  * the specified sector) that are known to be in the same
1451  * allocated/unallocated state.
1452  *
1453  * 'nb_sectors' is the max value 'pnum' should be set to.  If nb_sectors goes
1454  * beyond the end of the disk image it will be clamped.
1455  *
1456  * If the returned value is positive and the BDRV_BLOCK_OFFSET_VALID bit is
1457  * set, 'file' points to the BDS in which the sector range is allocated.
1458  */
1459 static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs,
1460                                                      int64_t sector_num,
1461                                                      int nb_sectors, int *pnum,
1462                                                      BlockDriverState **file)
1463 {
1464     int64_t total_sectors;
1465     int64_t n;
1466     int64_t ret, ret2;
1467 
1468     total_sectors = bdrv_nb_sectors(bs);
1469     if (total_sectors < 0) {
1470         return total_sectors;
1471     }
1472 
1473     if (sector_num >= total_sectors) {
1474         *pnum = 0;
1475         return 0;
1476     }
1477 
1478     n = total_sectors - sector_num;
1479     if (n < nb_sectors) {
1480         nb_sectors = n;
1481     }
1482 
1483     if (!bs->drv->bdrv_co_get_block_status) {
1484         *pnum = nb_sectors;
1485         ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
1486         if (bs->drv->protocol_name) {
1487             ret |= BDRV_BLOCK_OFFSET_VALID | (sector_num * BDRV_SECTOR_SIZE);
1488         }
1489         return ret;
1490     }
1491 
1492     *file = NULL;
1493     ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum,
1494                                             file);
1495     if (ret < 0) {
1496         *pnum = 0;
1497         return ret;
1498     }
1499 
1500     if (ret & BDRV_BLOCK_RAW) {
1501         assert(ret & BDRV_BLOCK_OFFSET_VALID);
1502         return bdrv_get_block_status(bs->file->bs, ret >> BDRV_SECTOR_BITS,
1503                                      *pnum, pnum, file);
1504     }
1505 
1506     if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
1507         ret |= BDRV_BLOCK_ALLOCATED;
1508     } else {
1509         if (bdrv_unallocated_blocks_are_zero(bs)) {
1510             ret |= BDRV_BLOCK_ZERO;
1511         } else if (bs->backing) {
1512             BlockDriverState *bs2 = bs->backing->bs;
1513             int64_t nb_sectors2 = bdrv_nb_sectors(bs2);
1514             if (nb_sectors2 >= 0 && sector_num >= nb_sectors2) {
1515                 ret |= BDRV_BLOCK_ZERO;
1516             }
1517         }
1518     }
1519 
1520     if (*file && *file != bs &&
1521         (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
1522         (ret & BDRV_BLOCK_OFFSET_VALID)) {
1523         BlockDriverState *file2;
1524         int file_pnum;
1525 
1526         ret2 = bdrv_co_get_block_status(*file, ret >> BDRV_SECTOR_BITS,
1527                                         *pnum, &file_pnum, &file2);
1528         if (ret2 >= 0) {
1529             /* Ignore errors.  This is just providing extra information, it
1530              * is useful but not necessary.
1531              */
1532             if (!file_pnum) {
1533                 /* !file_pnum indicates an offset at or beyond the EOF; it is
1534                  * perfectly valid for the format block driver to point to such
1535                  * offsets, so catch it and mark everything as zero */
1536                 ret |= BDRV_BLOCK_ZERO;
1537             } else {
1538                 /* Limit request to the range reported by the protocol driver */
1539                 *pnum = file_pnum;
1540                 ret |= (ret2 & BDRV_BLOCK_ZERO);
1541             }
1542         }
1543     }
1544 
1545     return ret;
1546 }
1547 
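/*
 * Like bdrv_co_get_block_status(), but walks the backing chain from @bs down
 * to (but not including) @base, returning as soon as a layer reports the
 * range as allocated or an error occurs.
 */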
1548 static int64_t coroutine_fn bdrv_co_get_block_status_above(BlockDriverState *bs,
1549         BlockDriverState *base,
1550         int64_t sector_num,
1551         int nb_sectors,
1552         int *pnum,
1553         BlockDriverState **file)
1554 {
1555     BlockDriverState *p;
1556     int64_t ret = 0;
1557 
1558     assert(bs != base);
1559     for (p = bs; p != base; p = backing_bs(p)) {
1560         ret = bdrv_co_get_block_status(p, sector_num, nb_sectors, pnum, file);
1561         if (ret < 0 || ret & BDRV_BLOCK_ALLOCATED) {
1562             break;
1563         }
1564         /* [sector_num, pnum] is unallocated on this layer, which could be
1565          * only the first part of [sector_num, nb_sectors].  */
1566         nb_sectors = MIN(nb_sectors, *pnum);
1567     }
1568     return ret;
1569 }
1570 
1571 /* Coroutine wrapper for bdrv_get_block_status_above() */
1572 static void coroutine_fn bdrv_get_block_status_above_co_entry(void *opaque)
1573 {
1574     BdrvCoGetBlockStatusData *data = opaque;
1575 
1576     data->ret = bdrv_co_get_block_status_above(data->bs, data->base,
1577                                                data->sector_num,
1578                                                data->nb_sectors,
1579                                                data->pnum,
1580                                                data->file);
1581     data->done = true;
1582 }
1583 
1584 /*
1585  * Synchronous wrapper around bdrv_co_get_block_status_above().
1586  *
1587  * See bdrv_co_get_block_status_above() for details.
1588  */
1589 int64_t bdrv_get_block_status_above(BlockDriverState *bs,
1590                                     BlockDriverState *base,
1591                                     int64_t sector_num,
1592                                     int nb_sectors, int *pnum,
1593                                     BlockDriverState **file)
1594 {
1595     Coroutine *co;
1596     BdrvCoGetBlockStatusData data = {
1597         .bs = bs,
1598         .base = base,
1599         .file = file,
1600         .sector_num = sector_num,
1601         .nb_sectors = nb_sectors,
1602         .pnum = pnum,
1603         .done = false,
1604     };
1605 
1606     if (qemu_in_coroutine()) {
1607         /* Fast-path if already in coroutine context */
1608         bdrv_get_block_status_above_co_entry(&data);
1609     } else {
1610         AioContext *aio_context = bdrv_get_aio_context(bs);
1611 
1612         co = qemu_coroutine_create(bdrv_get_block_status_above_co_entry);
1613         qemu_coroutine_enter(co, &data);
1614         while (!data.done) {
1615             aio_poll(aio_context, true);
1616         }
1617     }
1618     return data.ret;
1619 }
1620 
1621 int64_t bdrv_get_block_status(BlockDriverState *bs,
1622                               int64_t sector_num,
1623                               int nb_sectors, int *pnum,
1624                               BlockDriverState **file)
1625 {
1626     return bdrv_get_block_status_above(bs, backing_bs(bs),
1627                                        sector_num, nb_sectors, pnum, file);
1628 }
1629 
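/*
 * Illustrative sketch (not from the original source): a typical caller of
 * bdrv_get_block_status() combines the BDRV_BLOCK_* bits of the return value
 * with *pnum, which bounds how many sectors the answer covers.  The variables
 * bs, sector_num and nb_sectors are assumed to be set up by the caller.
 *
 *     BlockDriverState *file;
 *     int pnum;
 *     int64_t status = bdrv_get_block_status(bs, sector_num, nb_sectors,
 *                                            &pnum, &file);
 *     if (status < 0) {
 *         return status;                    // query failed
 *     }
 *     if (status & BDRV_BLOCK_ZERO) {
 *         // the first pnum sectors read as zeroes
 *     } else if (status & BDRV_BLOCK_DATA) {
 *         // the first pnum sectors contain data
 *     }
 */
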
1630 int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num,
1631                                    int nb_sectors, int *pnum)
1632 {
1633     BlockDriverState *file;
1634     int64_t ret = bdrv_get_block_status(bs, sector_num, nb_sectors, pnum,
1635                                         &file);
1636     if (ret < 0) {
1637         return ret;
1638     }
1639     return !!(ret & BDRV_BLOCK_ALLOCATED);
1640 }
1641 
1642 /*
1643  * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
1644  *
1645  * Return true if the given sector is allocated in any image between
1646  * BASE and TOP (inclusive).  BASE can be NULL to check if the given
1647  * sector is allocated in any image of the chain.  Return false otherwise.
1648  *
1649  * 'pnum' is set to the number of sectors (including and immediately following
1650  *  the specified sector) that are known to be in the same
1651  *  allocated/unallocated state.
1652  *
1653  */
1654 int bdrv_is_allocated_above(BlockDriverState *top,
1655                             BlockDriverState *base,
1656                             int64_t sector_num,
1657                             int nb_sectors, int *pnum)
1658 {
1659     BlockDriverState *intermediate;
1660     int ret, n = nb_sectors;
1661 
1662     intermediate = top;
1663     while (intermediate && intermediate != base) {
1664         int pnum_inter;
1665         ret = bdrv_is_allocated(intermediate, sector_num, nb_sectors,
1666                                 &pnum_inter);
1667         if (ret < 0) {
1668             return ret;
1669         } else if (ret) {
1670             *pnum = pnum_inter;
1671             return 1;
1672         }
1673 
1674         /*
1675          * [sector_num, nb_sectors] is unallocated on top but an intermediate
1676          * image might have
1677          *
1678          * [sector_num+x, nb_sectors] allocated.
1679          */
1680         if (n > pnum_inter &&
1681             (intermediate == top ||
1682              sector_num + pnum_inter < intermediate->total_sectors)) {
1683             n = pnum_inter;
1684         }
1685 
1686         intermediate = backing_bs(intermediate);
1687     }
1688 
1689     *pnum = n;
1690     return 0;
1691 }
1692 
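/*
 * Illustrative sketch (not from the original source): checking a range
 * against part of a backing chain with bdrv_is_allocated_above().  'top' and
 * 'base' are hypothetical BlockDriverState pointers, with 'base' below 'top'
 * in the chain (or NULL to check the whole chain).
 *
 *     int pnum;
 *     int ret = bdrv_is_allocated_above(top, base, sector_num, nb_sectors,
 *                                       &pnum);
 *     if (ret < 0) {
 *         return ret;
 *     } else if (ret) {
 *         // the first pnum sectors are allocated in some layer above base
 *     } else {
 *         // the first pnum sectors are unallocated in every layer above base
 *     }
 */
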
1693 int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,
1694                           const uint8_t *buf, int nb_sectors)
1695 {
1696     BlockDriver *drv = bs->drv;
1697     int ret;
1698 
1699     if (!drv) {
1700         return -ENOMEDIUM;
1701     }
1702     if (!drv->bdrv_write_compressed) {
1703         return -ENOTSUP;
1704     }
1705     ret = bdrv_check_request(bs, sector_num, nb_sectors);
1706     if (ret < 0) {
1707         return ret;
1708     }
1709 
1710     assert(QLIST_EMPTY(&bs->dirty_bitmaps));
1711 
1712     return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors);
1713 }
1714 
1715 int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
1716                       int64_t pos, int size)
1717 {
1718     QEMUIOVector qiov;
1719     struct iovec iov = {
1720         .iov_base   = (void *) buf,
1721         .iov_len    = size,
1722     };
1723 
1724     qemu_iovec_init_external(&qiov, &iov, 1);
1725     return bdrv_writev_vmstate(bs, &qiov, pos);
1726 }
1727 
1728 int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
1729 {
1730     BlockDriver *drv = bs->drv;
1731 
1732     if (!drv) {
1733         return -ENOMEDIUM;
1734     } else if (drv->bdrv_save_vmstate) {
1735         return drv->bdrv_save_vmstate(bs, qiov, pos);
1736     } else if (bs->file) {
1737         return bdrv_writev_vmstate(bs->file->bs, qiov, pos);
1738     }
1739 
1740     return -ENOTSUP;
1741 }
1742 
1743 int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
1744                       int64_t pos, int size)
1745 {
1746     BlockDriver *drv = bs->drv;
1747     if (!drv)
1748         return -ENOMEDIUM;
1749     if (drv->bdrv_load_vmstate)
1750         return drv->bdrv_load_vmstate(bs, buf, pos, size);
1751     if (bs->file)
1752         return bdrv_load_vmstate(bs->file->bs, buf, pos, size);
1753     return -ENOTSUP;
1754 }
1755 
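/*
 * Illustrative sketch (not from the original source): bdrv_save_vmstate() and
 * bdrv_load_vmstate() give a simple round trip through the vmstate area of an
 * image; both return a negative errno (e.g. -ENOTSUP) on failure.  The buffer
 * and position below are hypothetical.
 *
 *     uint8_t buf[512];
 *     int ret = bdrv_save_vmstate(bs, buf, 0, sizeof(buf));
 *     if (ret < 0) {
 *         return ret;
 *     }
 *     ret = bdrv_load_vmstate(bs, buf, 0, sizeof(buf));
 */
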
1756 /**************************************************************/
1757 /* async I/Os */
1758 
1759 BlockAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
1760                            QEMUIOVector *qiov, int nb_sectors,
1761                            BlockCompletionFunc *cb, void *opaque)
1762 {
1763     trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque);
1764 
1765     return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
1766                                  cb, opaque, false);
1767 }
1768 
1769 BlockAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
1770                             QEMUIOVector *qiov, int nb_sectors,
1771                             BlockCompletionFunc *cb, void *opaque)
1772 {
1773     trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque);
1774 
1775     return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
1776                                  cb, opaque, true);
1777 }
1778 
1779 BlockAIOCB *bdrv_aio_write_zeroes(BlockDriverState *bs,
1780         int64_t sector_num, int nb_sectors, BdrvRequestFlags flags,
1781         BlockCompletionFunc *cb, void *opaque)
1782 {
1783     trace_bdrv_aio_write_zeroes(bs, sector_num, nb_sectors, flags, opaque);
1784 
1785     return bdrv_co_aio_rw_vector(bs, sector_num, NULL, nb_sectors,
1786                                  BDRV_REQ_ZERO_WRITE | flags,
1787                                  cb, opaque, true);
1788 }
1789 
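/*
 * Illustrative sketch (not from the original source): submitting an
 * asynchronous read with a completion callback.  The names my_read_cb, s and
 * qiov are hypothetical; the returned BlockAIOCB can later be passed to
 * bdrv_aio_cancel() or bdrv_aio_cancel_async().
 *
 *     static void my_read_cb(void *opaque, int ret)
 *     {
 *         // ret is 0 on success or a negative errno value
 *     }
 *
 *     BlockAIOCB *acb = bdrv_aio_readv(bs, sector_num, &qiov, nb_sectors,
 *                                      my_read_cb, s);
 */
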
1790 
1791 typedef struct MultiwriteCB {
1792     int error;
1793     int num_requests;
1794     int num_callbacks;
1795     struct {
1796         BlockCompletionFunc *cb;
1797         void *opaque;
1798         QEMUIOVector *free_qiov;
1799     } callbacks[];
1800 } MultiwriteCB;
1801 
1802 static void multiwrite_user_cb(MultiwriteCB *mcb)
1803 {
1804     int i;
1805 
1806     for (i = 0; i < mcb->num_callbacks; i++) {
1807         mcb->callbacks[i].cb(mcb->callbacks[i].opaque, mcb->error);
1808         if (mcb->callbacks[i].free_qiov) {
1809             qemu_iovec_destroy(mcb->callbacks[i].free_qiov);
1810         }
1811         g_free(mcb->callbacks[i].free_qiov);
1812     }
1813 }
1814 
1815 static void multiwrite_cb(void *opaque, int ret)
1816 {
1817     MultiwriteCB *mcb = opaque;
1818 
1819     trace_multiwrite_cb(mcb, ret);
1820 
1821     if (ret < 0 && !mcb->error) {
1822         mcb->error = ret;
1823     }
1824 
1825     mcb->num_requests--;
1826     if (mcb->num_requests == 0) {
1827         multiwrite_user_cb(mcb);
1828         g_free(mcb);
1829     }
1830 }
1831 
1832 static int multiwrite_req_compare(const void *a, const void *b)
1833 {
1834     const BlockRequest *req1 = a, *req2 = b;
1835 
1836     /*
1837      * Note that we can't simply subtract req2->sector from req1->sector
1838      * here as that could overflow the return value.
1839      */
1840     if (req1->sector > req2->sector) {
1841         return 1;
1842     } else if (req1->sector < req2->sector) {
1843         return -1;
1844     } else {
1845         return 0;
1846     }
1847 }
1848 
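/*
 * Worked example (hypothetical values) for the overflow note above: with
 * req1->sector == 0x100000000LL and req2->sector == 0, the difference is
 * 2^32, which truncates to 0 when converted to the int that qsort() expects,
 * wrongly reporting the two requests as equal.  Comparing with > and < avoids
 * this.
 */
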
1849 /*
1850  * Takes a bunch of requests and tries to merge them. Returns the number of
1851  * requests that remain after merging.
1852  */
1853 static int multiwrite_merge(BlockDriverState *bs, BlockRequest *reqs,
1854     int num_reqs, MultiwriteCB *mcb)
1855 {
1856     int i, outidx;
1857 
1858     // Sort requests by start sector
1859     qsort(reqs, num_reqs, sizeof(*reqs), &multiwrite_req_compare);
1860 
1861     // Check if adjacent requests are sequential or overlapping. If so, combine
1862     // them into a single request.
1863     outidx = 0;
1864     for (i = 1; i < num_reqs; i++) {
1865         int merge = 0;
1866         int64_t oldreq_last = reqs[outidx].sector + reqs[outidx].nb_sectors;
1867 
1868         // Handle exactly sequential writes and overlapping writes.
1869         if (reqs[i].sector <= oldreq_last) {
1870             merge = 1;
1871         }
1872 
1873         if (reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1 >
1874             bs->bl.max_iov) {
1875             merge = 0;
1876         }
1877 
1878         if (bs->bl.max_transfer_length && reqs[outidx].nb_sectors +
1879             reqs[i].nb_sectors > bs->bl.max_transfer_length) {
1880             merge = 0;
1881         }
1882 
1883         if (merge) {
1884             size_t size;
1885             QEMUIOVector *qiov = g_malloc0(sizeof(*qiov));
1886             qemu_iovec_init(qiov,
1887                 reqs[outidx].qiov->niov + reqs[i].qiov->niov + 1);
1888 
1889             // Add the first request to the merged one. If the requests are
1890             // overlapping, drop the last sectors of the first request.
1891             size = (reqs[i].sector - reqs[outidx].sector) << BDRV_SECTOR_BITS;
1892             qemu_iovec_concat(qiov, reqs[outidx].qiov, 0, size);
1893 
1894             // We should not need to add any zeros between the two requests
1895             assert(reqs[i].sector <= oldreq_last);
1896 
1897             // Add the second request
1898             qemu_iovec_concat(qiov, reqs[i].qiov, 0, reqs[i].qiov->size);
1899 
1900             // Add tail of first request, if necessary
1901             if (qiov->size < reqs[outidx].qiov->size) {
1902                 qemu_iovec_concat(qiov, reqs[outidx].qiov, qiov->size,
1903                                   reqs[outidx].qiov->size - qiov->size);
1904             }
1905 
1906             reqs[outidx].nb_sectors = qiov->size >> BDRV_SECTOR_BITS;
1907             reqs[outidx].qiov = qiov;
1908 
1909             mcb->callbacks[i].free_qiov = reqs[outidx].qiov;
1910         } else {
1911             outidx++;
1912             reqs[outidx].sector     = reqs[i].sector;
1913             reqs[outidx].nb_sectors = reqs[i].nb_sectors;
1914             reqs[outidx].qiov       = reqs[i].qiov;
1915         }
1916     }
1917 
1918     if (bs->blk) {
1919         block_acct_merge_done(blk_get_stats(bs->blk), BLOCK_ACCT_WRITE,
1920                               num_reqs - outidx - 1);
1921     }
1922 
1923     return outidx + 1;
1924 }
1925 
1926 /*
1927  * Submit multiple AIO write requests at once.
1928  *
1929  * On success, the function returns 0 and all requests in the reqs array have
1930  * been submitted. In the error case, this function returns -1 and any of the
1931  * requests may or may not have been submitted yet. In particular, this means
1932  * that the callback will be called for some of the requests but not for
1933  * others. The caller must check the error field of each BlockRequest to wait
1934  * for the right callbacks (if error != 0, no callback will be called).
1935  *
1936  * The implementation may modify the contents of the reqs array, e.g. to merge
1937  * requests. However, the fields opaque and error are left unmodified as they
1938  * are used to signal failure for a single request to the caller.
1939  */
1940 int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs, int num_reqs)
1941 {
1942     MultiwriteCB *mcb;
1943     int i;
1944 
1945     /* don't submit writes if we don't have a medium */
1946     if (bs->drv == NULL) {
1947         for (i = 0; i < num_reqs; i++) {
1948             reqs[i].error = -ENOMEDIUM;
1949         }
1950         return -1;
1951     }
1952 
1953     if (num_reqs == 0) {
1954         return 0;
1955     }
1956 
1957     // Create MultiwriteCB structure
1958     mcb = g_malloc0(sizeof(*mcb) + num_reqs * sizeof(*mcb->callbacks));
1959     mcb->num_requests = 0;
1960     mcb->num_callbacks = num_reqs;
1961 
1962     for (i = 0; i < num_reqs; i++) {
1963         mcb->callbacks[i].cb = reqs[i].cb;
1964         mcb->callbacks[i].opaque = reqs[i].opaque;
1965     }
1966 
1967     // Check for mergeable requests
1968     num_reqs = multiwrite_merge(bs, reqs, num_reqs, mcb);
1969 
1970     trace_bdrv_aio_multiwrite(mcb, mcb->num_callbacks, num_reqs);
1971 
1972     /* Run the aio requests. */
1973     mcb->num_requests = num_reqs;
1974     for (i = 0; i < num_reqs; i++) {
1975         bdrv_co_aio_rw_vector(bs, reqs[i].sector, reqs[i].qiov,
1976                               reqs[i].nb_sectors, reqs[i].flags,
1977                               multiwrite_cb, mcb,
1978                               true);
1979     }
1980 
1981     return 0;
1982 }
1983 
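/*
 * Illustrative sketch (not from the original source): a caller fills an array
 * of BlockRequest entries and submits them with one bdrv_aio_multiwrite()
 * call; each entry's callback fires as the (possibly merged) writes complete.
 * The callback name, opaque pointer and request geometry are hypothetical.
 *
 *     BlockRequest reqs[2] = {
 *         { .sector = 0, .nb_sectors = 8, .qiov = &qiov0,
 *           .cb = my_write_cb, .opaque = s },
 *         { .sector = 8, .nb_sectors = 8, .qiov = &qiov1,
 *           .cb = my_write_cb, .opaque = s },
 *     };
 *
 *     if (bdrv_aio_multiwrite(bs, reqs, 2) < 0) {
 *         // inspect reqs[i].error; requests with error != 0 get no callback
 *     }
 */
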
1984 void bdrv_aio_cancel(BlockAIOCB *acb)
1985 {
1986     qemu_aio_ref(acb);
1987     bdrv_aio_cancel_async(acb);
1988     while (acb->refcnt > 1) {
1989         if (acb->aiocb_info->get_aio_context) {
1990             aio_poll(acb->aiocb_info->get_aio_context(acb), true);
1991         } else if (acb->bs) {
1992             aio_poll(bdrv_get_aio_context(acb->bs), true);
1993         } else {
1994             abort();
1995         }
1996     }
1997     qemu_aio_unref(acb);
1998 }
1999 
2000 /* Async version of aio cancel. The caller is not blocked if the acb implements
2001  * cancel_async; otherwise we do nothing and let the request complete normally.
2002  * In either case the completion callback must be called. */
2003 void bdrv_aio_cancel_async(BlockAIOCB *acb)
2004 {
2005     if (acb->aiocb_info->cancel_async) {
2006         acb->aiocb_info->cancel_async(acb);
2007     }
2008 }
2009 
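/*
 * Illustrative sketch (not from the original source): the two cancel flavours
 * differ only in blocking behaviour; in both cases the completion callback
 * still runs.
 *
 *     bdrv_aio_cancel(acb);        // blocks until the callback has run
 *
 *     bdrv_aio_cancel_async(acb);  // returns immediately; the callback runs
 *                                  // later, from the usual completion path
 */
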
2010 /**************************************************************/
2011 /* async block device emulation */
2012 
2013 typedef struct BlockAIOCBSync {
2014     BlockAIOCB common;
2015     QEMUBH *bh;
2016     int ret;
2017     /* vector translation state */
2018     QEMUIOVector *qiov;
2019     uint8_t *bounce;
2020     int is_write;
2021 } BlockAIOCBSync;
2022 
2023 static const AIOCBInfo bdrv_em_aiocb_info = {
2024     .aiocb_size         = sizeof(BlockAIOCBSync),
2025 };
2026 
2027 static void bdrv_aio_bh_cb(void *opaque)
2028 {
2029     BlockAIOCBSync *acb = opaque;
2030 
2031     if (!acb->is_write && acb->ret >= 0) {
2032         qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
2033     }
2034     qemu_vfree(acb->bounce);
2035     acb->common.cb(acb->common.opaque, acb->ret);
2036     qemu_bh_delete(acb->bh);
2037     acb->bh = NULL;
2038     qemu_aio_unref(acb);
2039 }
2040 
2041 static BlockAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs,
2042                                       int64_t sector_num,
2043                                       QEMUIOVector *qiov,
2044                                       int nb_sectors,
2045                                       BlockCompletionFunc *cb,
2046                                       void *opaque,
2047                                       int is_write)
2048 
2049 {
2050     BlockAIOCBSync *acb;
2051 
2052     acb = qemu_aio_get(&bdrv_em_aiocb_info, bs, cb, opaque);
2053     acb->is_write = is_write;
2054     acb->qiov = qiov;
2055     acb->bounce = qemu_try_blockalign(bs, qiov->size);
2056     acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_aio_bh_cb, acb);
2057 
2058     if (acb->bounce == NULL) {
2059         acb->ret = -ENOMEM;
2060     } else if (is_write) {
2061         qemu_iovec_to_buf(acb->qiov, 0, acb->bounce, qiov->size);
2062         acb->ret = bs->drv->bdrv_write(bs, sector_num, acb->bounce, nb_sectors);
2063     } else {
2064         acb->ret = bs->drv->bdrv_read(bs, sector_num, acb->bounce, nb_sectors);
2065     }
2066 
2067     qemu_bh_schedule(acb->bh);
2068 
2069     return &acb->common;
2070 }
2071 
2072 static BlockAIOCB *bdrv_aio_readv_em(BlockDriverState *bs,
2073         int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
2074         BlockCompletionFunc *cb, void *opaque)
2075 {
2076     return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
2077 }
2078 
2079 static BlockAIOCB *bdrv_aio_writev_em(BlockDriverState *bs,
2080         int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
2081         BlockCompletionFunc *cb, void *opaque)
2082 {
2083     return bdrv_aio_rw_vector(bs, sector_num, qiov, nb_sectors, cb, opaque, 1);
2084 }
2085 
2086 
2087 typedef struct BlockAIOCBCoroutine {
2088     BlockAIOCB common;
2089     BlockRequest req;
2090     bool is_write;
2091     bool need_bh;
2092     bool *done;
2093     QEMUBH* bh;
2094 } BlockAIOCBCoroutine;
2095 
2096 static const AIOCBInfo bdrv_em_co_aiocb_info = {
2097     .aiocb_size         = sizeof(BlockAIOCBCoroutine),
2098 };
2099 
2100 static void bdrv_co_complete(BlockAIOCBCoroutine *acb)
2101 {
2102     if (!acb->need_bh) {
2103         acb->common.cb(acb->common.opaque, acb->req.error);
2104         qemu_aio_unref(acb);
2105     }
2106 }
2107 
2108 static void bdrv_co_em_bh(void *opaque)
2109 {
2110     BlockAIOCBCoroutine *acb = opaque;
2111 
2112     assert(!acb->need_bh);
2113     qemu_bh_delete(acb->bh);
2114     bdrv_co_complete(acb);
2115 }
2116 
2117 static void bdrv_co_maybe_schedule_bh(BlockAIOCBCoroutine *acb)
2118 {
2119     acb->need_bh = false;
2120     if (acb->req.error != -EINPROGRESS) {
2121         BlockDriverState *bs = acb->common.bs;
2122 
2123         acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
2124         qemu_bh_schedule(acb->bh);
2125     }
2126 }
2127 
2128 /* Invoke bdrv_co_do_readv/bdrv_co_do_writev */
2129 static void coroutine_fn bdrv_co_do_rw(void *opaque)
2130 {
2131     BlockAIOCBCoroutine *acb = opaque;
2132     BlockDriverState *bs = acb->common.bs;
2133 
2134     if (!acb->is_write) {
2135         acb->req.error = bdrv_co_do_readv(bs, acb->req.sector,
2136             acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
2137     } else {
2138         acb->req.error = bdrv_co_do_writev(bs, acb->req.sector,
2139             acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
2140     }
2141 
2142     bdrv_co_complete(acb);
2143 }
2144 
2145 static BlockAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
2146                                          int64_t sector_num,
2147                                          QEMUIOVector *qiov,
2148                                          int nb_sectors,
2149                                          BdrvRequestFlags flags,
2150                                          BlockCompletionFunc *cb,
2151                                          void *opaque,
2152                                          bool is_write)
2153 {
2154     Coroutine *co;
2155     BlockAIOCBCoroutine *acb;
2156 
2157     acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
2158     acb->need_bh = true;
2159     acb->req.error = -EINPROGRESS;
2160     acb->req.sector = sector_num;
2161     acb->req.nb_sectors = nb_sectors;
2162     acb->req.qiov = qiov;
2163     acb->req.flags = flags;
2164     acb->is_write = is_write;
2165 
2166     co = qemu_coroutine_create(bdrv_co_do_rw);
2167     qemu_coroutine_enter(co, acb);
2168 
2169     bdrv_co_maybe_schedule_bh(acb);
2170     return &acb->common;
2171 }
2172 
2173 static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque)
2174 {
2175     BlockAIOCBCoroutine *acb = opaque;
2176     BlockDriverState *bs = acb->common.bs;
2177 
2178     acb->req.error = bdrv_co_flush(bs);
2179     bdrv_co_complete(acb);
2180 }
2181 
2182 BlockAIOCB *bdrv_aio_flush(BlockDriverState *bs,
2183         BlockCompletionFunc *cb, void *opaque)
2184 {
2185     Coroutine *co;
2186     BlockAIOCBCoroutine *acb;
2187 
2188     trace_bdrv_aio_flush(bs, opaque);
2189 
2190     acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
2191     acb->need_bh = true;
2192     acb->req.error = -EINPROGRESS;
2193 
2194     co = qemu_coroutine_create(bdrv_aio_flush_co_entry);
2195     qemu_coroutine_enter(co, acb);
2196 
2197     bdrv_co_maybe_schedule_bh(acb);
2198     return &acb->common;
2199 }
2200 
2201 static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque)
2202 {
2203     BlockAIOCBCoroutine *acb = opaque;
2204     BlockDriverState *bs = acb->common.bs;
2205 
2206     acb->req.error = bdrv_co_discard(bs, acb->req.sector, acb->req.nb_sectors);
2207     bdrv_co_complete(acb);
2208 }
2209 
2210 BlockAIOCB *bdrv_aio_discard(BlockDriverState *bs,
2211         int64_t sector_num, int nb_sectors,
2212         BlockCompletionFunc *cb, void *opaque)
2213 {
2214     Coroutine *co;
2215     BlockAIOCBCoroutine *acb;
2216 
2217     trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque);
2218 
2219     acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
2220     acb->need_bh = true;
2221     acb->req.error = -EINPROGRESS;
2222     acb->req.sector = sector_num;
2223     acb->req.nb_sectors = nb_sectors;
2224     co = qemu_coroutine_create(bdrv_aio_discard_co_entry);
2225     qemu_coroutine_enter(co, acb);
2226 
2227     bdrv_co_maybe_schedule_bh(acb);
2228     return &acb->common;
2229 }
2230 
2231 void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
2232                    BlockCompletionFunc *cb, void *opaque)
2233 {
2234     BlockAIOCB *acb;
2235 
2236     acb = g_malloc(aiocb_info->aiocb_size);
2237     acb->aiocb_info = aiocb_info;
2238     acb->bs = bs;
2239     acb->cb = cb;
2240     acb->opaque = opaque;
2241     acb->refcnt = 1;
2242     return acb;
2243 }
2244 
2245 void qemu_aio_ref(void *p)
2246 {
2247     BlockAIOCB *acb = p;
2248     acb->refcnt++;
2249 }
2250 
2251 void qemu_aio_unref(void *p)
2252 {
2253     BlockAIOCB *acb = p;
2254     assert(acb->refcnt > 0);
2255     if (--acb->refcnt == 0) {
2256         g_free(acb);
2257     }
2258 }
2259 
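/*
 * Illustrative sketch (not from the original source): the AIOCB reference
 * count keeps a BlockAIOCB alive while somebody is still inspecting it, as
 * bdrv_aio_cancel() does above while it polls for completion.
 *
 *     qemu_aio_ref(acb);           // take an extra reference before polling
 *     // ... wait for or inspect the request ...
 *     qemu_aio_unref(acb);         // drop the extra reference
 */
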
2260 /**************************************************************/
2261 /* Coroutine block device emulation */
2262 
2263 typedef struct CoroutineIOCompletion {
2264     Coroutine *coroutine;
2265     int ret;
2266 } CoroutineIOCompletion;
2267 
2268 static void bdrv_co_io_em_complete(void *opaque, int ret)
2269 {
2270     CoroutineIOCompletion *co = opaque;
2271 
2272     co->ret = ret;
2273     qemu_coroutine_enter(co->coroutine, NULL);
2274 }
2275 
2276 static int coroutine_fn bdrv_co_io_em(BlockDriverState *bs, int64_t sector_num,
2277                                       int nb_sectors, QEMUIOVector *iov,
2278                                       bool is_write)
2279 {
2280     CoroutineIOCompletion co = {
2281         .coroutine = qemu_coroutine_self(),
2282     };
2283     BlockAIOCB *acb;
2284 
2285     if (is_write) {
2286         acb = bs->drv->bdrv_aio_writev(bs, sector_num, iov, nb_sectors,
2287                                        bdrv_co_io_em_complete, &co);
2288     } else {
2289         acb = bs->drv->bdrv_aio_readv(bs, sector_num, iov, nb_sectors,
2290                                       bdrv_co_io_em_complete, &co);
2291     }
2292 
2293     trace_bdrv_co_io_em(bs, sector_num, nb_sectors, is_write, acb);
2294     if (!acb) {
2295         return -EIO;
2296     }
2297     qemu_coroutine_yield();
2298 
2299     return co.ret;
2300 }
2301 
2302 static int coroutine_fn bdrv_co_readv_em(BlockDriverState *bs,
2303                                          int64_t sector_num, int nb_sectors,
2304                                          QEMUIOVector *iov)
2305 {
2306     return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, false);
2307 }
2308 
2309 static int coroutine_fn bdrv_co_writev_em(BlockDriverState *bs,
2310                                          int64_t sector_num, int nb_sectors,
2311                                          QEMUIOVector *iov)
2312 {
2313     return bdrv_co_io_em(bs, sector_num, nb_sectors, iov, true);
2314 }
2315 
2316 static void coroutine_fn bdrv_flush_co_entry(void *opaque)
2317 {
2318     RwCo *rwco = opaque;
2319 
2320     rwco->ret = bdrv_co_flush(rwco->bs);
2321 }
2322 
2323 int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
2324 {
2325     int ret;
2326     BdrvTrackedRequest req;
2327 
2328     if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs) ||
2329         bdrv_is_sg(bs)) {
2330         return 0;
2331     }
2332 
2333     tracked_request_begin(&req, bs, 0, 0, BDRV_TRACKED_FLUSH);
2334     /* Write back cached data to the OS even with cache=unsafe */
2335     BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS);
2336     if (bs->drv->bdrv_co_flush_to_os) {
2337         ret = bs->drv->bdrv_co_flush_to_os(bs);
2338         if (ret < 0) {
2339             goto out;
2340         }
2341     }
2342 
2343     /* But don't actually force it to the disk with cache=unsafe */
2344     if (bs->open_flags & BDRV_O_NO_FLUSH) {
2345         goto flush_parent;
2346     }
2347 
2348     BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK);
2349     if (bs->drv->bdrv_co_flush_to_disk) {
2350         ret = bs->drv->bdrv_co_flush_to_disk(bs);
2351     } else if (bs->drv->bdrv_aio_flush) {
2352         BlockAIOCB *acb;
2353         CoroutineIOCompletion co = {
2354             .coroutine = qemu_coroutine_self(),
2355         };
2356 
2357         acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
2358         if (acb == NULL) {
2359             ret = -EIO;
2360         } else {
2361             qemu_coroutine_yield();
2362             ret = co.ret;
2363         }
2364     } else {
2365         /*
2366          * Some block drivers always operate in either writethrough or unsafe
2367          * mode and therefore don't support bdrv_flush. Usually qemu doesn't
2368          * know how the server works (because the behaviour is hardcoded or
2369          * depends on server-side configuration), so we can't ensure that
2370          * everything is safe on disk. Returning an error doesn't work because
2371          * that would break guests even if the server operates in writethrough
2372          * mode.
2373          *
2374          * Let's hope the user knows what they're doing.
2375          */
2376         ret = 0;
2377     }
2378     if (ret < 0) {
2379         goto out;
2380     }
2381 
2382     /* Now flush the underlying protocol.  It will also have BDRV_O_NO_FLUSH
2383      * set in the case of cache=unsafe, so there are no useless flushes.
2384      */
2385 flush_parent:
2386     ret = bs->file ? bdrv_co_flush(bs->file->bs) : 0;
2387 out:
2388     tracked_request_end(&req);
2389     return ret;
2390 }
2391 
2392 int bdrv_flush(BlockDriverState *bs)
2393 {
2394     Coroutine *co;
2395     RwCo rwco = {
2396         .bs = bs,
2397         .ret = NOT_DONE,
2398     };
2399 
2400     if (qemu_in_coroutine()) {
2401         /* Fast-path if already in coroutine context */
2402         bdrv_flush_co_entry(&rwco);
2403     } else {
2404         AioContext *aio_context = bdrv_get_aio_context(bs);
2405 
2406         co = qemu_coroutine_create(bdrv_flush_co_entry);
2407         qemu_coroutine_enter(co, &rwco);
2408         while (rwco.ret == NOT_DONE) {
2409             aio_poll(aio_context, true);
2410         }
2411     }
2412 
2413     return rwco.ret;
2414 }
2415 
2416 typedef struct DiscardCo {
2417     BlockDriverState *bs;
2418     int64_t sector_num;
2419     int nb_sectors;
2420     int ret;
2421 } DiscardCo;
2422 static void coroutine_fn bdrv_discard_co_entry(void *opaque)
2423 {
2424     DiscardCo *rwco = opaque;
2425 
2426     rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors);
2427 }
2428 
2429 int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
2430                                  int nb_sectors)
2431 {
2432     BdrvTrackedRequest req;
2433     int max_discard, ret;
2434 
2435     if (!bs->drv) {
2436         return -ENOMEDIUM;
2437     }
2438 
2439     ret = bdrv_check_request(bs, sector_num, nb_sectors);
2440     if (ret < 0) {
2441         return ret;
2442     } else if (bs->read_only) {
2443         return -EPERM;
2444     }
2445     assert(!(bs->open_flags & BDRV_O_INACTIVE));
2446 
2447     /* Do nothing if disabled.  */
2448     if (!(bs->open_flags & BDRV_O_UNMAP)) {
2449         return 0;
2450     }
2451 
2452     if (!bs->drv->bdrv_co_discard && !bs->drv->bdrv_aio_discard) {
2453         return 0;
2454     }
2455 
2456     tracked_request_begin(&req, bs, sector_num, nb_sectors,
2457                           BDRV_TRACKED_DISCARD);
2458     bdrv_set_dirty(bs, sector_num, nb_sectors);
2459 
2460     max_discard = MIN_NON_ZERO(bs->bl.max_discard, BDRV_REQUEST_MAX_SECTORS);
2461     while (nb_sectors > 0) {
2462         int ret;
2463         int num = nb_sectors;
2464 
2465         /* align request */
2466         if (bs->bl.discard_alignment &&
2467             num >= bs->bl.discard_alignment &&
2468             sector_num % bs->bl.discard_alignment) {
2469             if (num > bs->bl.discard_alignment) {
2470                 num = bs->bl.discard_alignment;
2471             }
2472             num -= sector_num % bs->bl.discard_alignment;
2473         }
2474 
2475         /* limit request size */
2476         if (num > max_discard) {
2477             num = max_discard;
2478         }
2479 
2480         if (bs->drv->bdrv_co_discard) {
2481             ret = bs->drv->bdrv_co_discard(bs, sector_num, num);
2482         } else {
2483             BlockAIOCB *acb;
2484             CoroutineIOCompletion co = {
2485                 .coroutine = qemu_coroutine_self(),
2486             };
2487 
2488             acb = bs->drv->bdrv_aio_discard(bs, sector_num, num,
2489                                             bdrv_co_io_em_complete, &co);
2490             if (acb == NULL) {
2491                 ret = -EIO;
2492                 goto out;
2493             } else {
2494                 qemu_coroutine_yield();
2495                 ret = co.ret;
2496             }
2497         }
2498         if (ret && ret != -ENOTSUP) {
2499             goto out;
2500         }
2501 
2502         sector_num += num;
2503         nb_sectors -= num;
2504     }
2505     ret = 0;
2506 out:
2507     tracked_request_end(&req);
2508     return ret;
2509 }
2510 
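/*
 * Worked example (hypothetical limits) for the clamping loop above: with
 * bs->bl.discard_alignment == 8, bs->bl.max_discard == 1024, sector_num == 5
 * and nb_sectors == 2000, the first iteration discards 3 sectors (5..7) to
 * reach the alignment boundary, the second discards 1024 sectors, and the
 * third discards the remaining 973.
 */
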
2511 int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
2512 {
2513     Coroutine *co;
2514     DiscardCo rwco = {
2515         .bs = bs,
2516         .sector_num = sector_num,
2517         .nb_sectors = nb_sectors,
2518         .ret = NOT_DONE,
2519     };
2520 
2521     if (qemu_in_coroutine()) {
2522         /* Fast-path if already in coroutine context */
2523         bdrv_discard_co_entry(&rwco);
2524     } else {
2525         AioContext *aio_context = bdrv_get_aio_context(bs);
2526 
2527         co = qemu_coroutine_create(bdrv_discard_co_entry);
2528         qemu_coroutine_enter(co, &rwco);
2529         while (rwco.ret == NOT_DONE) {
2530             aio_poll(aio_context, true);
2531         }
2532     }
2533 
2534     return rwco.ret;
2535 }
2536 
2537 typedef struct {
2538     CoroutineIOCompletion *co;
2539     QEMUBH *bh;
2540 } BdrvIoctlCompletionData;
2541 
2542 static void bdrv_ioctl_bh_cb(void *opaque)
2543 {
2544     BdrvIoctlCompletionData *data = opaque;
2545 
2546     bdrv_co_io_em_complete(data->co, -ENOTSUP);
2547     qemu_bh_delete(data->bh);
2548 }
2549 
2550 static int bdrv_co_do_ioctl(BlockDriverState *bs, int req, void *buf)
2551 {
2552     BlockDriver *drv = bs->drv;
2553     BdrvTrackedRequest tracked_req;
2554     CoroutineIOCompletion co = {
2555         .coroutine = qemu_coroutine_self(),
2556     };
2557     BlockAIOCB *acb;
2558 
2559     tracked_request_begin(&tracked_req, bs, 0, 0, BDRV_TRACKED_IOCTL);
2560     if (!drv || !drv->bdrv_aio_ioctl) {
2561         co.ret = -ENOTSUP;
2562         goto out;
2563     }
2564 
2565     acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co);
2566     if (!acb) {
2567         BdrvIoctlCompletionData *data = g_new(BdrvIoctlCompletionData, 1);
2568         data->bh = aio_bh_new(bdrv_get_aio_context(bs),
2569                                 bdrv_ioctl_bh_cb, data);
2570         data->co = &co;
2571         qemu_bh_schedule(data->bh);
2572     }
2573     qemu_coroutine_yield();
2574 out:
2575     tracked_request_end(&tracked_req);
2576     return co.ret;
2577 }
2578 
2579 typedef struct {
2580     BlockDriverState *bs;
2581     int req;
2582     void *buf;
2583     int ret;
2584 } BdrvIoctlCoData;
2585 
2586 static void coroutine_fn bdrv_co_ioctl_entry(void *opaque)
2587 {
2588     BdrvIoctlCoData *data = opaque;
2589     data->ret = bdrv_co_do_ioctl(data->bs, data->req, data->buf);
2590 }
2591 
2592 /* needed for generic SCSI interface */
2593 int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
2594 {
2595     BdrvIoctlCoData data = {
2596         .bs = bs,
2597         .req = req,
2598         .buf = buf,
2599         .ret = -EINPROGRESS,
2600     };
2601 
2602     if (qemu_in_coroutine()) {
2603         /* Fast-path if already in coroutine context */
2604         bdrv_co_ioctl_entry(&data);
2605     } else {
2606         Coroutine *co = qemu_coroutine_create(bdrv_co_ioctl_entry);
2607 
2608         qemu_coroutine_enter(co, &data);
2609         while (data.ret == -EINPROGRESS) {
2610             aio_poll(bdrv_get_aio_context(bs), true);
2611         }
2612     }
2613     return data.ret;
2614 }
2615 
2616 static void coroutine_fn bdrv_co_aio_ioctl_entry(void *opaque)
2617 {
2618     BlockAIOCBCoroutine *acb = opaque;
2619     acb->req.error = bdrv_co_do_ioctl(acb->common.bs,
2620                                       acb->req.req, acb->req.buf);
2621     bdrv_co_complete(acb);
2622 }
2623 
2624 BlockAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
2625         unsigned long int req, void *buf,
2626         BlockCompletionFunc *cb, void *opaque)
2627 {
2628     BlockAIOCBCoroutine *acb = qemu_aio_get(&bdrv_em_co_aiocb_info,
2629                                             bs, cb, opaque);
2630     Coroutine *co;
2631 
2632     acb->need_bh = true;
2633     acb->req.error = -EINPROGRESS;
2634     acb->req.req = req;
2635     acb->req.buf = buf;
2636     co = qemu_coroutine_create(bdrv_co_aio_ioctl_entry);
2637     qemu_coroutine_enter(co, acb);
2638 
2639     bdrv_co_maybe_schedule_bh(acb);
2640     return &acb->common;
2641 }
2642 
2643 void *qemu_blockalign(BlockDriverState *bs, size_t size)
2644 {
2645     return qemu_memalign(bdrv_opt_mem_align(bs), size);
2646 }
2647 
2648 void *qemu_blockalign0(BlockDriverState *bs, size_t size)
2649 {
2650     return memset(qemu_blockalign(bs, size), 0, size);
2651 }
2652 
2653 void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
2654 {
2655     size_t align = bdrv_opt_mem_align(bs);
2656 
2657     /* Ensure that NULL is never returned on success */
2658     assert(align > 0);
2659     if (size == 0) {
2660         size = align;
2661     }
2662 
2663     return qemu_try_memalign(align, size);
2664 }
2665 
2666 void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
2667 {
2668     void *mem = qemu_try_blockalign(bs, size);
2669 
2670     if (mem) {
2671         memset(mem, 0, size);
2672     }
2673 
2674     return mem;
2675 }
2676 
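/*
 * Illustrative sketch (not from the original source): bounce buffers for the
 * block layer are allocated with these alignment helpers and released with
 * qemu_vfree(), as bdrv_aio_rw_vector() does above.  'len' is hypothetical.
 *
 *     uint8_t *bounce = qemu_try_blockalign(bs, len);
 *     if (bounce == NULL) {
 *         return -ENOMEM;
 *     }
 *     // ... fill or drain the buffer ...
 *     qemu_vfree(bounce);
 */
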
2677 /*
2678  * Check if all memory in this vector is aligned to bdrv_min_mem_align(bs).
2679  */
2680 bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
2681 {
2682     int i;
2683     size_t alignment = bdrv_min_mem_align(bs);
2684 
2685     for (i = 0; i < qiov->niov; i++) {
2686         if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
2687             return false;
2688         }
2689         if (qiov->iov[i].iov_len % alignment) {
2690             return false;
2691         }
2692     }
2693 
2694     return true;
2695 }
2696 
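/*
 * Illustrative sketch (not from the original source): deciding whether a
 * caller-provided buffer can be handed to the driver directly or needs to be
 * copied into an aligned bounce buffer first.  'buf' and 'len' are
 * hypothetical.
 *
 *     struct iovec iov = { .iov_base = buf, .iov_len = len };
 *     QEMUIOVector qiov;
 *
 *     qemu_iovec_init_external(&qiov, &iov, 1);
 *     if (!bdrv_qiov_is_aligned(bs, &qiov)) {
 *         // fall back to a qemu_blockalign()ed bounce buffer
 *     }
 */
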
2697 void bdrv_add_before_write_notifier(BlockDriverState *bs,
2698                                     NotifierWithReturn *notifier)
2699 {
2700     notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
2701 }
2702 
2703 void bdrv_io_plug(BlockDriverState *bs)
2704 {
2705     BlockDriver *drv = bs->drv;
2706     if (drv && drv->bdrv_io_plug) {
2707         drv->bdrv_io_plug(bs);
2708     } else if (bs->file) {
2709         bdrv_io_plug(bs->file->bs);
2710     }
2711 }
2712 
2713 void bdrv_io_unplug(BlockDriverState *bs)
2714 {
2715     BlockDriver *drv = bs->drv;
2716     if (drv && drv->bdrv_io_unplug) {
2717         drv->bdrv_io_unplug(bs);
2718     } else if (bs->file) {
2719         bdrv_io_unplug(bs->file->bs);
2720     }
2721 }
2722 
2723 void bdrv_flush_io_queue(BlockDriverState *bs)
2724 {
2725     BlockDriver *drv = bs->drv;
2726     if (drv && drv->bdrv_flush_io_queue) {
2727         drv->bdrv_flush_io_queue(bs);
2728     } else if (bs->file) {
2729         bdrv_flush_io_queue(bs->file->bs);
2730     }
2731     bdrv_start_throttled_reqs(bs);
2732 }
2733 
2734 void bdrv_drained_begin(BlockDriverState *bs)
2735 {
2736     if (!bs->quiesce_counter++) {
2737         aio_disable_external(bdrv_get_aio_context(bs));
2738     }
2739     bdrv_drain(bs);
2740 }
2741 
2742 void bdrv_drained_end(BlockDriverState *bs)
2743 {
2744     assert(bs->quiesce_counter > 0);
2745     if (--bs->quiesce_counter > 0) {
2746         return;
2747     }
2748     aio_enable_external(bdrv_get_aio_context(bs));
2749 }
2750
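/*
 * Illustrative sketch (not from the original source): a drained section
 * brackets code that must not race with new external I/O, e.g. while
 * reconfiguring a BlockDriverState.
 *
 *     bdrv_drained_begin(bs);      // disable external events and drain bs
 *     // ... modify bs without new requests arriving ...
 *     bdrv_drained_end(bs);        // re-enable external events
 */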