xref: /openbmc/qemu/block/io.c (revision 9c2037d0)
1 /*
2  * Block layer I/O functions
3  *
4  * Copyright (c) 2003 Fabrice Bellard
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to deal
8  * in the Software without restriction, including without limitation the rights
9  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10  * copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22  * THE SOFTWARE.
23  */
24 
25 #include "qemu/osdep.h"
26 #include "trace.h"
27 #include "sysemu/block-backend.h"
28 #include "block/blockjob.h"
29 #include "block/block_int.h"
30 #include "qemu/cutils.h"
31 #include "qapi/error.h"
32 #include "qemu/error-report.h"
33 
34 #define NOT_DONE 0x7fffffff /* used while an emulated sync operation is in progress */
35 
36 static BlockAIOCB *bdrv_co_aio_prw_vector(BdrvChild *child,
37                                           int64_t offset,
38                                           QEMUIOVector *qiov,
39                                           BdrvRequestFlags flags,
40                                           BlockCompletionFunc *cb,
41                                           void *opaque,
42                                           bool is_write);
43 static void coroutine_fn bdrv_co_do_rw(void *opaque);
44 static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
45     int64_t offset, int count, BdrvRequestFlags flags);
46 
47 static void bdrv_parent_drained_begin(BlockDriverState *bs)
48 {
49     BdrvChild *c;
50 
51     QLIST_FOREACH(c, &bs->parents, next_parent) {
52         if (c->role->drained_begin) {
53             c->role->drained_begin(c);
54         }
55     }
56 }
57 
58 static void bdrv_parent_drained_end(BlockDriverState *bs)
59 {
60     BdrvChild *c;
61 
62     QLIST_FOREACH(c, &bs->parents, next_parent) {
63         if (c->role->drained_end) {
64             c->role->drained_end(c);
65         }
66     }
67 }
68 
69 static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src)
70 {
71     dst->opt_transfer = MAX(dst->opt_transfer, src->opt_transfer);
72     dst->max_transfer = MIN_NON_ZERO(dst->max_transfer, src->max_transfer);
73     dst->opt_mem_alignment = MAX(dst->opt_mem_alignment,
74                                  src->opt_mem_alignment);
75     dst->min_mem_alignment = MAX(dst->min_mem_alignment,
76                                  src->min_mem_alignment);
77     dst->max_iov = MIN_NON_ZERO(dst->max_iov, src->max_iov);
78 }
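
/*
 * For instance, if bdrv_refresh_limits() below merges a child that reports
 * max_transfer == 64 KiB into a parent whose own value is 0 (i.e. unlimited),
 * MIN_NON_ZERO keeps the 64 KiB cap; for opt_transfer and the two memory
 * alignment fields the larger (stricter) of the two values wins.
 */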
79 
80 void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
81 {
82     BlockDriver *drv = bs->drv;
83     Error *local_err = NULL;
84 
85     memset(&bs->bl, 0, sizeof(bs->bl));
86 
87     if (!drv) {
88         return;
89     }
90 
91     /* Default alignment based on whether driver has byte interface */
92     bs->bl.request_alignment = drv->bdrv_co_preadv ? 1 : 512;
93 
94     /* Take some limits from the children as a default */
95     if (bs->file) {
96         bdrv_refresh_limits(bs->file->bs, &local_err);
97         if (local_err) {
98             error_propagate(errp, local_err);
99             return;
100         }
101         bdrv_merge_limits(&bs->bl, &bs->file->bs->bl);
102     } else {
103         bs->bl.min_mem_alignment = 512;
104         bs->bl.opt_mem_alignment = getpagesize();
105 
106         /* Safe default since most protocols use readv()/writev()/etc */
107         bs->bl.max_iov = IOV_MAX;
108     }
109 
110     if (bs->backing) {
111         bdrv_refresh_limits(bs->backing->bs, &local_err);
112         if (local_err) {
113             error_propagate(errp, local_err);
114             return;
115         }
116         bdrv_merge_limits(&bs->bl, &bs->backing->bs->bl);
117     }
118 
119     /* Then let the driver override it */
120     if (drv->bdrv_refresh_limits) {
121         drv->bdrv_refresh_limits(bs, errp);
122     }
123 }
124 
125 /**
126  * The copy-on-read flag is actually a reference count so multiple users may
127  * use the feature without worrying about clobbering its previous state.
128  * Copy-on-read stays enabled until all users have called to disable it.
129  */
130 void bdrv_enable_copy_on_read(BlockDriverState *bs)
131 {
132     bs->copy_on_read++;
133 }
134 
135 void bdrv_disable_copy_on_read(BlockDriverState *bs)
136 {
137     assert(bs->copy_on_read > 0);
138     bs->copy_on_read--;
139 }
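
/*
 * Illustrative sketch (not part of this file): two independent users, such as
 * a block job and a temporary overlay, can each hold a copy-on-read reference;
 * the feature stays active until the last one releases it.
 *
 *     bdrv_enable_copy_on_read(bs);      // user A
 *     bdrv_enable_copy_on_read(bs);      // user B
 *     ...
 *     bdrv_disable_copy_on_read(bs);     // user A done, COR still enabled
 *     bdrv_disable_copy_on_read(bs);     // user B done, COR now disabled
 */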
140 
141 /* Check if any requests are in-flight (including throttled requests) */
142 bool bdrv_requests_pending(BlockDriverState *bs)
143 {
144     BdrvChild *child;
145 
146     if (atomic_read(&bs->in_flight)) {
147         return true;
148     }
149 
150     QLIST_FOREACH(child, &bs->children, next) {
151         if (bdrv_requests_pending(child->bs)) {
152             return true;
153         }
154     }
155 
156     return false;
157 }
158 
159 static bool bdrv_drain_recurse(BlockDriverState *bs)
160 {
161     BdrvChild *child;
162     bool waited;
163 
164     waited = BDRV_POLL_WHILE(bs, atomic_read(&bs->in_flight) > 0);
165 
166     if (bs->drv && bs->drv->bdrv_drain) {
167         bs->drv->bdrv_drain(bs);
168     }
169 
170     QLIST_FOREACH(child, &bs->children, next) {
171         waited |= bdrv_drain_recurse(child->bs);
172     }
173 
174     return waited;
175 }
176 
177 typedef struct {
178     Coroutine *co;
179     BlockDriverState *bs;
180     bool done;
181 } BdrvCoDrainData;
182 
183 static void bdrv_co_drain_bh_cb(void *opaque)
184 {
185     BdrvCoDrainData *data = opaque;
186     Coroutine *co = data->co;
187     BlockDriverState *bs = data->bs;
188 
189     bdrv_dec_in_flight(bs);
190     bdrv_drained_begin(bs);
191     data->done = true;
192     qemu_coroutine_enter(co);
193 }
194 
195 static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs)
196 {
197     BdrvCoDrainData data;
198 
199     /* Scheduling bdrv_drained_begin() in a BH ensures the current coroutine
200      * yields, so that other coroutines queued from qemu_co_queue_run_restart()
201      * get a chance to run. */
202 
203     assert(qemu_in_coroutine());
204     data = (BdrvCoDrainData) {
205         .co = qemu_coroutine_self(),
206         .bs = bs,
207         .done = false,
208     };
209     bdrv_inc_in_flight(bs);
210     aio_bh_schedule_oneshot(bdrv_get_aio_context(bs),
211                             bdrv_co_drain_bh_cb, &data);
212 
213     qemu_coroutine_yield();
214     /* If we are resumed from some other event (such as an aio completion or a
215      * timer callback), it is a bug in the caller that should be fixed. */
216     assert(data.done);
217 }
218 
219 void bdrv_drained_begin(BlockDriverState *bs)
220 {
221     if (qemu_in_coroutine()) {
222         bdrv_co_yield_to_drain(bs);
223         return;
224     }
225 
226     if (!bs->quiesce_counter++) {
227         aio_disable_external(bdrv_get_aio_context(bs));
228         bdrv_parent_drained_begin(bs);
229     }
230 
231     bdrv_drain_recurse(bs);
232 }
233 
234 void bdrv_drained_end(BlockDriverState *bs)
235 {
236     assert(bs->quiesce_counter > 0);
237     if (--bs->quiesce_counter > 0) {
238         return;
239     }
240 
241     bdrv_parent_drained_end(bs);
242     aio_enable_external(bdrv_get_aio_context(bs));
243 }
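
/*
 * Since the quiesce counter nests, a caller simply brackets the code that must
 * not race with new requests.  A minimal sketch:
 *
 *     bdrv_drained_begin(bs);    // waits for in-flight I/O, blocks new requests
 *     ... reconfigure the node or its users here ...
 *     bdrv_drained_end(bs);      // resume normal request processing
 */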
244 
245 /*
246  * Wait for pending requests to complete on a single BlockDriverState subtree,
247  * and suspend the block driver's internal I/O until the next request arrives.
248  *
249  * Note that unlike bdrv_drain_all(), the caller must hold the
250  * BlockDriverState's AioContext.
251  *
252  * Only this BlockDriverState's AioContext is run, so in-flight requests must
253  * not depend on events in other AioContexts.  If they do, use
254  * bdrv_drain_all() instead.
255  */
256 void coroutine_fn bdrv_co_drain(BlockDriverState *bs)
257 {
258     assert(qemu_in_coroutine());
259     bdrv_drained_begin(bs);
260     bdrv_drained_end(bs);
261 }
262 
263 void bdrv_drain(BlockDriverState *bs)
264 {
265     bdrv_drained_begin(bs);
266     bdrv_drained_end(bs);
267 }
268 
269 /*
270  * Wait for pending requests to complete across all BlockDriverStates
271  *
272  * This function does not flush data to disk, use bdrv_flush_all() for that
273  * after calling this function.
274  *
275  * This pauses all block jobs and disables external clients. It must
276  * be paired with bdrv_drain_all_end().
277  *
278  * NOTE: no new block jobs or BlockDriverStates can be created between
279  * the bdrv_drain_all_begin() and bdrv_drain_all_end() calls.
280  */
281 void bdrv_drain_all_begin(void)
282 {
283     /* Always run first iteration so any pending completion BHs run */
284     bool waited = true;
285     BlockDriverState *bs;
286     BdrvNextIterator it;
287     BlockJob *job = NULL;
288     GSList *aio_ctxs = NULL, *ctx;
289 
290     while ((job = block_job_next(job))) {
291         AioContext *aio_context = blk_get_aio_context(job->blk);
292 
293         aio_context_acquire(aio_context);
294         block_job_pause(job);
295         aio_context_release(aio_context);
296     }
297 
298     for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
299         AioContext *aio_context = bdrv_get_aio_context(bs);
300 
301         aio_context_acquire(aio_context);
302         bdrv_parent_drained_begin(bs);
303         aio_disable_external(aio_context);
304         aio_context_release(aio_context);
305 
306         if (!g_slist_find(aio_ctxs, aio_context)) {
307             aio_ctxs = g_slist_prepend(aio_ctxs, aio_context);
308         }
309     }
310 
311     /* Note that completion of an asynchronous I/O operation can trigger any
312      * number of other I/O operations on other devices---for example a
313      * coroutine can submit an I/O request to another device in response to
314      * request completion.  Therefore we must keep looping until there is no
315      * more activity rather than simply draining each device independently.
316      */
317     while (waited) {
318         waited = false;
319 
320         for (ctx = aio_ctxs; ctx != NULL; ctx = ctx->next) {
321             AioContext *aio_context = ctx->data;
322 
323             aio_context_acquire(aio_context);
324             for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
325                 if (aio_context == bdrv_get_aio_context(bs)) {
326                     waited |= bdrv_drain_recurse(bs);
327                 }
328             }
329             aio_context_release(aio_context);
330         }
331     }
332 
333     g_slist_free(aio_ctxs);
334 }
335 
336 void bdrv_drain_all_end(void)
337 {
338     BlockDriverState *bs;
339     BdrvNextIterator it;
340     BlockJob *job = NULL;
341 
342     for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
343         AioContext *aio_context = bdrv_get_aio_context(bs);
344 
345         aio_context_acquire(aio_context);
346         aio_enable_external(aio_context);
347         bdrv_parent_drained_end(bs);
348         aio_context_release(aio_context);
349     }
350 
351     while ((job = block_job_next(job))) {
352         AioContext *aio_context = blk_get_aio_context(job->blk);
353 
354         aio_context_acquire(aio_context);
355         block_job_resume(job);
356         aio_context_release(aio_context);
357     }
358 }
359 
360 void bdrv_drain_all(void)
361 {
362     bdrv_drain_all_begin();
363     bdrv_drain_all_end();
364 }
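
/*
 * Callers that need all nodes quiescent for longer than a single call pair the
 * begin/end variants explicitly; bdrv_drain_all() above is just that pair with
 * nothing in between.  Sketch:
 *
 *     bdrv_drain_all_begin();
 *     ... e.g. move nodes between AioContexts or rewrite the graph ...
 *     bdrv_drain_all_end();
 */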
365 
366 /**
367  * Remove an active request from the tracked requests list
368  *
369  * This function should be called when a tracked request is completing.
370  */
371 static void tracked_request_end(BdrvTrackedRequest *req)
372 {
373     if (req->serialising) {
374         req->bs->serialising_in_flight--;
375     }
376 
377     QLIST_REMOVE(req, list);
378     qemu_co_queue_restart_all(&req->wait_queue);
379 }
380 
381 /**
382  * Add an active request to the tracked requests list
383  */
384 static void tracked_request_begin(BdrvTrackedRequest *req,
385                                   BlockDriverState *bs,
386                                   int64_t offset,
387                                   unsigned int bytes,
388                                   enum BdrvTrackedRequestType type)
389 {
390     *req = (BdrvTrackedRequest){
391         .bs = bs,
392         .offset         = offset,
393         .bytes          = bytes,
394         .type           = type,
395         .co             = qemu_coroutine_self(),
396         .serialising    = false,
397         .overlap_offset = offset,
398         .overlap_bytes  = bytes,
399     };
400 
401     qemu_co_queue_init(&req->wait_queue);
402 
403     QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
404 }
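
/*
 * The request-tracking pattern used throughout this file (condensed from
 * bdrv_co_preadv() below; 'align' and 'flags' stand in for the real values):
 *
 *     BdrvTrackedRequest req;
 *
 *     tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
 *     ret = bdrv_aligned_preadv(bs, &req, offset, bytes, align, qiov, flags);
 *     tracked_request_end(&req);
 */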
405 
406 static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
407 {
408     int64_t overlap_offset = req->offset & ~(align - 1);
409     unsigned int overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
410                                - overlap_offset;
411 
412     if (!req->serialising) {
413         req->bs->serialising_in_flight++;
414         req->serialising = true;
415     }
416 
417     req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
418     req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
419 }
420 
421 /**
422  * Round a region to cluster boundaries (sector-based)
423  */
424 void bdrv_round_sectors_to_clusters(BlockDriverState *bs,
425                                     int64_t sector_num, int nb_sectors,
426                                     int64_t *cluster_sector_num,
427                                     int *cluster_nb_sectors)
428 {
429     BlockDriverInfo bdi;
430 
431     if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
432         *cluster_sector_num = sector_num;
433         *cluster_nb_sectors = nb_sectors;
434     } else {
435         int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE;
436         *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
437         *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
438                                             nb_sectors, c);
439     }
440 }
441 
442 /**
443  * Round a region to cluster boundaries
444  */
445 void bdrv_round_to_clusters(BlockDriverState *bs,
446                             int64_t offset, unsigned int bytes,
447                             int64_t *cluster_offset,
448                             unsigned int *cluster_bytes)
449 {
450     BlockDriverInfo bdi;
451 
452     if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
453         *cluster_offset = offset;
454         *cluster_bytes = bytes;
455     } else {
456         int64_t c = bdi.cluster_size;
457         *cluster_offset = QEMU_ALIGN_DOWN(offset, c);
458         *cluster_bytes = QEMU_ALIGN_UP(offset - *cluster_offset + bytes, c);
459     }
460 }
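
/*
 * Worked example: with a 64 KiB cluster size, a 1000-byte request at offset
 * 70000 is widened to the containing cluster.
 *
 *     int64_t cluster_offset;
 *     unsigned int cluster_bytes;
 *
 *     bdrv_round_to_clusters(bs, 70000, 1000, &cluster_offset, &cluster_bytes);
 *     // cluster_offset == 65536, cluster_bytes == 65536
 */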
461 
462 static int bdrv_get_cluster_size(BlockDriverState *bs)
463 {
464     BlockDriverInfo bdi;
465     int ret;
466 
467     ret = bdrv_get_info(bs, &bdi);
468     if (ret < 0 || bdi.cluster_size == 0) {
469         return bs->bl.request_alignment;
470     } else {
471         return bdi.cluster_size;
472     }
473 }
474 
475 static bool tracked_request_overlaps(BdrvTrackedRequest *req,
476                                      int64_t offset, unsigned int bytes)
477 {
478     /*        aaaa   bbbb */
479     if (offset >= req->overlap_offset + req->overlap_bytes) {
480         return false;
481     }
482     /* bbbb   aaaa        */
483     if (req->overlap_offset >= offset + bytes) {
484         return false;
485     }
486     return true;
487 }
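
/*
 * The intervals are half-open, e.g. for a request with overlap_offset == 0 and
 * overlap_bytes == 4096:
 *
 *     tracked_request_overlaps(&req, 4096, 4096);   // false, merely adjacent
 *     tracked_request_overlaps(&req, 2048, 4096);   // true, [2048, 4096) is shared
 */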
488 
489 void bdrv_inc_in_flight(BlockDriverState *bs)
490 {
491     atomic_inc(&bs->in_flight);
492 }
493 
494 static void dummy_bh_cb(void *opaque)
495 {
496 }
497 
498 void bdrv_wakeup(BlockDriverState *bs)
499 {
500     if (bs->wakeup) {
501         aio_bh_schedule_oneshot(qemu_get_aio_context(), dummy_bh_cb, NULL);
502     }
503 }
504 
505 void bdrv_dec_in_flight(BlockDriverState *bs)
506 {
507     atomic_dec(&bs->in_flight);
508     bdrv_wakeup(bs);
509 }
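
/*
 * Any work that BDRV_POLL_WHILE() must wait for is bracketed by this pair, as
 * bdrv_co_yield_to_drain() above does around its bottom half.  Sketch:
 *
 *     bdrv_inc_in_flight(bs);
 *     ... start async work / schedule a BH ...
 *     // in the completion path:
 *     bdrv_dec_in_flight(bs);    // also wakes a main-loop waiter via bdrv_wakeup()
 */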
510 
511 static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
512 {
513     BlockDriverState *bs = self->bs;
514     BdrvTrackedRequest *req;
515     bool retry;
516     bool waited = false;
517 
518     if (!bs->serialising_in_flight) {
519         return false;
520     }
521 
522     do {
523         retry = false;
524         QLIST_FOREACH(req, &bs->tracked_requests, list) {
525             if (req == self || (!req->serialising && !self->serialising)) {
526                 continue;
527             }
528             if (tracked_request_overlaps(req, self->overlap_offset,
529                                          self->overlap_bytes))
530             {
531                 /* Hitting this means there was a reentrant request, for
532                  * example, a block driver issuing nested requests.  This must
533                  * never happen since it means deadlock.
534                  */
535                 assert(qemu_coroutine_self() != req->co);
536 
537                 /* If the request is already (indirectly) waiting for us, or
538                  * will wait for us as soon as it wakes up, then just go on
539                  * (instead of producing a deadlock in the former case). */
540                 if (!req->waiting_for) {
541                     self->waiting_for = req;
542                     qemu_co_queue_wait(&req->wait_queue);
543                     self->waiting_for = NULL;
544                     retry = true;
545                     waited = true;
546                     break;
547                 }
548             }
549         }
550     } while (retry);
551 
552     return waited;
553 }
554 
555 static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
556                                    size_t size)
557 {
558     if (size > BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS) {
559         return -EIO;
560     }
561 
562     if (!bdrv_is_inserted(bs)) {
563         return -ENOMEDIUM;
564     }
565 
566     if (offset < 0) {
567         return -EIO;
568     }
569 
570     return 0;
571 }
572 
573 typedef struct RwCo {
574     BdrvChild *child;
575     int64_t offset;
576     QEMUIOVector *qiov;
577     bool is_write;
578     int ret;
579     BdrvRequestFlags flags;
580 } RwCo;
581 
582 static void coroutine_fn bdrv_rw_co_entry(void *opaque)
583 {
584     RwCo *rwco = opaque;
585 
586     if (!rwco->is_write) {
587         rwco->ret = bdrv_co_preadv(rwco->child, rwco->offset,
588                                    rwco->qiov->size, rwco->qiov,
589                                    rwco->flags);
590     } else {
591         rwco->ret = bdrv_co_pwritev(rwco->child, rwco->offset,
592                                     rwco->qiov->size, rwco->qiov,
593                                     rwco->flags);
594     }
595 }
596 
597 /*
598  * Process a vectored synchronous request using coroutines
599  */
600 static int bdrv_prwv_co(BdrvChild *child, int64_t offset,
601                         QEMUIOVector *qiov, bool is_write,
602                         BdrvRequestFlags flags)
603 {
604     Coroutine *co;
605     RwCo rwco = {
606         .child = child,
607         .offset = offset,
608         .qiov = qiov,
609         .is_write = is_write,
610         .ret = NOT_DONE,
611         .flags = flags,
612     };
613 
614     if (qemu_in_coroutine()) {
615         /* Fast-path if already in coroutine context */
616         bdrv_rw_co_entry(&rwco);
617     } else {
618         co = qemu_coroutine_create(bdrv_rw_co_entry, &rwco);
619         qemu_coroutine_enter(co);
620         BDRV_POLL_WHILE(child->bs, rwco.ret == NOT_DONE);
621     }
622     return rwco.ret;
623 }
624 
625 /*
626  * Process a synchronous request using coroutines
627  */
628 static int bdrv_rw_co(BdrvChild *child, int64_t sector_num, uint8_t *buf,
629                       int nb_sectors, bool is_write, BdrvRequestFlags flags)
630 {
631     QEMUIOVector qiov;
632     struct iovec iov = {
633         .iov_base = (void *)buf,
634         .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
635     };
636 
637     if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
638         return -EINVAL;
639     }
640 
641     qemu_iovec_init_external(&qiov, &iov, 1);
642     return bdrv_prwv_co(child, sector_num << BDRV_SECTOR_BITS,
643                         &qiov, is_write, flags);
644 }
645 
646 /* return < 0 if error. See bdrv_write() for the return codes */
647 int bdrv_read(BdrvChild *child, int64_t sector_num,
648               uint8_t *buf, int nb_sectors)
649 {
650     return bdrv_rw_co(child, sector_num, buf, nb_sectors, false, 0);
651 }
652 
653 /* Return < 0 if error. Important errors are:
654   -EIO         generic I/O error (may happen for all errors)
655   -ENOMEDIUM   No media inserted.
656   -EINVAL      Invalid sector number or nb_sectors
657   -EACCES      Trying to write a read-only device
658 */
659 int bdrv_write(BdrvChild *child, int64_t sector_num,
660                const uint8_t *buf, int nb_sectors)
661 {
662     return bdrv_rw_co(child, sector_num, (uint8_t *)buf, nb_sectors, true, 0);
663 }
664 
665 int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
666                        int count, BdrvRequestFlags flags)
667 {
668     QEMUIOVector qiov;
669     struct iovec iov = {
670         .iov_base = NULL,
671         .iov_len = count,
672     };
673 
674     qemu_iovec_init_external(&qiov, &iov, 1);
675     return bdrv_prwv_co(child, offset, &qiov, true,
676                         BDRV_REQ_ZERO_WRITE | flags);
677 }
678 
679 /*
680  * Completely zero out a block device with the help of bdrv_pwrite_zeroes.
681  * The operation is sped up by checking the block status and only writing
682  * zeroes to regions that do not already read back as zeroes.  Optional
683  * flags are passed through to bdrv_pwrite_zeroes (e.g. BDRV_REQ_MAY_UNMAP,
684  * BDRV_REQ_FUA).
685  *
686  * Returns < 0 on error, 0 on success. For error codes see bdrv_write().
687  */
688 int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags)
689 {
690     int64_t target_sectors, ret, nb_sectors, sector_num = 0;
691     BlockDriverState *bs = child->bs;
692     BlockDriverState *file;
693     int n;
694 
695     target_sectors = bdrv_nb_sectors(bs);
696     if (target_sectors < 0) {
697         return target_sectors;
698     }
699 
700     for (;;) {
701         nb_sectors = MIN(target_sectors - sector_num, BDRV_REQUEST_MAX_SECTORS);
702         if (nb_sectors <= 0) {
703             return 0;
704         }
705         ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &n, &file);
706         if (ret < 0) {
707             error_report("error getting block status at sector %" PRId64 ": %s",
708                          sector_num, strerror(-ret));
709             return ret;
710         }
711         if (ret & BDRV_BLOCK_ZERO) {
712             sector_num += n;
713             continue;
714         }
715         ret = bdrv_pwrite_zeroes(child, sector_num << BDRV_SECTOR_BITS,
716                                  n << BDRV_SECTOR_BITS, flags);
717         if (ret < 0) {
718             error_report("error writing zeroes at sector %" PRId64 ": %s",
719                          sector_num, strerror(-ret));
720             return ret;
721         }
722         sector_num += n;
723     }
724 }
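
/*
 * Typical use (sketch): zero an entire image, letting the driver unmap blocks
 * where it can.
 *
 *     ret = bdrv_make_zero(child, BDRV_REQ_MAY_UNMAP);
 *     if (ret < 0) {
 *         // the failing sector was already reported via error_report()
 *     }
 */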
725 
726 int bdrv_preadv(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
727 {
728     int ret;
729 
730     ret = bdrv_prwv_co(child, offset, qiov, false, 0);
731     if (ret < 0) {
732         return ret;
733     }
734 
735     return qiov->size;
736 }
737 
738 int bdrv_pread(BdrvChild *child, int64_t offset, void *buf, int bytes)
739 {
740     QEMUIOVector qiov;
741     struct iovec iov = {
742         .iov_base = (void *)buf,
743         .iov_len = bytes,
744     };
745 
746     if (bytes < 0) {
747         return -EINVAL;
748     }
749 
750     qemu_iovec_init_external(&qiov, &iov, 1);
751     return bdrv_preadv(child, offset, &qiov);
752 }
753 
754 int bdrv_pwritev(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
755 {
756     int ret;
757 
758     ret = bdrv_prwv_co(child, offset, qiov, true, 0);
759     if (ret < 0) {
760         return ret;
761     }
762 
763     return qiov->size;
764 }
765 
766 int bdrv_pwrite(BdrvChild *child, int64_t offset, const void *buf, int bytes)
767 {
768     QEMUIOVector qiov;
769     struct iovec iov = {
770         .iov_base   = (void *) buf,
771         .iov_len    = bytes,
772     };
773 
774     if (bytes < 0) {
775         return -EINVAL;
776     }
777 
778     qemu_iovec_init_external(&qiov, &iov, 1);
779     return bdrv_pwritev(child, offset, &qiov);
780 }
781 
782 /*
783  * Writes to the file and ensures that no writes are reordered across this
784  * request (acts as a barrier)
785  *
786  * Returns 0 on success, -errno in error cases.
787  */
788 int bdrv_pwrite_sync(BdrvChild *child, int64_t offset,
789                      const void *buf, int count)
790 {
791     int ret;
792 
793     ret = bdrv_pwrite(child, offset, buf, count);
794     if (ret < 0) {
795         return ret;
796     }
797 
798     ret = bdrv_flush(child->bs);
799     if (ret < 0) {
800         return ret;
801     }
802 
803     return 0;
804 }
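
/*
 * Sketch of a typical caller: updating a metadata block that later writes must
 * not overtake (the 512-byte header layout here is purely illustrative).
 *
 *     uint8_t header[512];
 *     ... fill in the updated metadata ...
 *     ret = bdrv_pwrite_sync(child, 0, header, sizeof(header));
 *     if (ret < 0) {
 *         return ret;    // either the write or the flush failed
 *     }
 */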
805 
806 typedef struct CoroutineIOCompletion {
807     Coroutine *coroutine;
808     int ret;
809 } CoroutineIOCompletion;
810 
811 static void bdrv_co_io_em_complete(void *opaque, int ret)
812 {
813     CoroutineIOCompletion *co = opaque;
814 
815     co->ret = ret;
816     qemu_coroutine_enter(co->coroutine);
817 }
818 
819 static int coroutine_fn bdrv_driver_preadv(BlockDriverState *bs,
820                                            uint64_t offset, uint64_t bytes,
821                                            QEMUIOVector *qiov, int flags)
822 {
823     BlockDriver *drv = bs->drv;
824     int64_t sector_num;
825     unsigned int nb_sectors;
826 
827     assert(!(flags & ~BDRV_REQ_MASK));
828 
829     if (drv->bdrv_co_preadv) {
830         return drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags);
831     }
832 
833     sector_num = offset >> BDRV_SECTOR_BITS;
834     nb_sectors = bytes >> BDRV_SECTOR_BITS;
835 
836     assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
837     assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
838     assert((bytes >> BDRV_SECTOR_BITS) <= BDRV_REQUEST_MAX_SECTORS);
839 
840     if (drv->bdrv_co_readv) {
841         return drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
842     } else {
843         BlockAIOCB *acb;
844         CoroutineIOCompletion co = {
845             .coroutine = qemu_coroutine_self(),
846         };
847 
848         acb = bs->drv->bdrv_aio_readv(bs, sector_num, qiov, nb_sectors,
849                                       bdrv_co_io_em_complete, &co);
850         if (acb == NULL) {
851             return -EIO;
852         } else {
853             qemu_coroutine_yield();
854             return co.ret;
855         }
856     }
857 }
858 
859 static int coroutine_fn bdrv_driver_pwritev(BlockDriverState *bs,
860                                             uint64_t offset, uint64_t bytes,
861                                             QEMUIOVector *qiov, int flags)
862 {
863     BlockDriver *drv = bs->drv;
864     int64_t sector_num;
865     unsigned int nb_sectors;
866     int ret;
867 
868     assert(!(flags & ~BDRV_REQ_MASK));
869 
870     if (drv->bdrv_co_pwritev) {
871         ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov,
872                                    flags & bs->supported_write_flags);
873         flags &= ~bs->supported_write_flags;
874         goto emulate_flags;
875     }
876 
877     sector_num = offset >> BDRV_SECTOR_BITS;
878     nb_sectors = bytes >> BDRV_SECTOR_BITS;
879 
880     assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
881     assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
882     assert((bytes >> BDRV_SECTOR_BITS) <= BDRV_REQUEST_MAX_SECTORS);
883 
884     if (drv->bdrv_co_writev_flags) {
885         ret = drv->bdrv_co_writev_flags(bs, sector_num, nb_sectors, qiov,
886                                         flags & bs->supported_write_flags);
887         flags &= ~bs->supported_write_flags;
888     } else if (drv->bdrv_co_writev) {
889         assert(!bs->supported_write_flags);
890         ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
891     } else {
892         BlockAIOCB *acb;
893         CoroutineIOCompletion co = {
894             .coroutine = qemu_coroutine_self(),
895         };
896 
897         acb = bs->drv->bdrv_aio_writev(bs, sector_num, qiov, nb_sectors,
898                                        bdrv_co_io_em_complete, &co);
899         if (acb == NULL) {
900             ret = -EIO;
901         } else {
902             qemu_coroutine_yield();
903             ret = co.ret;
904         }
905     }
906 
907 emulate_flags:
908     if (ret == 0 && (flags & BDRV_REQ_FUA)) {
909         ret = bdrv_co_flush(bs);
910     }
911 
912     return ret;
913 }
914 
915 static int coroutine_fn
916 bdrv_driver_pwritev_compressed(BlockDriverState *bs, uint64_t offset,
917                                uint64_t bytes, QEMUIOVector *qiov)
918 {
919     BlockDriver *drv = bs->drv;
920 
921     if (!drv->bdrv_co_pwritev_compressed) {
922         return -ENOTSUP;
923     }
924 
925     return drv->bdrv_co_pwritev_compressed(bs, offset, bytes, qiov);
926 }
927 
928 static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
929         int64_t offset, unsigned int bytes, QEMUIOVector *qiov)
930 {
931     /* Perform I/O through a temporary buffer so that users who scribble over
932      * their read buffer while the operation is in progress do not end up
933      * modifying the image file.  This is critical for zero-copy guest I/O
934      * where anything might happen inside guest memory.
935      */
936     void *bounce_buffer;
937 
938     BlockDriver *drv = bs->drv;
939     struct iovec iov;
940     QEMUIOVector bounce_qiov;
941     int64_t cluster_offset;
942     unsigned int cluster_bytes;
943     size_t skip_bytes;
944     int ret;
945 
946     /* Cover the entire cluster so that no additional backing file I/O is
947      * required when allocating a cluster in the image file.
948      */
949     bdrv_round_to_clusters(bs, offset, bytes, &cluster_offset, &cluster_bytes);
950 
951     trace_bdrv_co_do_copy_on_readv(bs, offset, bytes,
952                                    cluster_offset, cluster_bytes);
953 
954     iov.iov_len = cluster_bytes;
955     iov.iov_base = bounce_buffer = qemu_try_blockalign(bs, iov.iov_len);
956     if (bounce_buffer == NULL) {
957         ret = -ENOMEM;
958         goto err;
959     }
960 
961     qemu_iovec_init_external(&bounce_qiov, &iov, 1);
962 
963     ret = bdrv_driver_preadv(bs, cluster_offset, cluster_bytes,
964                              &bounce_qiov, 0);
965     if (ret < 0) {
966         goto err;
967     }
968 
969     if (drv->bdrv_co_pwrite_zeroes &&
970         buffer_is_zero(bounce_buffer, iov.iov_len)) {
971         /* FIXME: Should we (perhaps conditionally) be setting
972          * BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy
973          * that still correctly reads as zero? */
974         ret = bdrv_co_do_pwrite_zeroes(bs, cluster_offset, cluster_bytes, 0);
975     } else {
976         /* This does not change the data on the disk, so it is not necessary
977          * to flush even in cache=writethrough mode.
978          */
979         ret = bdrv_driver_pwritev(bs, cluster_offset, cluster_bytes,
980                                   &bounce_qiov, 0);
981     }
982 
983     if (ret < 0) {
984         /* It might be okay to ignore write errors for guest requests.  If this
985          * is a deliberate copy-on-read then we don't want to ignore the error.
986          * Simply report it in all cases.
987          */
988         goto err;
989     }
990 
991     skip_bytes = offset - cluster_offset;
992     qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes, bytes);
993 
994 err:
995     qemu_vfree(bounce_buffer);
996     return ret;
997 }
998 
999 /*
1000  * Forwards an already correctly aligned request to the BlockDriver. This
1001  * handles copy on read, zeroing after EOF, and fragmentation of large
1002  * reads; any other features must be implemented by the caller.
1003  */
1004 static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs,
1005     BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
1006     int64_t align, QEMUIOVector *qiov, int flags)
1007 {
1008     int64_t total_bytes, max_bytes;
1009     int ret = 0;
1010     uint64_t bytes_remaining = bytes;
1011     int max_transfer;
1012 
1013     assert(is_power_of_2(align));
1014     assert((offset & (align - 1)) == 0);
1015     assert((bytes & (align - 1)) == 0);
1016     assert(!qiov || bytes == qiov->size);
1017     assert((bs->open_flags & BDRV_O_NO_IO) == 0);
1018     max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
1019                                    align);
1020 
1021     /* TODO: We would need a per-BDS .supported_read_flags and
1022      * potential fallback support, if we ever implement any read flags
1023      * to pass through to drivers.  For now, there aren't any
1024      * passthrough flags.  */
1025     assert(!(flags & ~(BDRV_REQ_NO_SERIALISING | BDRV_REQ_COPY_ON_READ)));
1026 
1027     /* Handle Copy on Read and associated serialisation */
1028     if (flags & BDRV_REQ_COPY_ON_READ) {
1029         /* If we touch the same cluster it counts as an overlap.  This
1030          * guarantees that allocating writes will be serialized and not race
1031          * with each other for the same cluster.  For example, in copy-on-read
1032          * it ensures that the CoR read and write operations are atomic and
1033          * guest writes cannot interleave between them. */
1034         mark_request_serialising(req, bdrv_get_cluster_size(bs));
1035     }
1036 
1037     if (!(flags & BDRV_REQ_NO_SERIALISING)) {
1038         wait_serialising_requests(req);
1039     }
1040 
1041     if (flags & BDRV_REQ_COPY_ON_READ) {
1042         int64_t start_sector = offset >> BDRV_SECTOR_BITS;
1043         int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
1044         unsigned int nb_sectors = end_sector - start_sector;
1045         int pnum;
1046 
1047         ret = bdrv_is_allocated(bs, start_sector, nb_sectors, &pnum);
1048         if (ret < 0) {
1049             goto out;
1050         }
1051 
1052         if (!ret || pnum != nb_sectors) {
1053             ret = bdrv_co_do_copy_on_readv(bs, offset, bytes, qiov);
1054             goto out;
1055         }
1056     }
1057 
1058     /* Forward the request to the BlockDriver, possibly fragmenting it */
1059     total_bytes = bdrv_getlength(bs);
1060     if (total_bytes < 0) {
1061         ret = total_bytes;
1062         goto out;
1063     }
1064 
1065     max_bytes = ROUND_UP(MAX(0, total_bytes - offset), align);
1066     if (bytes <= max_bytes && bytes <= max_transfer) {
1067         ret = bdrv_driver_preadv(bs, offset, bytes, qiov, 0);
1068         goto out;
1069     }
1070 
1071     while (bytes_remaining) {
1072         int num;
1073 
1074         if (max_bytes) {
1075             QEMUIOVector local_qiov;
1076 
1077             num = MIN(bytes_remaining, MIN(max_bytes, max_transfer));
1078             assert(num);
1079             qemu_iovec_init(&local_qiov, qiov->niov);
1080             qemu_iovec_concat(&local_qiov, qiov, bytes - bytes_remaining, num);
1081 
1082             ret = bdrv_driver_preadv(bs, offset + bytes - bytes_remaining,
1083                                      num, &local_qiov, 0);
1084             max_bytes -= num;
1085             qemu_iovec_destroy(&local_qiov);
1086         } else {
1087             num = bytes_remaining;
1088             ret = qemu_iovec_memset(qiov, bytes - bytes_remaining, 0,
1089                                     bytes_remaining);
1090         }
1091         if (ret < 0) {
1092             goto out;
1093         }
1094         bytes_remaining -= num;
1095     }
1096 
1097 out:
1098     return ret < 0 ? ret : 0;
1099 }
1100 
1101 /*
1102  * Handle a read request in coroutine context
1103  */
1104 int coroutine_fn bdrv_co_preadv(BdrvChild *child,
1105     int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
1106     BdrvRequestFlags flags)
1107 {
1108     BlockDriverState *bs = child->bs;
1109     BlockDriver *drv = bs->drv;
1110     BdrvTrackedRequest req;
1111 
1112     uint64_t align = bs->bl.request_alignment;
1113     uint8_t *head_buf = NULL;
1114     uint8_t *tail_buf = NULL;
1115     QEMUIOVector local_qiov;
1116     bool use_local_qiov = false;
1117     int ret;
1118 
1119     if (!drv) {
1120         return -ENOMEDIUM;
1121     }
1122 
1123     ret = bdrv_check_byte_request(bs, offset, bytes);
1124     if (ret < 0) {
1125         return ret;
1126     }
1127 
1128     bdrv_inc_in_flight(bs);
1129 
1130     /* Don't do copy-on-read if we read data before a write operation */
1131     if (bs->copy_on_read && !(flags & BDRV_REQ_NO_SERIALISING)) {
1132         flags |= BDRV_REQ_COPY_ON_READ;
1133     }
1134 
1135     /* Align read if necessary by padding qiov */
1136     if (offset & (align - 1)) {
1137         head_buf = qemu_blockalign(bs, align);
1138         qemu_iovec_init(&local_qiov, qiov->niov + 2);
1139         qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
1140         qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
1141         use_local_qiov = true;
1142 
1143         bytes += offset & (align - 1);
1144         offset = offset & ~(align - 1);
1145     }
1146 
1147     if ((offset + bytes) & (align - 1)) {
1148         if (!use_local_qiov) {
1149             qemu_iovec_init(&local_qiov, qiov->niov + 1);
1150             qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
1151             use_local_qiov = true;
1152         }
1153         tail_buf = qemu_blockalign(bs, align);
1154         qemu_iovec_add(&local_qiov, tail_buf,
1155                        align - ((offset + bytes) & (align - 1)));
1156 
1157         bytes = ROUND_UP(bytes, align);
1158     }
1159 
1160     tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
1161     ret = bdrv_aligned_preadv(bs, &req, offset, bytes, align,
1162                               use_local_qiov ? &local_qiov : qiov,
1163                               flags);
1164     tracked_request_end(&req);
1165     bdrv_dec_in_flight(bs);
1166 
1167     if (use_local_qiov) {
1168         qemu_iovec_destroy(&local_qiov);
1169         qemu_vfree(head_buf);
1170         qemu_vfree(tail_buf);
1171     }
1172 
1173     return ret;
1174 }
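
/*
 * A coroutine caller only needs to supply a QEMUIOVector; alignment padding,
 * copy-on-read and request tracking are handled here.  Minimal sketch (the
 * wrapper function is hypothetical):
 *
 *     static int coroutine_fn example_co_read(BdrvChild *child, int64_t offset,
 *                                             unsigned int bytes, void *buf)
 *     {
 *         QEMUIOVector qiov;
 *         struct iovec iov = {
 *             .iov_base = buf,
 *             .iov_len  = bytes,
 *         };
 *
 *         qemu_iovec_init_external(&qiov, &iov, 1);
 *         return bdrv_co_preadv(child, offset, bytes, &qiov, 0);
 *     }
 */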
1175 
1176 static int coroutine_fn bdrv_co_do_readv(BdrvChild *child,
1177     int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
1178     BdrvRequestFlags flags)
1179 {
1180     if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
1181         return -EINVAL;
1182     }
1183 
1184     return bdrv_co_preadv(child, sector_num << BDRV_SECTOR_BITS,
1185                           nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
1186 }
1187 
1188 int coroutine_fn bdrv_co_readv(BdrvChild *child, int64_t sector_num,
1189                                int nb_sectors, QEMUIOVector *qiov)
1190 {
1191     trace_bdrv_co_readv(child->bs, sector_num, nb_sectors);
1192 
1193     return bdrv_co_do_readv(child, sector_num, nb_sectors, qiov, 0);
1194 }
1195 
1196 /* Maximum buffer for write zeroes fallback, in bytes */
1197 #define MAX_WRITE_ZEROES_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS)
1198 
1199 static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
1200     int64_t offset, int count, BdrvRequestFlags flags)
1201 {
1202     BlockDriver *drv = bs->drv;
1203     QEMUIOVector qiov;
1204     struct iovec iov = {0};
1205     int ret = 0;
1206     bool need_flush = false;
1207     int head = 0;
1208     int tail = 0;
1209 
1210     int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes, INT_MAX);
1211     int alignment = MAX(bs->bl.pwrite_zeroes_alignment,
1212                         bs->bl.request_alignment);
1213     int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer,
1214                                     MAX_WRITE_ZEROES_BOUNCE_BUFFER);
1215 
1216     assert(alignment % bs->bl.request_alignment == 0);
1217     head = offset % alignment;
1218     tail = (offset + count) % alignment;
1219     max_write_zeroes = QEMU_ALIGN_DOWN(max_write_zeroes, alignment);
1220     assert(max_write_zeroes >= bs->bl.request_alignment);
1221 
1222     while (count > 0 && !ret) {
1223         int num = count;
1224 
1225         /* Align request.  Block drivers can expect the "bulk" of the request
1226          * to be aligned, and that unaligned requests do not cross cluster
1227          * boundaries.
1228          */
1229         if (head) {
1230             /* Make a small request up to the first aligned sector. For
1231              * convenience, limit this request to max_transfer even if
1232              * we don't need to fall back to writes.  */
1233             num = MIN(MIN(count, max_transfer), alignment - head);
1234             head = (head + num) % alignment;
1235             assert(num < max_write_zeroes);
1236         } else if (tail && num > alignment) {
1237             /* Shorten the request to the last aligned sector.  */
1238             num -= tail;
1239         }
1240 
1241         /* limit request size */
1242         if (num > max_write_zeroes) {
1243             num = max_write_zeroes;
1244         }
1245 
1246         ret = -ENOTSUP;
1247         /* First try the efficient write zeroes operation */
1248         if (drv->bdrv_co_pwrite_zeroes) {
1249             ret = drv->bdrv_co_pwrite_zeroes(bs, offset, num,
1250                                              flags & bs->supported_zero_flags);
1251             if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) &&
1252                 !(bs->supported_zero_flags & BDRV_REQ_FUA)) {
1253                 need_flush = true;
1254             }
1255         } else {
1256             assert(!bs->supported_zero_flags);
1257         }
1258 
1259         if (ret == -ENOTSUP) {
1260             /* Fall back to bounce buffer if write zeroes is unsupported */
1261             BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE;
1262 
1263             if ((flags & BDRV_REQ_FUA) &&
1264                 !(bs->supported_write_flags & BDRV_REQ_FUA)) {
1265                 /* No need for bdrv_driver_pwritev() to do a fallback
1266                  * flush on each chunk; use just one at the end */
1267                 write_flags &= ~BDRV_REQ_FUA;
1268                 need_flush = true;
1269             }
1270             num = MIN(num, max_transfer);
1271             iov.iov_len = num;
1272             if (iov.iov_base == NULL) {
1273                 iov.iov_base = qemu_try_blockalign(bs, num);
1274                 if (iov.iov_base == NULL) {
1275                     ret = -ENOMEM;
1276                     goto fail;
1277                 }
1278                 memset(iov.iov_base, 0, num);
1279             }
1280             qemu_iovec_init_external(&qiov, &iov, 1);
1281 
1282             ret = bdrv_driver_pwritev(bs, offset, num, &qiov, write_flags);
1283 
1284             /* Keep the bounce buffer around if it is big enough for all
1285              * future requests.
1286              */
1287             if (num < max_transfer) {
1288                 qemu_vfree(iov.iov_base);
1289                 iov.iov_base = NULL;
1290             }
1291         }
1292 
1293         offset += num;
1294         count -= num;
1295     }
1296 
1297 fail:
1298     if (ret == 0 && need_flush) {
1299         ret = bdrv_co_flush(bs);
1300     }
1301     qemu_vfree(iov.iov_base);
1302     return ret;
1303 }
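
/*
 * Worked example: with a 4096-byte alignment and no other limits, a zeroing
 * request at offset 4000 of length 10000 is issued as three pieces: an
 * unaligned head of 96 bytes (4000..4095), an aligned middle of 8192 bytes
 * (4096..12287), and an unaligned tail of 1712 bytes (12288..13999); the
 * unaligned pieces fall back to the bounce-buffer write path if the driver
 * rejects them.
 */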
1304 
1305 /*
1306  * Forwards an already correctly aligned write request to the BlockDriver,
1307  * after possibly fragmenting it.
1308  */
1309 static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs,
1310     BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
1311     int64_t align, QEMUIOVector *qiov, int flags)
1312 {
1313     BlockDriver *drv = bs->drv;
1314     bool waited;
1315     int ret;
1316 
1317     int64_t start_sector = offset >> BDRV_SECTOR_BITS;
1318     int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
1319     uint64_t bytes_remaining = bytes;
1320     int max_transfer;
1321 
1322     assert(is_power_of_2(align));
1323     assert((offset & (align - 1)) == 0);
1324     assert((bytes & (align - 1)) == 0);
1325     assert(!qiov || bytes == qiov->size);
1326     assert((bs->open_flags & BDRV_O_NO_IO) == 0);
1327     assert(!(flags & ~BDRV_REQ_MASK));
1328     max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
1329                                    align);
1330 
1331     waited = wait_serialising_requests(req);
1332     assert(!waited || !req->serialising);
1333     assert(req->overlap_offset <= offset);
1334     assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);
1335 
1336     ret = notifier_with_return_list_notify(&bs->before_write_notifiers, req);
1337 
1338     if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
1339         !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_pwrite_zeroes &&
1340         qemu_iovec_is_zero(qiov)) {
1341         flags |= BDRV_REQ_ZERO_WRITE;
1342         if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
1343             flags |= BDRV_REQ_MAY_UNMAP;
1344         }
1345     }
1346 
1347     if (ret < 0) {
1348         /* Do nothing, write notifier decided to fail this request */
1349     } else if (flags & BDRV_REQ_ZERO_WRITE) {
1350         bdrv_debug_event(bs, BLKDBG_PWRITEV_ZERO);
1351         ret = bdrv_co_do_pwrite_zeroes(bs, offset, bytes, flags);
1352     } else if (flags & BDRV_REQ_WRITE_COMPRESSED) {
1353         ret = bdrv_driver_pwritev_compressed(bs, offset, bytes, qiov);
1354     } else if (bytes <= max_transfer) {
1355         bdrv_debug_event(bs, BLKDBG_PWRITEV);
1356         ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, flags);
1357     } else {
1358         bdrv_debug_event(bs, BLKDBG_PWRITEV);
1359         while (bytes_remaining) {
1360             int num = MIN(bytes_remaining, max_transfer);
1361             QEMUIOVector local_qiov;
1362             int local_flags = flags;
1363 
1364             assert(num);
1365             if (num < bytes_remaining && (flags & BDRV_REQ_FUA) &&
1366                 !(bs->supported_write_flags & BDRV_REQ_FUA)) {
1367                 /* If FUA is going to be emulated by flush, we only
1368                  * need to flush on the last iteration */
1369                 local_flags &= ~BDRV_REQ_FUA;
1370             }
1371             qemu_iovec_init(&local_qiov, qiov->niov);
1372             qemu_iovec_concat(&local_qiov, qiov, bytes - bytes_remaining, num);
1373 
1374             ret = bdrv_driver_pwritev(bs, offset + bytes - bytes_remaining,
1375                                       num, &local_qiov, local_flags);
1376             qemu_iovec_destroy(&local_qiov);
1377             if (ret < 0) {
1378                 break;
1379             }
1380             bytes_remaining -= num;
1381         }
1382     }
1383     bdrv_debug_event(bs, BLKDBG_PWRITEV_DONE);
1384 
1385     ++bs->write_gen;
1386     bdrv_set_dirty(bs, start_sector, end_sector - start_sector);
1387 
1388     if (bs->wr_highest_offset < offset + bytes) {
1389         bs->wr_highest_offset = offset + bytes;
1390     }
1391 
1392     if (ret >= 0) {
1393         bs->total_sectors = MAX(bs->total_sectors, end_sector);
1394         ret = 0;
1395     }
1396 
1397     return ret;
1398 }
1399 
1400 static int coroutine_fn bdrv_co_do_zero_pwritev(BlockDriverState *bs,
1401                                                 int64_t offset,
1402                                                 unsigned int bytes,
1403                                                 BdrvRequestFlags flags,
1404                                                 BdrvTrackedRequest *req)
1405 {
1406     uint8_t *buf = NULL;
1407     QEMUIOVector local_qiov;
1408     struct iovec iov;
1409     uint64_t align = bs->bl.request_alignment;
1410     unsigned int head_padding_bytes, tail_padding_bytes;
1411     int ret = 0;
1412 
1413     head_padding_bytes = offset & (align - 1);
1414     tail_padding_bytes = align - ((offset + bytes) & (align - 1));
1415 
1416 
1417     assert(flags & BDRV_REQ_ZERO_WRITE);
1418     if (head_padding_bytes || tail_padding_bytes) {
1419         buf = qemu_blockalign(bs, align);
1420         iov = (struct iovec) {
1421             .iov_base   = buf,
1422             .iov_len    = align,
1423         };
1424         qemu_iovec_init_external(&local_qiov, &iov, 1);
1425     }
1426     if (head_padding_bytes) {
1427         uint64_t zero_bytes = MIN(bytes, align - head_padding_bytes);
1428 
1429         /* RMW the unaligned part before head. */
1430         mark_request_serialising(req, align);
1431         wait_serialising_requests(req);
1432         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
1433         ret = bdrv_aligned_preadv(bs, req, offset & ~(align - 1), align,
1434                                   align, &local_qiov, 0);
1435         if (ret < 0) {
1436             goto fail;
1437         }
1438         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
1439 
1440         memset(buf + head_padding_bytes, 0, zero_bytes);
1441         ret = bdrv_aligned_pwritev(bs, req, offset & ~(align - 1), align,
1442                                    align, &local_qiov,
1443                                    flags & ~BDRV_REQ_ZERO_WRITE);
1444         if (ret < 0) {
1445             goto fail;
1446         }
1447         offset += zero_bytes;
1448         bytes -= zero_bytes;
1449     }
1450 
1451     assert(!bytes || (offset & (align - 1)) == 0);
1452     if (bytes >= align) {
1453         /* Write the aligned part in the middle. */
1454         uint64_t aligned_bytes = bytes & ~(align - 1);
1455         ret = bdrv_aligned_pwritev(bs, req, offset, aligned_bytes, align,
1456                                    NULL, flags);
1457         if (ret < 0) {
1458             goto fail;
1459         }
1460         bytes -= aligned_bytes;
1461         offset += aligned_bytes;
1462     }
1463 
1464     assert(!bytes || (offset & (align - 1)) == 0);
1465     if (bytes) {
1466         assert(align == tail_padding_bytes + bytes);
1467         /* RMW the unaligned part after tail. */
1468         mark_request_serialising(req, align);
1469         wait_serialising_requests(req);
1470         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
1471         ret = bdrv_aligned_preadv(bs, req, offset, align,
1472                                   align, &local_qiov, 0);
1473         if (ret < 0) {
1474             goto fail;
1475         }
1476         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
1477 
1478         memset(buf, 0, bytes);
1479         ret = bdrv_aligned_pwritev(bs, req, offset, align, align,
1480                                    &local_qiov, flags & ~BDRV_REQ_ZERO_WRITE);
1481     }
1482 fail:
1483     qemu_vfree(buf);
1484     return ret;
1485 
1486 }
1487 
1488 /*
1489  * Handle a write request in coroutine context
1490  */
1491 int coroutine_fn bdrv_co_pwritev(BdrvChild *child,
1492     int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
1493     BdrvRequestFlags flags)
1494 {
1495     BlockDriverState *bs = child->bs;
1496     BdrvTrackedRequest req;
1497     uint64_t align = bs->bl.request_alignment;
1498     uint8_t *head_buf = NULL;
1499     uint8_t *tail_buf = NULL;
1500     QEMUIOVector local_qiov;
1501     bool use_local_qiov = false;
1502     int ret;
1503 
1504     if (!bs->drv) {
1505         return -ENOMEDIUM;
1506     }
1507     if (bs->read_only) {
1508         return -EPERM;
1509     }
1510     assert(!(bs->open_flags & BDRV_O_INACTIVE));
1511 
1512     ret = bdrv_check_byte_request(bs, offset, bytes);
1513     if (ret < 0) {
1514         return ret;
1515     }
1516 
1517     bdrv_inc_in_flight(bs);
1518     /*
1519      * Align write if necessary by performing a read-modify-write cycle.
1520      * Pad qiov with the read parts and be sure to have a tracked request not
1521      * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle.
1522      */
1523     tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE);
1524 
1525     if (!qiov) {
1526         ret = bdrv_co_do_zero_pwritev(bs, offset, bytes, flags, &req);
1527         goto out;
1528     }
1529 
1530     if (offset & (align - 1)) {
1531         QEMUIOVector head_qiov;
1532         struct iovec head_iov;
1533 
1534         mark_request_serialising(&req, align);
1535         wait_serialising_requests(&req);
1536 
1537         head_buf = qemu_blockalign(bs, align);
1538         head_iov = (struct iovec) {
1539             .iov_base   = head_buf,
1540             .iov_len    = align,
1541         };
1542         qemu_iovec_init_external(&head_qiov, &head_iov, 1);
1543 
1544         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
1545         ret = bdrv_aligned_preadv(bs, &req, offset & ~(align - 1), align,
1546                                   align, &head_qiov, 0);
1547         if (ret < 0) {
1548             goto fail;
1549         }
1550         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
1551 
1552         qemu_iovec_init(&local_qiov, qiov->niov + 2);
1553         qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
1554         qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
1555         use_local_qiov = true;
1556 
1557         bytes += offset & (align - 1);
1558         offset = offset & ~(align - 1);
1559 
1560         /* We have read the tail already if the request is smaller
1561          * than one aligned block.
1562          */
1563         if (bytes < align) {
1564             qemu_iovec_add(&local_qiov, head_buf + bytes, align - bytes);
1565             bytes = align;
1566         }
1567     }
1568 
1569     if ((offset + bytes) & (align - 1)) {
1570         QEMUIOVector tail_qiov;
1571         struct iovec tail_iov;
1572         size_t tail_bytes;
1573         bool waited;
1574 
1575         mark_request_serialising(&req, align);
1576         waited = wait_serialising_requests(&req);
1577         assert(!waited || !use_local_qiov);
1578 
1579         tail_buf = qemu_blockalign(bs, align);
1580         tail_iov = (struct iovec) {
1581             .iov_base   = tail_buf,
1582             .iov_len    = align,
1583         };
1584         qemu_iovec_init_external(&tail_qiov, &tail_iov, 1);
1585 
1586         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
1587         ret = bdrv_aligned_preadv(bs, &req, (offset + bytes) & ~(align - 1), align,
1588                                   align, &tail_qiov, 0);
1589         if (ret < 0) {
1590             goto fail;
1591         }
1592         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
1593 
1594         if (!use_local_qiov) {
1595             qemu_iovec_init(&local_qiov, qiov->niov + 1);
1596             qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
1597             use_local_qiov = true;
1598         }
1599 
1600         tail_bytes = (offset + bytes) & (align - 1);
1601         qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes);
1602 
1603         bytes = ROUND_UP(bytes, align);
1604     }
1605 
1606     ret = bdrv_aligned_pwritev(bs, &req, offset, bytes, align,
1607                                use_local_qiov ? &local_qiov : qiov,
1608                                flags);
1609 
1610 fail:
1611 
1612     if (use_local_qiov) {
1613         qemu_iovec_destroy(&local_qiov);
1614     }
1615     qemu_vfree(head_buf);
1616     qemu_vfree(tail_buf);
1617 out:
1618     tracked_request_end(&req);
1619     bdrv_dec_in_flight(bs);
1620     return ret;
1621 }
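
/*
 * Mirror image of the read path above; a coroutine caller passes a
 * QEMUIOVector, or NULL together with BDRV_REQ_ZERO_WRITE to zero the range.
 * Minimal sketch (the wrapper function is hypothetical):
 *
 *     static int coroutine_fn example_co_write(BdrvChild *child, int64_t offset,
 *                                              unsigned int bytes, void *buf)
 *     {
 *         QEMUIOVector qiov;
 *         struct iovec iov = {
 *             .iov_base = buf,
 *             .iov_len  = bytes,
 *         };
 *
 *         qemu_iovec_init_external(&qiov, &iov, 1);
 *         return bdrv_co_pwritev(child, offset, bytes, &qiov, 0);
 *     }
 */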
1622 
1623 static int coroutine_fn bdrv_co_do_writev(BdrvChild *child,
1624     int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
1625     BdrvRequestFlags flags)
1626 {
1627     if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
1628         return -EINVAL;
1629     }
1630 
1631     return bdrv_co_pwritev(child, sector_num << BDRV_SECTOR_BITS,
1632                            nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
1633 }
1634 
1635 int coroutine_fn bdrv_co_writev(BdrvChild *child, int64_t sector_num,
1636     int nb_sectors, QEMUIOVector *qiov)
1637 {
1638     trace_bdrv_co_writev(child->bs, sector_num, nb_sectors);
1639 
1640     return bdrv_co_do_writev(child, sector_num, nb_sectors, qiov, 0);
1641 }
1642 
1643 int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
1644                                        int count, BdrvRequestFlags flags)
1645 {
1646     trace_bdrv_co_pwrite_zeroes(child->bs, offset, count, flags);
1647 
1648     if (!(child->bs->open_flags & BDRV_O_UNMAP)) {
1649         flags &= ~BDRV_REQ_MAY_UNMAP;
1650     }
1651 
1652     return bdrv_co_pwritev(child, offset, count, NULL,
1653                            BDRV_REQ_ZERO_WRITE | flags);
1654 }
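
/*
 * Minimal usage sketch (illustrative only, hence kept under #if 0): how a
 * coroutine caller might zero out a byte range.  The helper name below is
 * hypothetical and not part of the block layer API.
 */
#if 0
static int coroutine_fn example_zero_range(BdrvChild *child, int64_t offset,
                                           int count)
{
    /* BDRV_REQ_MAY_UNMAP is masked out above when BDRV_O_UNMAP is not set
     * on the BDS, so requesting it here is always safe. */
    return bdrv_co_pwrite_zeroes(child, offset, count, BDRV_REQ_MAY_UNMAP);
}
#endif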
1655 
1656 /*
1657  * Flush ALL BDSes regardless of whether they are reachable via a BlockBackend or not.
1658  */
1659 int bdrv_flush_all(void)
1660 {
1661     BdrvNextIterator it;
1662     BlockDriverState *bs = NULL;
1663     int result = 0;
1664 
1665     for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
1666         AioContext *aio_context = bdrv_get_aio_context(bs);
1667         int ret;
1668 
1669         aio_context_acquire(aio_context);
1670         ret = bdrv_flush(bs);
1671         if (ret < 0 && !result) {
1672             result = ret;
1673         }
1674         aio_context_release(aio_context);
1675     }
1676 
1677     return result;
1678 }
1679 
1680 
1681 typedef struct BdrvCoGetBlockStatusData {
1682     BlockDriverState *bs;
1683     BlockDriverState *base;
1684     BlockDriverState **file;
1685     int64_t sector_num;
1686     int nb_sectors;
1687     int *pnum;
1688     int64_t ret;
1689     bool done;
1690 } BdrvCoGetBlockStatusData;
1691 
1692 /*
1693  * Returns the allocation status of the specified sectors.
1694  * Drivers not implementing the functionality are assumed to not support
1695  * backing files, hence all their sectors are reported as allocated.
1696  *
1697  * If 'sector_num' is beyond the end of the disk image the return value is 0
1698  * and 'pnum' is set to 0.
1699  *
1700  * 'pnum' is set to the number of sectors (including and immediately following
1701  * the specified sector) that are known to be in the same
1702  * allocated/unallocated state.
1703  *
1704  * 'nb_sectors' is the max value 'pnum' should be set to.  If nb_sectors goes
1705  * beyond the end of the disk image it will be clamped.
1706  *
1707  * If the returned value is positive and the BDRV_BLOCK_OFFSET_VALID bit is
1708  * set, 'file' points to the BDS in which the sector range is allocated.
1709  */
1710 static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs,
1711                                                      int64_t sector_num,
1712                                                      int nb_sectors, int *pnum,
1713                                                      BlockDriverState **file)
1714 {
1715     int64_t total_sectors;
1716     int64_t n;
1717     int64_t ret, ret2;
1718 
1719     total_sectors = bdrv_nb_sectors(bs);
1720     if (total_sectors < 0) {
1721         return total_sectors;
1722     }
1723 
1724     if (sector_num >= total_sectors) {
1725         *pnum = 0;
1726         return 0;
1727     }
1728 
1729     n = total_sectors - sector_num;
1730     if (n < nb_sectors) {
1731         nb_sectors = n;
1732     }
1733 
1734     if (!bs->drv->bdrv_co_get_block_status) {
1735         *pnum = nb_sectors;
1736         ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
1737         if (bs->drv->protocol_name) {
1738             ret |= BDRV_BLOCK_OFFSET_VALID | (sector_num * BDRV_SECTOR_SIZE);
1739         }
1740         return ret;
1741     }
1742 
1743     *file = NULL;
1744     bdrv_inc_in_flight(bs);
1745     ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum,
1746                                             file);
1747     if (ret < 0) {
1748         *pnum = 0;
1749         goto out;
1750     }
1751 
1752     if (ret & BDRV_BLOCK_RAW) {
1753         assert(ret & BDRV_BLOCK_OFFSET_VALID);
1754         ret = bdrv_get_block_status(bs->file->bs, ret >> BDRV_SECTOR_BITS,
1755                                     *pnum, pnum, file);
1756         goto out;
1757     }
1758 
1759     if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
1760         ret |= BDRV_BLOCK_ALLOCATED;
1761     } else {
1762         if (bdrv_unallocated_blocks_are_zero(bs)) {
1763             ret |= BDRV_BLOCK_ZERO;
1764         } else if (bs->backing) {
1765             BlockDriverState *bs2 = bs->backing->bs;
1766             int64_t nb_sectors2 = bdrv_nb_sectors(bs2);
1767             if (nb_sectors2 >= 0 && sector_num >= nb_sectors2) {
1768                 ret |= BDRV_BLOCK_ZERO;
1769             }
1770         }
1771     }
1772 
1773     if (*file && *file != bs &&
1774         (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
1775         (ret & BDRV_BLOCK_OFFSET_VALID)) {
1776         BlockDriverState *file2;
1777         int file_pnum;
1778 
1779         ret2 = bdrv_co_get_block_status(*file, ret >> BDRV_SECTOR_BITS,
1780                                         *pnum, &file_pnum, &file2);
1781         if (ret2 >= 0) {
1782         /* Ignore errors.  This is just providing extra information;
1783          * it is useful but not necessary.
1784              */
1785             if (!file_pnum) {
1786                 /* !file_pnum indicates an offset at or beyond the EOF; it is
1787                  * perfectly valid for the format block driver to point to such
1788                  * offsets, so catch it and mark everything as zero */
1789                 ret |= BDRV_BLOCK_ZERO;
1790             } else {
1791                 /* Limit request to the range reported by the protocol driver */
1792                 *pnum = file_pnum;
1793                 ret |= (ret2 & BDRV_BLOCK_ZERO);
1794             }
1795         }
1796     }
1797 
1798 out:
1799     bdrv_dec_in_flight(bs);
1800     return ret;
1801 }
1802 
1803 static int64_t coroutine_fn bdrv_co_get_block_status_above(BlockDriverState *bs,
1804         BlockDriverState *base,
1805         int64_t sector_num,
1806         int nb_sectors,
1807         int *pnum,
1808         BlockDriverState **file)
1809 {
1810     BlockDriverState *p;
1811     int64_t ret = 0;
1812 
1813     assert(bs != base);
1814     for (p = bs; p != base; p = backing_bs(p)) {
1815         ret = bdrv_co_get_block_status(p, sector_num, nb_sectors, pnum, file);
1816         if (ret < 0 || ret & BDRV_BLOCK_ALLOCATED) {
1817             break;
1818         }
1819         /* [sector_num, pnum] unallocated on this layer, which could be only
1820          * the first part of [sector_num, nb_sectors].  */
1821         nb_sectors = MIN(nb_sectors, *pnum);
1822     }
1823     return ret;
1824 }
1825 
1826 /* Coroutine wrapper for bdrv_get_block_status_above() */
1827 static void coroutine_fn bdrv_get_block_status_above_co_entry(void *opaque)
1828 {
1829     BdrvCoGetBlockStatusData *data = opaque;
1830 
1831     data->ret = bdrv_co_get_block_status_above(data->bs, data->base,
1832                                                data->sector_num,
1833                                                data->nb_sectors,
1834                                                data->pnum,
1835                                                data->file);
1836     data->done = true;
1837 }
1838 
1839 /*
1840  * Synchronous wrapper around bdrv_co_get_block_status_above().
1841  *
1842  * See bdrv_co_get_block_status_above() for details.
1843  */
1844 int64_t bdrv_get_block_status_above(BlockDriverState *bs,
1845                                     BlockDriverState *base,
1846                                     int64_t sector_num,
1847                                     int nb_sectors, int *pnum,
1848                                     BlockDriverState **file)
1849 {
1850     Coroutine *co;
1851     BdrvCoGetBlockStatusData data = {
1852         .bs = bs,
1853         .base = base,
1854         .file = file,
1855         .sector_num = sector_num,
1856         .nb_sectors = nb_sectors,
1857         .pnum = pnum,
1858         .done = false,
1859     };
1860 
1861     if (qemu_in_coroutine()) {
1862         /* Fast-path if already in coroutine context */
1863         bdrv_get_block_status_above_co_entry(&data);
1864     } else {
1865         co = qemu_coroutine_create(bdrv_get_block_status_above_co_entry,
1866                                    &data);
1867         qemu_coroutine_enter(co);
1868         BDRV_POLL_WHILE(bs, !data.done);
1869     }
1870     return data.ret;
1871 }
1872 
1873 int64_t bdrv_get_block_status(BlockDriverState *bs,
1874                               int64_t sector_num,
1875                               int nb_sectors, int *pnum,
1876                               BlockDriverState **file)
1877 {
1878     return bdrv_get_block_status_above(bs, backing_bs(bs),
1879                                        sector_num, nb_sectors, pnum, file);
1880 }
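
/*
 * Usage sketch (illustrative only, hence kept under #if 0): interpreting
 * the result of bdrv_get_block_status().  On success the return value is
 * a combination of BDRV_BLOCK_* flags and *pnum is the number of sectors
 * sharing that status.  The helper name is hypothetical.
 */
#if 0
static void example_report_block_status(BlockDriverState *bs,
                                         int64_t sector_num, int nb_sectors)
{
    BlockDriverState *file = NULL;
    int pnum;
    int64_t ret = bdrv_get_block_status(bs, sector_num, nb_sectors,
                                        &pnum, &file);

    if (ret < 0) {
        error_report("get_block_status failed: %" PRId64, ret);
        return;
    }
    if (ret & BDRV_BLOCK_ZERO) {
        /* the next pnum sectors read as zeroes */
    } else if (ret & BDRV_BLOCK_DATA) {
        /* the next pnum sectors contain data */
    }
    if ((ret & BDRV_BLOCK_OFFSET_VALID) && file) {
        /* 'file' is the BDS in which the data is actually allocated */
    }
}
#endif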
1881 
1882 int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num,
1883                                    int nb_sectors, int *pnum)
1884 {
1885     BlockDriverState *file;
1886     int64_t ret = bdrv_get_block_status(bs, sector_num, nb_sectors, pnum,
1887                                         &file);
1888     if (ret < 0) {
1889         return ret;
1890     }
1891     return !!(ret & BDRV_BLOCK_ALLOCATED);
1892 }
1893 
1894 /*
1895  * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
1896  *
1897  * Return true if the given sector is allocated in any image between
1898  * BASE and TOP (BASE excluded, TOP included).  BASE can be NULL to check
1899  * the whole backing chain.  Return false otherwise.
1900  *
1901  * 'pnum' is set to the number of sectors (including and immediately following
1902  *  the specified sector) that are known to be in the same
1903  *  allocated/unallocated state.
1904  *
1905  */
1906 int bdrv_is_allocated_above(BlockDriverState *top,
1907                             BlockDriverState *base,
1908                             int64_t sector_num,
1909                             int nb_sectors, int *pnum)
1910 {
1911     BlockDriverState *intermediate;
1912     int ret, n = nb_sectors;
1913 
1914     intermediate = top;
1915     while (intermediate && intermediate != base) {
1916         int pnum_inter;
1917         ret = bdrv_is_allocated(intermediate, sector_num, nb_sectors,
1918                                 &pnum_inter);
1919         if (ret < 0) {
1920             return ret;
1921         } else if (ret) {
1922             *pnum = pnum_inter;
1923             return 1;
1924         }
1925 
1926         /*
1927          * [sector_num, nb_sectors] is unallocated on top, but the
1928          * intermediate image might still have
1929          *
1930          * [sector_num+x, nb_sectors] allocated.
1931          */
1932         if (n > pnum_inter &&
1933             (intermediate == top ||
1934              sector_num + pnum_inter < intermediate->total_sectors)) {
1935             n = pnum_inter;
1936         }
1937 
1938         intermediate = backing_bs(intermediate);
1939     }
1940 
1941     *pnum = n;
1942     return 0;
1943 }
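
/*
 * Usage sketch (illustrative only, hence kept under #if 0): scanning a
 * range and checking whether it is backed anywhere above 'base', similar
 * to what commit/stream style code does.  The helper name is hypothetical.
 */
#if 0
static int example_scan_above(BlockDriverState *top, BlockDriverState *base,
                              int64_t sector_num, int nb_sectors)
{
    while (nb_sectors > 0) {
        int pnum;
        int ret = bdrv_is_allocated_above(top, base, sector_num, nb_sectors,
                                          &pnum);
        if (ret < 0) {
            return ret;
        }
        if (pnum == 0) {
            break;      /* past the end of the image */
        }
        /* ret != 0: [sector_num, sector_num + pnum) is allocated somewhere
         * between 'top' and 'base'; ret == 0: it is not. */
        sector_num += pnum;
        nb_sectors -= pnum;
    }
    return 0;
}
#endif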
1944 
1945 typedef struct BdrvVmstateCo {
1946     BlockDriverState    *bs;
1947     QEMUIOVector        *qiov;
1948     int64_t             pos;
1949     bool                is_read;
1950     int                 ret;
1951 } BdrvVmstateCo;
1952 
1953 static int coroutine_fn
1954 bdrv_co_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
1955                    bool is_read)
1956 {
1957     BlockDriver *drv = bs->drv;
1958 
1959     if (!drv) {
1960         return -ENOMEDIUM;
1961     } else if (drv->bdrv_load_vmstate) {
1962         return is_read ? drv->bdrv_load_vmstate(bs, qiov, pos)
1963                        : drv->bdrv_save_vmstate(bs, qiov, pos);
1964     } else if (bs->file) {
1965         return bdrv_co_rw_vmstate(bs->file->bs, qiov, pos, is_read);
1966     }
1967 
1968     return -ENOTSUP;
1969 }
1970 
1971 static void coroutine_fn bdrv_co_rw_vmstate_entry(void *opaque)
1972 {
1973     BdrvVmstateCo *co = opaque;
1974     co->ret = bdrv_co_rw_vmstate(co->bs, co->qiov, co->pos, co->is_read);
1975 }
1976 
1977 static inline int
1978 bdrv_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
1979                 bool is_read)
1980 {
1981     if (qemu_in_coroutine()) {
1982         return bdrv_co_rw_vmstate(bs, qiov, pos, is_read);
1983     } else {
1984         BdrvVmstateCo data = {
1985             .bs         = bs,
1986             .qiov       = qiov,
1987             .pos        = pos,
1988             .is_read    = is_read,
1989             .ret        = -EINPROGRESS,
1990         };
1991         Coroutine *co = qemu_coroutine_create(bdrv_co_rw_vmstate_entry, &data);
1992 
1993         qemu_coroutine_enter(co);
1994         while (data.ret == -EINPROGRESS) {
1995             aio_poll(bdrv_get_aio_context(bs), true);
1996         }
1997         return data.ret;
1998     }
1999 }
2000 
2001 int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
2002                       int64_t pos, int size)
2003 {
2004     QEMUIOVector qiov;
2005     struct iovec iov = {
2006         .iov_base   = (void *) buf,
2007         .iov_len    = size,
2008     };
2009     int ret;
2010 
2011     qemu_iovec_init_external(&qiov, &iov, 1);
2012 
2013     ret = bdrv_writev_vmstate(bs, &qiov, pos);
2014     if (ret < 0) {
2015         return ret;
2016     }
2017 
2018     return size;
2019 }
2020 
2021 int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
2022 {
2023     return bdrv_rw_vmstate(bs, qiov, pos, false);
2024 }
2025 
2026 int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
2027                       int64_t pos, int size)
2028 {
2029     QEMUIOVector qiov;
2030     struct iovec iov = {
2031         .iov_base   = buf,
2032         .iov_len    = size,
2033     };
2034     int ret;
2035 
2036     qemu_iovec_init_external(&qiov, &iov, 1);
2037     ret = bdrv_readv_vmstate(bs, &qiov, pos);
2038     if (ret < 0) {
2039         return ret;
2040     }
2041 
2042     return size;
2043 }
2044 
2045 int bdrv_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
2046 {
2047     return bdrv_rw_vmstate(bs, qiov, pos, true);
2048 }
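
/*
 * Usage sketch (illustrative only, hence kept under #if 0): writing a
 * small blob of state into the vmstate area and reading it back, assuming
 * the image format provides one (e.g. qcow2).  Buffer contents and the
 * helper name are hypothetical.
 */
#if 0
static int example_vmstate_roundtrip(BlockDriverState *bs)
{
    uint8_t out[512] = { 0xaa };
    uint8_t in[512];
    int ret;

    ret = bdrv_save_vmstate(bs, out, 0, sizeof(out));
    if (ret < 0) {
        return ret;                 /* negative errno */
    }
    ret = bdrv_load_vmstate(bs, in, 0, sizeof(in));
    if (ret < 0) {
        return ret;
    }
    return memcmp(in, out, sizeof(out)) == 0 ? 0 : -EIO;
}
#endif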
2049 
2050 /**************************************************************/
2051 /* async I/Os */
2052 
2053 BlockAIOCB *bdrv_aio_readv(BdrvChild *child, int64_t sector_num,
2054                            QEMUIOVector *qiov, int nb_sectors,
2055                            BlockCompletionFunc *cb, void *opaque)
2056 {
2057     trace_bdrv_aio_readv(child->bs, sector_num, nb_sectors, opaque);
2058 
2059     assert(nb_sectors << BDRV_SECTOR_BITS == qiov->size);
2060     return bdrv_co_aio_prw_vector(child, sector_num << BDRV_SECTOR_BITS, qiov,
2061                                   0, cb, opaque, false);
2062 }
2063 
2064 BlockAIOCB *bdrv_aio_writev(BdrvChild *child, int64_t sector_num,
2065                             QEMUIOVector *qiov, int nb_sectors,
2066                             BlockCompletionFunc *cb, void *opaque)
2067 {
2068     trace_bdrv_aio_writev(child->bs, sector_num, nb_sectors, opaque);
2069 
2070     assert(nb_sectors << BDRV_SECTOR_BITS == qiov->size);
2071     return bdrv_co_aio_prw_vector(child, sector_num << BDRV_SECTOR_BITS, qiov,
2072                                   0, cb, opaque, true);
2073 }
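
/*
 * Usage sketch (illustrative only, hence kept under #if 0): issuing an
 * asynchronous read and releasing the buffer from the completion callback.
 * The request state must stay alive until the callback runs, so it is
 * heap-allocated here.  All names below are hypothetical.
 */
#if 0
typedef struct ExampleRead {
    QEMUIOVector qiov;
    struct iovec iov;
    uint8_t *buf;
} ExampleRead;

static void example_read_done(void *opaque, int ret)
{
    ExampleRead *er = opaque;

    if (ret < 0) {
        error_report("aio read failed: %d", ret);
    }
    qemu_vfree(er->buf);
    g_free(er);
}

static void example_submit_read(BdrvChild *child, int64_t sector_num)
{
    int nb_sectors = 8;
    ExampleRead *er = g_new0(ExampleRead, 1);

    er->buf = qemu_blockalign(child->bs, nb_sectors * BDRV_SECTOR_SIZE);
    er->iov = (struct iovec) {
        .iov_base = er->buf,
        .iov_len  = nb_sectors * BDRV_SECTOR_SIZE,
    };
    /* qiov->size must match nb_sectors (asserted in bdrv_aio_readv) */
    qemu_iovec_init_external(&er->qiov, &er->iov, 1);

    bdrv_aio_readv(child, sector_num, &er->qiov, nb_sectors,
                   example_read_done, er);
}
#endif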
2074 
2075 void bdrv_aio_cancel(BlockAIOCB *acb)
2076 {
2077     qemu_aio_ref(acb);
2078     bdrv_aio_cancel_async(acb);
2079     while (acb->refcnt > 1) {
2080         if (acb->aiocb_info->get_aio_context) {
2081             aio_poll(acb->aiocb_info->get_aio_context(acb), true);
2082         } else if (acb->bs) {
2083             aio_poll(bdrv_get_aio_context(acb->bs), true);
2084         } else {
2085             abort();
2086         }
2087     }
2088     qemu_aio_unref(acb);
2089 }
2090 
2091 /* Async version of aio cancel. The caller is not blocked if the acb implements
2092  * cancel_async; otherwise we do nothing and let the request complete normally.
2093  * In either case the completion callback must be called. */
2094 void bdrv_aio_cancel_async(BlockAIOCB *acb)
2095 {
2096     if (acb->aiocb_info->cancel_async) {
2097         acb->aiocb_info->cancel_async(acb);
2098     }
2099 }
2100 
2101 /**************************************************************/
2102 /* async block device emulation */
2103 
2104 typedef struct BlockRequest {
2105     union {
2106         /* Used during read, write, trim */
2107         struct {
2108             int64_t offset;
2109             int bytes;
2110             int flags;
2111             QEMUIOVector *qiov;
2112         };
2113         /* Used during ioctl */
2114         struct {
2115             int req;
2116             void *buf;
2117         };
2118     };
2119     BlockCompletionFunc *cb;
2120     void *opaque;
2121 
2122     int error;
2123 } BlockRequest;
2124 
2125 typedef struct BlockAIOCBCoroutine {
2126     BlockAIOCB common;
2127     BdrvChild *child;
2128     BlockRequest req;
2129     bool is_write;
2130     bool need_bh;
2131     bool *done;
2132 } BlockAIOCBCoroutine;
2133 
2134 static const AIOCBInfo bdrv_em_co_aiocb_info = {
2135     .aiocb_size         = sizeof(BlockAIOCBCoroutine),
2136 };
2137 
2138 static void bdrv_co_complete(BlockAIOCBCoroutine *acb)
2139 {
2140     if (!acb->need_bh) {
2141         bdrv_dec_in_flight(acb->common.bs);
2142         acb->common.cb(acb->common.opaque, acb->req.error);
2143         qemu_aio_unref(acb);
2144     }
2145 }
2146 
2147 static void bdrv_co_em_bh(void *opaque)
2148 {
2149     BlockAIOCBCoroutine *acb = opaque;
2150 
2151     assert(!acb->need_bh);
2152     bdrv_co_complete(acb);
2153 }
2154 
2155 static void bdrv_co_maybe_schedule_bh(BlockAIOCBCoroutine *acb)
2156 {
2157     acb->need_bh = false;
2158     if (acb->req.error != -EINPROGRESS) {
2159         BlockDriverState *bs = acb->common.bs;
2160 
2161         aio_bh_schedule_oneshot(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
2162     }
2163 }
2164 
2165 /* Invoke bdrv_co_preadv/bdrv_co_pwritev */
2166 static void coroutine_fn bdrv_co_do_rw(void *opaque)
2167 {
2168     BlockAIOCBCoroutine *acb = opaque;
2169 
2170     if (!acb->is_write) {
2171         acb->req.error = bdrv_co_preadv(acb->child, acb->req.offset,
2172             acb->req.qiov->size, acb->req.qiov, acb->req.flags);
2173     } else {
2174         acb->req.error = bdrv_co_pwritev(acb->child, acb->req.offset,
2175             acb->req.qiov->size, acb->req.qiov, acb->req.flags);
2176     }
2177 
2178     bdrv_co_complete(acb);
2179 }
2180 
2181 static BlockAIOCB *bdrv_co_aio_prw_vector(BdrvChild *child,
2182                                           int64_t offset,
2183                                           QEMUIOVector *qiov,
2184                                           BdrvRequestFlags flags,
2185                                           BlockCompletionFunc *cb,
2186                                           void *opaque,
2187                                           bool is_write)
2188 {
2189     Coroutine *co;
2190     BlockAIOCBCoroutine *acb;
2191 
2192     /* Matched by bdrv_co_complete's bdrv_dec_in_flight.  */
2193     bdrv_inc_in_flight(child->bs);
2194 
2195     acb = qemu_aio_get(&bdrv_em_co_aiocb_info, child->bs, cb, opaque);
2196     acb->child = child;
2197     acb->need_bh = true;
2198     acb->req.error = -EINPROGRESS;
2199     acb->req.offset = offset;
2200     acb->req.qiov = qiov;
2201     acb->req.flags = flags;
2202     acb->is_write = is_write;
2203 
2204     co = qemu_coroutine_create(bdrv_co_do_rw, acb);
2205     qemu_coroutine_enter(co);
2206 
2207     bdrv_co_maybe_schedule_bh(acb);
2208     return &acb->common;
2209 }
2210 
2211 static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque)
2212 {
2213     BlockAIOCBCoroutine *acb = opaque;
2214     BlockDriverState *bs = acb->common.bs;
2215 
2216     acb->req.error = bdrv_co_flush(bs);
2217     bdrv_co_complete(acb);
2218 }
2219 
2220 BlockAIOCB *bdrv_aio_flush(BlockDriverState *bs,
2221         BlockCompletionFunc *cb, void *opaque)
2222 {
2223     trace_bdrv_aio_flush(bs, opaque);
2224 
2225     Coroutine *co;
2226     BlockAIOCBCoroutine *acb;
2227 
2228     /* Matched by bdrv_co_complete's bdrv_dec_in_flight.  */
2229     bdrv_inc_in_flight(bs);
2230 
2231     acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
2232     acb->need_bh = true;
2233     acb->req.error = -EINPROGRESS;
2234 
2235     co = qemu_coroutine_create(bdrv_aio_flush_co_entry, acb);
2236     qemu_coroutine_enter(co);
2237 
2238     bdrv_co_maybe_schedule_bh(acb);
2239     return &acb->common;
2240 }
2241 
2242 void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
2243                    BlockCompletionFunc *cb, void *opaque)
2244 {
2245     BlockAIOCB *acb;
2246 
2247     acb = g_malloc(aiocb_info->aiocb_size);
2248     acb->aiocb_info = aiocb_info;
2249     acb->bs = bs;
2250     acb->cb = cb;
2251     acb->opaque = opaque;
2252     acb->refcnt = 1;
2253     return acb;
2254 }
2255 
2256 void qemu_aio_ref(void *p)
2257 {
2258     BlockAIOCB *acb = p;
2259     acb->refcnt++;
2260 }
2261 
2262 void qemu_aio_unref(void *p)
2263 {
2264     BlockAIOCB *acb = p;
2265     assert(acb->refcnt > 0);
2266     if (--acb->refcnt == 0) {
2267         g_free(acb);
2268     }
2269 }
2270 
2271 /**************************************************************/
2272 /* Coroutine block device emulation */
2273 
2274 typedef struct FlushCo {
2275     BlockDriverState *bs;
2276     int ret;
2277 } FlushCo;
2278 
2279 
2280 static void coroutine_fn bdrv_flush_co_entry(void *opaque)
2281 {
2282     FlushCo *rwco = opaque;
2283 
2284     rwco->ret = bdrv_co_flush(rwco->bs);
2285 }
2286 
2287 int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
2288 {
2289     int ret;
2290 
2291     if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs) ||
2292         bdrv_is_sg(bs)) {
2293         return 0;
2294     }
2295 
2296     bdrv_inc_in_flight(bs);
2297 
2298     int current_gen = bs->write_gen;
2299 
2300     /* Wait until any previous flushes are completed */
2301     while (bs->active_flush_req) {
2302         qemu_co_queue_wait(&bs->flush_queue);
2303     }
2304 
2305     bs->active_flush_req = true;
2306 
2307     /* Write back all layers by calling one driver function */
2308     if (bs->drv->bdrv_co_flush) {
2309         ret = bs->drv->bdrv_co_flush(bs);
2310         goto out;
2311     }
2312 
2313     /* Write back cached data to the OS even with cache=unsafe */
2314     BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS);
2315     if (bs->drv->bdrv_co_flush_to_os) {
2316         ret = bs->drv->bdrv_co_flush_to_os(bs);
2317         if (ret < 0) {
2318             goto out;
2319         }
2320     }
2321 
2322     /* But don't actually force it to the disk with cache=unsafe */
2323     if (bs->open_flags & BDRV_O_NO_FLUSH) {
2324         goto flush_parent;
2325     }
2326 
2327     /* Check if we really need to flush anything */
2328     if (bs->flushed_gen == current_gen) {
2329         goto flush_parent;
2330     }
2331 
2332     BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK);
2333     if (bs->drv->bdrv_co_flush_to_disk) {
2334         ret = bs->drv->bdrv_co_flush_to_disk(bs);
2335     } else if (bs->drv->bdrv_aio_flush) {
2336         BlockAIOCB *acb;
2337         CoroutineIOCompletion co = {
2338             .coroutine = qemu_coroutine_self(),
2339         };
2340 
2341         acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
2342         if (acb == NULL) {
2343             ret = -EIO;
2344         } else {
2345             qemu_coroutine_yield();
2346             ret = co.ret;
2347         }
2348     } else {
2349         /*
2350          * Some block drivers always operate in either writethrough or unsafe
2351          * mode and therefore don't support bdrv_flush. Usually qemu doesn't
2352          * know how the server works (because the behaviour is hardcoded or
2353          * depends on server-side configuration), so we can't ensure that
2354          * everything is safe on disk. Returning an error doesn't work because
2355          * that would break guests even if the server operates in writethrough
2356          * mode.
2357          *
2358          * Let's hope the user knows what he's doing.
2359          */
2360         ret = 0;
2361     }
2362 
2363     if (ret < 0) {
2364         goto out;
2365     }
2366 
2367     /* Now flush the underlying protocol.  It will also have BDRV_O_NO_FLUSH
2368      * set in the case of cache=unsafe, so there are no useless flushes.
2369      */
2370 flush_parent:
2371     ret = bs->file ? bdrv_co_flush(bs->file->bs) : 0;
2372 out:
2373     /* Notify any pending flushes that we have completed */
2374     if (ret == 0) {
2375         bs->flushed_gen = current_gen;
2376     }
2377     bs->active_flush_req = false;
2378     /* Return value is ignored - it's ok if wait queue is empty */
2379     qemu_co_queue_next(&bs->flush_queue);
2380 
2381     bdrv_dec_in_flight(bs);
2382     return ret;
2383 }
2384 
2385 int bdrv_flush(BlockDriverState *bs)
2386 {
2387     Coroutine *co;
2388     FlushCo flush_co = {
2389         .bs = bs,
2390         .ret = NOT_DONE,
2391     };
2392 
2393     if (qemu_in_coroutine()) {
2394         /* Fast-path if already in coroutine context */
2395         bdrv_flush_co_entry(&flush_co);
2396     } else {
2397         co = qemu_coroutine_create(bdrv_flush_co_entry, &flush_co);
2398         qemu_coroutine_enter(co);
2399         BDRV_POLL_WHILE(bs, flush_co.ret == NOT_DONE);
2400     }
2401 
2402     return flush_co.ret;
2403 }
2404 
2405 typedef struct DiscardCo {
2406     BlockDriverState *bs;
2407     int64_t offset;
2408     int count;
2409     int ret;
2410 } DiscardCo;
2411 static void coroutine_fn bdrv_pdiscard_co_entry(void *opaque)
2412 {
2413     DiscardCo *rwco = opaque;
2414 
2415     rwco->ret = bdrv_co_pdiscard(rwco->bs, rwco->offset, rwco->count);
2416 }
2417 
2418 int coroutine_fn bdrv_co_pdiscard(BlockDriverState *bs, int64_t offset,
2419                                   int count)
2420 {
2421     BdrvTrackedRequest req;
2422     int max_pdiscard, ret;
2423     int head, tail, align;
2424 
2425     if (!bs->drv) {
2426         return -ENOMEDIUM;
2427     }
2428 
2429     ret = bdrv_check_byte_request(bs, offset, count);
2430     if (ret < 0) {
2431         return ret;
2432     } else if (bs->read_only) {
2433         return -EPERM;
2434     }
2435     assert(!(bs->open_flags & BDRV_O_INACTIVE));
2436 
2437     /* Do nothing if disabled.  */
2438     if (!(bs->open_flags & BDRV_O_UNMAP)) {
2439         return 0;
2440     }
2441 
2442     if (!bs->drv->bdrv_co_pdiscard && !bs->drv->bdrv_aio_pdiscard) {
2443         return 0;
2444     }
2445 
2446     /* Discard is advisory, but some devices track and coalesce
2447      * unaligned requests, so we must pass everything down rather than
2448      * rounding here.  Still, most devices will just ignore unaligned
2449      * requests (by returning -ENOTSUP), so we must fragment
2450      * the request accordingly.  */
2451     align = MAX(bs->bl.pdiscard_alignment, bs->bl.request_alignment);
2452     assert(align % bs->bl.request_alignment == 0);
2453     head = offset % align;
2454     tail = (offset + count) % align;
2455 
2456     bdrv_inc_in_flight(bs);
2457     tracked_request_begin(&req, bs, offset, count, BDRV_TRACKED_DISCARD);
2458 
2459     ret = notifier_with_return_list_notify(&bs->before_write_notifiers, &req);
2460     if (ret < 0) {
2461         goto out;
2462     }
2463 
2464     max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT_MAX),
2465                                    align);
2466     assert(max_pdiscard >= bs->bl.request_alignment);
2467 
2468     while (count > 0) {
2469         int ret;
2470         int num = count;
2471 
2472         if (head) {
2473             /* Make small requests to get to alignment boundaries. */
2474             num = MIN(count, align - head);
2475             if (!QEMU_IS_ALIGNED(num, bs->bl.request_alignment)) {
2476                 num %= bs->bl.request_alignment;
2477             }
2478             head = (head + num) % align;
2479             assert(num < max_pdiscard);
2480         } else if (tail) {
2481             if (num > align) {
2482                 /* Shorten the request to the last aligned cluster.  */
2483                 num -= tail;
2484             } else if (!QEMU_IS_ALIGNED(tail, bs->bl.request_alignment) &&
2485                        tail > bs->bl.request_alignment) {
2486                 tail %= bs->bl.request_alignment;
2487                 num -= tail;
2488             }
2489         }
2490         /* limit request size */
2491         if (num > max_pdiscard) {
2492             num = max_pdiscard;
2493         }
2494 
2495         if (bs->drv->bdrv_co_pdiscard) {
2496             ret = bs->drv->bdrv_co_pdiscard(bs, offset, num);
2497         } else {
2498             BlockAIOCB *acb;
2499             CoroutineIOCompletion co = {
2500                 .coroutine = qemu_coroutine_self(),
2501             };
2502 
2503             acb = bs->drv->bdrv_aio_pdiscard(bs, offset, num,
2504                                              bdrv_co_io_em_complete, &co);
2505             if (acb == NULL) {
2506                 ret = -EIO;
2507                 goto out;
2508             } else {
2509                 qemu_coroutine_yield();
2510                 ret = co.ret;
2511             }
2512         }
2513         if (ret && ret != -ENOTSUP) {
2514             goto out;
2515         }
2516 
2517         offset += num;
2518         count -= num;
2519     }
2520     ret = 0;
2521 out:
2522     ++bs->write_gen;
2523     bdrv_set_dirty(bs, req.offset >> BDRV_SECTOR_BITS,
2524                    req.bytes >> BDRV_SECTOR_BITS);
2525     tracked_request_end(&req);
2526     bdrv_dec_in_flight(bs);
2527     return ret;
2528 }
2529 
2530 int bdrv_pdiscard(BlockDriverState *bs, int64_t offset, int count)
2531 {
2532     Coroutine *co;
2533     DiscardCo rwco = {
2534         .bs = bs,
2535         .offset = offset,
2536         .count = count,
2537         .ret = NOT_DONE,
2538     };
2539 
2540     if (qemu_in_coroutine()) {
2541         /* Fast-path if already in coroutine context */
2542         bdrv_pdiscard_co_entry(&rwco);
2543     } else {
2544         co = qemu_coroutine_create(bdrv_pdiscard_co_entry, &rwco);
2545         qemu_coroutine_enter(co);
2546         BDRV_POLL_WHILE(bs, rwco.ret == NOT_DONE);
2547     }
2548 
2549     return rwco.ret;
2550 }
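
/*
 * Usage sketch (illustrative only, hence kept under #if 0): discarding a
 * byte range.  Discard is advisory, so a return value of 0 does not mean
 * the blocks were actually unmapped (e.g. when BDRV_O_UNMAP is not set or
 * the driver implements neither discard callback).  The helper name is
 * hypothetical.
 */
#if 0
static int example_discard_range(BlockDriverState *bs, int64_t offset,
                                 int count)
{
    int ret = bdrv_pdiscard(bs, offset, count);

    if (ret < 0) {
        error_report("discard of %d bytes at %" PRId64 " failed: %d",
                     count, offset, ret);
    }
    return ret;
}
#endif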
2551 
2552 int bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf)
2553 {
2554     BlockDriver *drv = bs->drv;
2555     CoroutineIOCompletion co = {
2556         .coroutine = qemu_coroutine_self(),
2557     };
2558     BlockAIOCB *acb;
2559 
2560     bdrv_inc_in_flight(bs);
2561     if (!drv || (!drv->bdrv_aio_ioctl && !drv->bdrv_co_ioctl)) {
2562         co.ret = -ENOTSUP;
2563         goto out;
2564     }
2565 
2566     if (drv->bdrv_co_ioctl) {
2567         co.ret = drv->bdrv_co_ioctl(bs, req, buf);
2568     } else {
2569         acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co);
2570         if (!acb) {
2571             co.ret = -ENOTSUP;
2572             goto out;
2573         }
2574         qemu_coroutine_yield();
2575     }
2576 out:
2577     bdrv_dec_in_flight(bs);
2578     return co.ret;
2579 }
2580 
2581 void *qemu_blockalign(BlockDriverState *bs, size_t size)
2582 {
2583     return qemu_memalign(bdrv_opt_mem_align(bs), size);
2584 }
2585 
2586 void *qemu_blockalign0(BlockDriverState *bs, size_t size)
2587 {
2588     return memset(qemu_blockalign(bs, size), 0, size);
2589 }
2590 
2591 void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
2592 {
2593     size_t align = bdrv_opt_mem_align(bs);
2594 
2595     /* Ensure that NULL is never returned on success */
2596     assert(align > 0);
2597     if (size == 0) {
2598         size = align;
2599     }
2600 
2601     return qemu_try_memalign(align, size);
2602 }
2603 
2604 void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
2605 {
2606     void *mem = qemu_try_blockalign(bs, size);
2607 
2608     if (mem) {
2609         memset(mem, 0, size);
2610     }
2611 
2612     return mem;
2613 }
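
/*
 * Usage sketch (illustrative only, hence kept under #if 0): allocating a
 * zero-filled, properly aligned bounce buffer, as the RMW path above does
 * for unaligned heads and tails.  The helper name is hypothetical.
 */
#if 0
static void *example_bounce_buffer(BlockDriverState *bs, size_t size)
{
    void *buf = qemu_try_blockalign0(bs, size);

    if (!buf) {
        error_report("failed to allocate %zu byte bounce buffer", size);
    }
    return buf;                     /* caller releases with qemu_vfree() */
}
#endif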
2614 
2615 /*
2616  * Check if all memory in this vector meets the minimum memory alignment.
2617  */
2618 bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
2619 {
2620     int i;
2621     size_t alignment = bdrv_min_mem_align(bs);
2622 
2623     for (i = 0; i < qiov->niov; i++) {
2624         if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
2625             return false;
2626         }
2627         if (qiov->iov[i].iov_len % alignment) {
2628             return false;
2629         }
2630     }
2631 
2632     return true;
2633 }
2634 
2635 void bdrv_add_before_write_notifier(BlockDriverState *bs,
2636                                     NotifierWithReturn *notifier)
2637 {
2638     notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
2639 }
2640 
2641 void bdrv_io_plug(BlockDriverState *bs)
2642 {
2643     BdrvChild *child;
2644 
2645     QLIST_FOREACH(child, &bs->children, next) {
2646         bdrv_io_plug(child->bs);
2647     }
2648 
2649     if (bs->io_plugged++ == 0) {
2650         BlockDriver *drv = bs->drv;
2651         if (drv && drv->bdrv_io_plug) {
2652             drv->bdrv_io_plug(bs);
2653         }
2654     }
2655 }
2656 
2657 void bdrv_io_unplug(BlockDriverState *bs)
2658 {
2659     BdrvChild *child;
2660 
2661     assert(bs->io_plugged);
2662     if (--bs->io_plugged == 0) {
2663         BlockDriver *drv = bs->drv;
2664         if (drv && drv->bdrv_io_unplug) {
2665             drv->bdrv_io_unplug(bs);
2666         }
2667     }
2668 
2669     QLIST_FOREACH(child, &bs->children, next) {
2670         bdrv_io_unplug(child->bs);
2671     }
2672 }
2673
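
/*
 * Usage sketch (illustrative only, hence kept under #if 0): batching
 * several asynchronous writes between bdrv_io_plug() and bdrv_io_unplug()
 * so that drivers which implement plugging can submit them to the host in
 * one go.  All names below are hypothetical.
 */
#if 0
static void example_submit_write_batch(BdrvChild *child,
                                       QEMUIOVector *qiovs[],
                                       int64_t sector_nums[], int nb_reqs,
                                       BlockCompletionFunc *cb, void *opaque)
{
    int i;

    bdrv_io_plug(child->bs);
    for (i = 0; i < nb_reqs; i++) {
        bdrv_aio_writev(child, sector_nums[i], qiovs[i],
                        qiovs[i]->size >> BDRV_SECTOR_BITS, cb, opaque);
    }
    bdrv_io_unplug(child->bs);
}
#endif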