/* xref: /openbmc/qemu/block/io.c (revision 2b3912f1350971fbc2c04d986a1d0c60ae757c78) */
/*
 * Block layer I/O functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "trace.h"
#include "sysemu/block-backend.h"
#include "block/aio-wait.h"
#include "block/blockjob.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "block/coroutines.h"
#include "block/dirty-bitmap.h"
#include "block/write-threshold.h"
#include "qemu/cutils.h"
#include "qemu/memalign.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "sysemu/replay.h"

/* Maximum bounce buffer for copy-on-read and write zeroes, in bytes */
#define MAX_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS)

static void bdrv_parent_cb_resize(BlockDriverState *bs);
static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int64_t bytes, BdrvRequestFlags flags);

static void bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore)
{
    BdrvChild *c, *next;

    QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
        if (c == ignore) {
            continue;
        }
        bdrv_parent_drained_begin_single(c);
    }
}

void bdrv_parent_drained_end_single(BdrvChild *c)
{
    GLOBAL_STATE_CODE();

    assert(c->quiesced_parent);
    c->quiesced_parent = false;

    if (c->klass->drained_end) {
        c->klass->drained_end(c);
    }
}

static void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c == ignore) {
            continue;
        }
        bdrv_parent_drained_end_single(c);
    }
}

bool bdrv_parent_drained_poll_single(BdrvChild *c)
{
    if (c->klass->drained_poll) {
        return c->klass->drained_poll(c);
    }
    return false;
}

static bool bdrv_parent_drained_poll(BlockDriverState *bs, BdrvChild *ignore,
                                     bool ignore_bds_parents)
{
    BdrvChild *c, *next;
    bool busy = false;

    QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
        if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) {
            continue;
        }
        busy |= bdrv_parent_drained_poll_single(c);
    }

    return busy;
}

void bdrv_parent_drained_begin_single(BdrvChild *c)
{
    GLOBAL_STATE_CODE();

    assert(!c->quiesced_parent);
    c->quiesced_parent = true;

    if (c->klass->drained_begin) {
        c->klass->drained_begin(c);
    }
}

static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src)
{
    dst->pdiscard_alignment = MAX(dst->pdiscard_alignment,
                                  src->pdiscard_alignment);
    dst->opt_transfer = MAX(dst->opt_transfer, src->opt_transfer);
    dst->max_transfer = MIN_NON_ZERO(dst->max_transfer, src->max_transfer);
    dst->max_hw_transfer = MIN_NON_ZERO(dst->max_hw_transfer,
                                        src->max_hw_transfer);
    dst->opt_mem_alignment = MAX(dst->opt_mem_alignment,
                                 src->opt_mem_alignment);
    dst->min_mem_alignment = MAX(dst->min_mem_alignment,
                                 src->min_mem_alignment);
    dst->max_iov = MIN_NON_ZERO(dst->max_iov, src->max_iov);
    dst->max_hw_iov = MIN_NON_ZERO(dst->max_hw_iov, src->max_hw_iov);
}
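
/*
 * Worked example (illustrative numbers, hypothetical caller): merging a
 * child whose max_transfer is 1 MiB into a parent whose max_transfer is
 * still 0 keeps 1 MiB, because MIN_NON_ZERO() treats 0 as "unlimited",
 * while two non-zero transfer limits combine to the smaller one and the
 * alignment fields combine to the larger one via MAX():
 *
 *     BlockLimits parent = { 0 };
 *     BlockLimits child = { .max_transfer = 1 * MiB };
 *     bdrv_merge_limits(&parent, &child);
 *     // parent.max_transfer == 1 * MiB
 */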

typedef struct BdrvRefreshLimitsState {
    BlockDriverState *bs;
    BlockLimits old_bl;
} BdrvRefreshLimitsState;

static void bdrv_refresh_limits_abort(void *opaque)
{
    BdrvRefreshLimitsState *s = opaque;

    s->bs->bl = s->old_bl;
}

static TransactionActionDrv bdrv_refresh_limits_drv = {
    .abort = bdrv_refresh_limits_abort,
    .clean = g_free,
};

/* @tran is allowed to be NULL, in this case no rollback is possible. */
void bdrv_refresh_limits(BlockDriverState *bs, Transaction *tran, Error **errp)
{
    ERRP_GUARD();
    BlockDriver *drv = bs->drv;
    BdrvChild *c;
    bool have_limits;

    GLOBAL_STATE_CODE();

    if (tran) {
        BdrvRefreshLimitsState *s = g_new(BdrvRefreshLimitsState, 1);
        *s = (BdrvRefreshLimitsState) {
            .bs = bs,
            .old_bl = bs->bl,
        };
        tran_add(tran, &bdrv_refresh_limits_drv, s);
    }

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return;
    }

    /* Default alignment based on whether driver has byte interface */
    bs->bl.request_alignment = (drv->bdrv_co_preadv ||
                                drv->bdrv_aio_preadv ||
                                drv->bdrv_co_preadv_part) ? 1 : 512;

    /* Take some limits from the children as a default */
    have_limits = false;
    QLIST_FOREACH(c, &bs->children, next) {
        if (c->role & (BDRV_CHILD_DATA | BDRV_CHILD_FILTERED | BDRV_CHILD_COW))
        {
            bdrv_merge_limits(&bs->bl, &c->bs->bl);
            have_limits = true;
        }

        if (c->role & BDRV_CHILD_FILTERED) {
            bs->bl.has_variable_length |= c->bs->bl.has_variable_length;
        }
    }

    if (!have_limits) {
        bs->bl.min_mem_alignment = 512;
        bs->bl.opt_mem_alignment = qemu_real_host_page_size();

        /* Safe default since most protocols use readv()/writev()/etc */
        bs->bl.max_iov = IOV_MAX;
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        drv->bdrv_refresh_limits(bs, errp);
        if (*errp) {
            return;
        }
    }

    if (bs->bl.request_alignment > BDRV_MAX_ALIGNMENT) {
        error_setg(errp, "Driver requires too large request alignment");
    }
}

/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    IO_CODE();
    qatomic_inc(&bs->copy_on_read);
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    int old = qatomic_fetch_dec(&bs->copy_on_read);
    IO_CODE();
    assert(old >= 1);
}
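
/*
 * Illustrative usage sketch (hypothetical caller): because the flag is a
 * reference count, enable/disable calls must be balanced and may nest:
 *
 *     bdrv_enable_copy_on_read(bs);
 *     ... issue reads that should populate the top layer ...
 *     bdrv_disable_copy_on_read(bs);
 */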

typedef struct {
    Coroutine *co;
    BlockDriverState *bs;
    bool done;
    bool begin;
    bool poll;
    BdrvChild *parent;
} BdrvCoDrainData;

/* Returns true if BDRV_POLL_WHILE() should go into a blocking aio_poll() */
bool bdrv_drain_poll(BlockDriverState *bs, BdrvChild *ignore_parent,
                     bool ignore_bds_parents)
{
    GLOBAL_STATE_CODE();

    if (bdrv_parent_drained_poll(bs, ignore_parent, ignore_bds_parents)) {
        return true;
    }

    if (qatomic_read(&bs->in_flight)) {
        return true;
    }

    return false;
}

static bool bdrv_drain_poll_top_level(BlockDriverState *bs,
                                      BdrvChild *ignore_parent)
{
    return bdrv_drain_poll(bs, ignore_parent, false);
}

static void bdrv_do_drained_begin(BlockDriverState *bs, BdrvChild *parent,
                                  bool poll);
static void bdrv_do_drained_end(BlockDriverState *bs, BdrvChild *parent);

static void bdrv_co_drain_bh_cb(void *opaque)
{
    BdrvCoDrainData *data = opaque;
    Coroutine *co = data->co;
    BlockDriverState *bs = data->bs;

    if (bs) {
        AioContext *ctx = bdrv_get_aio_context(bs);
        aio_context_acquire(ctx);
        bdrv_dec_in_flight(bs);
        if (data->begin) {
            bdrv_do_drained_begin(bs, data->parent, data->poll);
        } else {
            assert(!data->poll);
            bdrv_do_drained_end(bs, data->parent);
        }
        aio_context_release(ctx);
    } else {
        assert(data->begin);
        bdrv_drain_all_begin();
    }

    data->done = true;
    aio_co_wake(co);
}

static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
                                                bool begin,
                                                BdrvChild *parent,
                                                bool poll)
{
    BdrvCoDrainData data;
    Coroutine *self = qemu_coroutine_self();
    AioContext *ctx = bdrv_get_aio_context(bs);
    AioContext *co_ctx = qemu_coroutine_get_aio_context(self);

    /* Calling bdrv_drain() from a BH ensures the current coroutine yields and
     * other coroutines run if they were queued by aio_co_enter(). */

    assert(qemu_in_coroutine());
    data = (BdrvCoDrainData) {
        .co = self,
        .bs = bs,
        .done = false,
        .begin = begin,
        .parent = parent,
        .poll = poll,
    };

    if (bs) {
        bdrv_inc_in_flight(bs);
    }

    /*
     * Temporarily drop the lock across yield or we would get deadlocks.
     * bdrv_co_drain_bh_cb() reacquires the lock as needed.
     *
     * When we yield below, the lock for the current context will be
     * released, so if this is actually the lock that protects bs, don't drop
     * it a second time.
     */
    if (ctx != co_ctx) {
        aio_context_release(ctx);
    }
    replay_bh_schedule_oneshot_event(qemu_get_aio_context(),
                                     bdrv_co_drain_bh_cb, &data);

    qemu_coroutine_yield();
    /* If we are resumed from some other event (such as an aio completion or a
     * timer callback), it is a bug in the caller that should be fixed. */
    assert(data.done);

    /* Reacquire the AioContext of bs if we dropped it */
    if (ctx != co_ctx) {
        aio_context_acquire(ctx);
    }
}

static void bdrv_do_drained_begin(BlockDriverState *bs, BdrvChild *parent,
                                  bool poll)
{
    IO_OR_GS_CODE();

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs, true, parent, poll);
        return;
    }

    GLOBAL_STATE_CODE();

    /* Stop things in parent-to-child order */
    if (qatomic_fetch_inc(&bs->quiesce_counter) == 0) {
        bdrv_parent_drained_begin(bs, parent);
        if (bs->drv && bs->drv->bdrv_drain_begin) {
            bs->drv->bdrv_drain_begin(bs);
        }
    }

    /*
     * Wait for drained requests to finish.
     *
     * Calling BDRV_POLL_WHILE() only once for the top-level node is okay: The
     * call is needed so things in this AioContext can make progress even
     * though we don't return to the main AioContext loop - this automatically
     * includes other nodes in the same AioContext and therefore all child
     * nodes.
     */
    if (poll) {
        BDRV_POLL_WHILE(bs, bdrv_drain_poll_top_level(bs, parent));
    }
}

void bdrv_do_drained_begin_quiesce(BlockDriverState *bs, BdrvChild *parent)
{
    bdrv_do_drained_begin(bs, parent, false);
}

void coroutine_mixed_fn
bdrv_drained_begin(BlockDriverState *bs)
{
    IO_OR_GS_CODE();
    bdrv_do_drained_begin(bs, NULL, true);
}

/**
 * This function does not poll, nor must any of its recursively called
 * functions.
 */
static void bdrv_do_drained_end(BlockDriverState *bs, BdrvChild *parent)
{
    int old_quiesce_counter;

    IO_OR_GS_CODE();

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs, false, parent, false);
        return;
    }
    assert(bs->quiesce_counter > 0);
    GLOBAL_STATE_CODE();

    /* Re-enable things in child-to-parent order */
    old_quiesce_counter = qatomic_fetch_dec(&bs->quiesce_counter);
    if (old_quiesce_counter == 1) {
        if (bs->drv && bs->drv->bdrv_drain_end) {
            bs->drv->bdrv_drain_end(bs);
        }
        bdrv_parent_drained_end(bs, parent);
    }
}

void bdrv_drained_end(BlockDriverState *bs)
{
    IO_OR_GS_CODE();
    bdrv_do_drained_end(bs, NULL);
}

void bdrv_drain(BlockDriverState *bs)
{
    IO_OR_GS_CODE();
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}

static void bdrv_drain_assert_idle(BlockDriverState *bs)
{
    BdrvChild *child, *next;

    assert(qatomic_read(&bs->in_flight) == 0);
    QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
        bdrv_drain_assert_idle(child->bs);
    }
}

unsigned int bdrv_drain_all_count = 0;

static bool bdrv_drain_all_poll(void)
{
    BlockDriverState *bs = NULL;
    bool result = false;
    GLOBAL_STATE_CODE();

    /* bdrv_drain_poll() can't make changes to the graph and we are holding the
     * main AioContext lock, so iterating bdrv_next_all_states() is safe. */
    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);
        aio_context_acquire(aio_context);
        result |= bdrv_drain_poll(bs, NULL, true);
        aio_context_release(aio_context);
    }

    return result;
}

/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 *
 * This pauses all block jobs and disables external clients. It must
 * be paired with bdrv_drain_all_end().
 *
 * NOTE: no new block jobs or BlockDriverStates can be created between
 * the bdrv_drain_all_begin() and bdrv_drain_all_end() calls.
 */
void bdrv_drain_all_begin_nopoll(void)
{
    BlockDriverState *bs = NULL;
    GLOBAL_STATE_CODE();

    /*
     * The bdrv queue is managed by record/replay; waiting for the
     * in-flight I/O requests to finish could block forever
     */
    if (replay_events_enabled()) {
        return;
    }

    /* AIO_WAIT_WHILE() with a NULL context can only be called from the main
     * loop AioContext, so make sure we're in the main context. */
    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    assert(bdrv_drain_all_count < INT_MAX);
    bdrv_drain_all_count++;

    /* Quiesce all nodes, without polling in-flight requests yet. The graph
     * cannot change during this loop. */
    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_do_drained_begin(bs, NULL, false);
        aio_context_release(aio_context);
    }
}

void coroutine_mixed_fn bdrv_drain_all_begin(void)
{
    BlockDriverState *bs = NULL;

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(NULL, true, NULL, true);
        return;
    }

    /*
     * The bdrv queue is managed by record/replay; waiting for the
     * in-flight I/O requests to finish could block forever
     */
    if (replay_events_enabled()) {
        return;
    }

    bdrv_drain_all_begin_nopoll();

    /* Now poll the in-flight requests */
    AIO_WAIT_WHILE_UNLOCKED(NULL, bdrv_drain_all_poll());

    while ((bs = bdrv_next_all_states(bs))) {
        bdrv_drain_assert_idle(bs);
    }
}

void bdrv_drain_all_end_quiesce(BlockDriverState *bs)
{
    GLOBAL_STATE_CODE();

    g_assert(bs->quiesce_counter > 0);
    g_assert(!bs->refcnt);

    while (bs->quiesce_counter) {
        bdrv_do_drained_end(bs, NULL);
    }
}

void bdrv_drain_all_end(void)
{
    BlockDriverState *bs = NULL;
    GLOBAL_STATE_CODE();

    /*
     * The bdrv queue is managed by record/replay; waiting for the
     * in-flight I/O requests to finish could block forever
     */
    if (replay_events_enabled()) {
        return;
    }

    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_do_drained_end(bs, NULL);
        aio_context_release(aio_context);
    }

    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    assert(bdrv_drain_all_count > 0);
    bdrv_drain_all_count--;
}

void bdrv_drain_all(void)
{
    GLOBAL_STATE_CODE();
    bdrv_drain_all_begin();
    bdrv_drain_all_end();
}
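
/*
 * Illustrative sketch (hypothetical caller): graph-manipulating code
 * typically brackets its critical section with the begin/end pair so that
 * no request is in flight while the graph changes; bdrv_drain_all() above
 * is exactly this pair with an empty critical section:
 *
 *     bdrv_drain_all_begin();
 *     ... reconfigure the block graph ...
 *     bdrv_drain_all_end();
 */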

/**
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void coroutine_fn tracked_request_end(BdrvTrackedRequest *req)
{
    if (req->serialising) {
        qatomic_dec(&req->bs->serialising_in_flight);
    }

    qemu_mutex_lock(&req->bs->reqs_lock);
    QLIST_REMOVE(req, list);
    qemu_mutex_unlock(&req->bs->reqs_lock);

    /*
     * At this point qemu_co_queue_wait(&req->wait_queue, ...) won't be called
     * anymore because the request has been removed from the list, so it's safe
     * to restart the queue outside reqs_lock to minimize the critical section.
     */
    qemu_co_queue_restart_all(&req->wait_queue);
}

/**
 * Add an active request to the tracked requests list
 */
static void coroutine_fn tracked_request_begin(BdrvTrackedRequest *req,
                                               BlockDriverState *bs,
                                               int64_t offset,
                                               int64_t bytes,
                                               enum BdrvTrackedRequestType type)
{
    bdrv_check_request(offset, bytes, &error_abort);

    *req = (BdrvTrackedRequest){
        .bs = bs,
        .offset         = offset,
        .bytes          = bytes,
        .type           = type,
        .co             = qemu_coroutine_self(),
        .serialising    = false,
        .overlap_offset = offset,
        .overlap_bytes  = bytes,
    };

    qemu_co_queue_init(&req->wait_queue);

    qemu_mutex_lock(&bs->reqs_lock);
    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
    qemu_mutex_unlock(&bs->reqs_lock);
}

static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t offset, int64_t bytes)
{
    bdrv_check_request(offset, bytes, &error_abort);

    /*        aaaa   bbbb */
    if (offset >= req->overlap_offset + req->overlap_bytes) {
        return false;
    }
    /* bbbb   aaaa        */
    if (req->overlap_offset >= offset + bytes) {
        return false;
    }
    return true;
}
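
/*
 * Worked example (illustrative numbers): a request with overlap_offset = 512
 * and overlap_bytes = 1024 covers [512, 1536).  Probing offset = 1024,
 * bytes = 1024 (i.e. [1024, 2048)) overlaps it; probing offset = 1536,
 * bytes = 512 does not, because 1536 >= 512 + 1024 triggers the first test.
 */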

/* Called with self->bs->reqs_lock held */
static coroutine_fn BdrvTrackedRequest *
bdrv_find_conflicting_request(BdrvTrackedRequest *self)
{
    BdrvTrackedRequest *req;

    QLIST_FOREACH(req, &self->bs->tracked_requests, list) {
        if (req == self || (!req->serialising && !self->serialising)) {
            continue;
        }
        if (tracked_request_overlaps(req, self->overlap_offset,
                                     self->overlap_bytes))
        {
            /*
             * Hitting this means there was a reentrant request, for
             * example, a block driver issuing nested requests.  This must
             * never happen since it means deadlock.
             */
            assert(qemu_coroutine_self() != req->co);

            /*
             * If the request is already (indirectly) waiting for us, or
             * will wait for us as soon as it wakes up, then just go on
             * (instead of producing a deadlock in the former case).
             */
            if (!req->waiting_for) {
                return req;
            }
        }
    }

    return NULL;
}

/* Called with self->bs->reqs_lock held */
static void coroutine_fn
bdrv_wait_serialising_requests_locked(BdrvTrackedRequest *self)
{
    BdrvTrackedRequest *req;

    while ((req = bdrv_find_conflicting_request(self))) {
        self->waiting_for = req;
        qemu_co_queue_wait(&req->wait_queue, &self->bs->reqs_lock);
        self->waiting_for = NULL;
    }
}

/* Called with req->bs->reqs_lock held */
static void tracked_request_set_serialising(BdrvTrackedRequest *req,
                                            uint64_t align)
{
    int64_t overlap_offset = req->offset & ~(align - 1);
    int64_t overlap_bytes =
        ROUND_UP(req->offset + req->bytes, align) - overlap_offset;

    bdrv_check_request(req->offset, req->bytes, &error_abort);

    if (!req->serialising) {
        qatomic_inc(&req->bs->serialising_in_flight);
        req->serialising = true;
    }

    req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
    req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
}
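
/*
 * Worked example (illustrative numbers): for req->offset = 700,
 * req->bytes = 1000 and align = 512, overlap_offset = 700 & ~511 = 512 and
 * overlap_bytes = ROUND_UP(1700, 512) - 512 = 1536, i.e. the serialising
 * window is widened to whole 512-byte units around the request.
 */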

/**
 * Return the tracked request on @bs for the current coroutine, or
 * NULL if there is none.
 */
BdrvTrackedRequest *coroutine_fn bdrv_co_get_self_request(BlockDriverState *bs)
{
    BdrvTrackedRequest *req;
    Coroutine *self = qemu_coroutine_self();
    IO_CODE();

    QLIST_FOREACH(req, &bs->tracked_requests, list) {
        if (req->co == self) {
            return req;
        }
    }

    return NULL;
}

/**
 * Round a region to subcluster (if supported) or cluster boundaries
 */
void coroutine_fn GRAPH_RDLOCK
bdrv_round_to_subclusters(BlockDriverState *bs, int64_t offset, int64_t bytes,
                          int64_t *align_offset, int64_t *align_bytes)
{
    BlockDriverInfo bdi;
    IO_CODE();
    if (bdrv_co_get_info(bs, &bdi) < 0 || bdi.subcluster_size == 0) {
        *align_offset = offset;
        *align_bytes = bytes;
    } else {
        int64_t c = bdi.subcluster_size;
        *align_offset = QEMU_ALIGN_DOWN(offset, c);
        *align_bytes = QEMU_ALIGN_UP(offset - *align_offset + bytes, c);
    }
}
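
/*
 * Worked example (illustrative numbers): with bdi.subcluster_size = 512,
 * offset = 700 and bytes = 1000, *align_offset becomes 512 and *align_bytes
 * becomes QEMU_ALIGN_UP(188 + 1000, 512) = 1536, so the aligned region
 * [512, 2048) covers the original [700, 1700).
 */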

static int coroutine_fn GRAPH_RDLOCK bdrv_get_cluster_size(BlockDriverState *bs)
{
    BlockDriverInfo bdi;
    int ret;

    ret = bdrv_co_get_info(bs, &bdi);
    if (ret < 0 || bdi.cluster_size == 0) {
        return bs->bl.request_alignment;
    } else {
        return bdi.cluster_size;
    }
}

void bdrv_inc_in_flight(BlockDriverState *bs)
{
    IO_CODE();
    qatomic_inc(&bs->in_flight);
}

void bdrv_wakeup(BlockDriverState *bs)
{
    IO_CODE();
    aio_wait_kick();
}

void bdrv_dec_in_flight(BlockDriverState *bs)
{
    IO_CODE();
    qatomic_dec(&bs->in_flight);
    bdrv_wakeup(bs);
}

static void coroutine_fn
bdrv_wait_serialising_requests(BdrvTrackedRequest *self)
{
    BlockDriverState *bs = self->bs;

    if (!qatomic_read(&bs->serialising_in_flight)) {
        return;
    }

    qemu_mutex_lock(&bs->reqs_lock);
    bdrv_wait_serialising_requests_locked(self);
    qemu_mutex_unlock(&bs->reqs_lock);
}

void coroutine_fn bdrv_make_request_serialising(BdrvTrackedRequest *req,
                                                uint64_t align)
{
    IO_CODE();

    qemu_mutex_lock(&req->bs->reqs_lock);

    tracked_request_set_serialising(req, align);
    bdrv_wait_serialising_requests_locked(req);

    qemu_mutex_unlock(&req->bs->reqs_lock);
}

int bdrv_check_qiov_request(int64_t offset, int64_t bytes,
                            QEMUIOVector *qiov, size_t qiov_offset,
                            Error **errp)
{
    /*
     * Check generic offset/bytes correctness
     */

    if (offset < 0) {
        error_setg(errp, "offset is negative: %" PRIi64, offset);
        return -EIO;
    }

    if (bytes < 0) {
        error_setg(errp, "bytes is negative: %" PRIi64, bytes);
        return -EIO;
    }

    if (bytes > BDRV_MAX_LENGTH) {
        error_setg(errp, "bytes(%" PRIi64 ") exceeds maximum(%" PRIi64 ")",
                   bytes, BDRV_MAX_LENGTH);
        return -EIO;
    }

    if (offset > BDRV_MAX_LENGTH) {
        error_setg(errp, "offset(%" PRIi64 ") exceeds maximum(%" PRIi64 ")",
                   offset, BDRV_MAX_LENGTH);
        return -EIO;
    }

    if (offset > BDRV_MAX_LENGTH - bytes) {
        error_setg(errp, "sum of offset(%" PRIi64 ") and bytes(%" PRIi64 ") "
                   "exceeds maximum(%" PRIi64 ")", offset, bytes,
                   BDRV_MAX_LENGTH);
        return -EIO;
    }

    if (!qiov) {
        return 0;
    }

    /*
     * Check qiov and qiov_offset
     */

    if (qiov_offset > qiov->size) {
        error_setg(errp, "qiov_offset(%zu) overflow io vector size(%zu)",
                   qiov_offset, qiov->size);
        return -EIO;
    }

    if (bytes > qiov->size - qiov_offset) {
        error_setg(errp, "bytes(%" PRIi64 ") + qiov_offset(%zu) overflow io "
                   "vector size(%zu)", bytes, qiov_offset, qiov->size);
        return -EIO;
    }

    return 0;
}

int bdrv_check_request(int64_t offset, int64_t bytes, Error **errp)
{
    return bdrv_check_qiov_request(offset, bytes, NULL, 0, errp);
}

static int bdrv_check_request32(int64_t offset, int64_t bytes,
                                QEMUIOVector *qiov, size_t qiov_offset)
{
    int ret = bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, NULL);
    if (ret < 0) {
        return ret;
    }

    if (bytes > BDRV_REQUEST_MAX_BYTES) {
        return -EIO;
    }

    return 0;
}

/*
 * Completely zero out a block device with the help of bdrv_pwrite_zeroes.
 * The operation is sped up by checking the block status and writing
 * zeroes only to regions that do not already read back as zeroes. Optional
 * flags are passed through to bdrv_pwrite_zeroes (e.g. BDRV_REQ_MAY_UNMAP,
 * BDRV_REQ_FUA).
 *
 * Returns < 0 on error, 0 on success. For error codes see bdrv_pwrite().
 */
int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags)
{
    int ret;
    int64_t target_size, bytes, offset = 0;
    BlockDriverState *bs = child->bs;
    IO_CODE();

    target_size = bdrv_getlength(bs);
    if (target_size < 0) {
        return target_size;
    }

    for (;;) {
        bytes = MIN(target_size - offset, BDRV_REQUEST_MAX_BYTES);
        if (bytes <= 0) {
            return 0;
        }
        ret = bdrv_block_status(bs, offset, bytes, &bytes, NULL, NULL);
        if (ret < 0) {
            return ret;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            offset += bytes;
            continue;
        }
        ret = bdrv_pwrite_zeroes(child, offset, bytes, flags);
        if (ret < 0) {
            return ret;
        }
        offset += bytes;
    }
}
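
/*
 * Illustrative use (hypothetical caller, error handling elided): zero a
 * whole device while letting the driver unmap blocks instead of writing
 * literal zeroes where it can:
 *
 *     int ret = bdrv_make_zero(child, BDRV_REQ_MAY_UNMAP);
 *     if (ret < 0) {
 *         ... report the error ...
 *     }
 */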

/*
 * Writes to the file and ensures that no writes are reordered across this
 * request (acts as a barrier)
 *
 * Returns 0 on success, -errno in error cases.
 */
int coroutine_fn bdrv_co_pwrite_sync(BdrvChild *child, int64_t offset,
                                     int64_t bytes, const void *buf,
                                     BdrvRequestFlags flags)
{
    int ret;
    IO_CODE();
    assert_bdrv_graph_readable();

    ret = bdrv_co_pwrite(child, offset, bytes, buf, flags);
    if (ret < 0) {
        return ret;
    }

    ret = bdrv_co_flush(child->bs);
    if (ret < 0) {
        return ret;
    }

    return 0;
}

typedef struct CoroutineIOCompletion {
    Coroutine *coroutine;
    int ret;
} CoroutineIOCompletion;

static void bdrv_co_io_em_complete(void *opaque, int ret)
{
    CoroutineIOCompletion *co = opaque;

    co->ret = ret;
    aio_co_wake(co->coroutine);
}

static int coroutine_fn GRAPH_RDLOCK
bdrv_driver_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
                   QEMUIOVector *qiov, size_t qiov_offset, int flags)
{
    BlockDriver *drv = bs->drv;
    int64_t sector_num;
    unsigned int nb_sectors;
    QEMUIOVector local_qiov;
    int ret;
    assert_bdrv_graph_readable();

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
    assert(!(flags & ~bs->supported_read_flags));

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (drv->bdrv_co_preadv_part) {
        return drv->bdrv_co_preadv_part(bs, offset, bytes, qiov, qiov_offset,
                                        flags);
    }

    if (qiov_offset > 0 || bytes != qiov->size) {
        qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
        qiov = &local_qiov;
    }

    if (drv->bdrv_co_preadv) {
        ret = drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags);
        goto out;
    }

    if (drv->bdrv_aio_preadv) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = drv->bdrv_aio_preadv(bs, offset, bytes, qiov, flags,
                                   bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
            goto out;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
            goto out;
        }
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
    assert(bytes <= BDRV_REQUEST_MAX_BYTES);
    assert(drv->bdrv_co_readv);

    ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);

out:
    if (qiov == &local_qiov) {
        qemu_iovec_destroy(&local_qiov);
    }

    return ret;
}

static int coroutine_fn GRAPH_RDLOCK
bdrv_driver_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
                    QEMUIOVector *qiov, size_t qiov_offset,
                    BdrvRequestFlags flags)
{
    BlockDriver *drv = bs->drv;
    bool emulate_fua = false;
    int64_t sector_num;
    unsigned int nb_sectors;
    QEMUIOVector local_qiov;
    int ret;
    assert_bdrv_graph_readable();

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);

    if (!drv) {
        return -ENOMEDIUM;
    }

    if ((flags & BDRV_REQ_FUA) &&
        (~bs->supported_write_flags & BDRV_REQ_FUA)) {
        flags &= ~BDRV_REQ_FUA;
        emulate_fua = true;
    }

    flags &= bs->supported_write_flags;

    if (drv->bdrv_co_pwritev_part) {
        ret = drv->bdrv_co_pwritev_part(bs, offset, bytes, qiov, qiov_offset,
                                        flags);
        goto emulate_flags;
    }

    if (qiov_offset > 0 || bytes != qiov->size) {
        qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
        qiov = &local_qiov;
    }

    if (drv->bdrv_co_pwritev) {
        ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov, flags);
        goto emulate_flags;
    }

    if (drv->bdrv_aio_pwritev) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = drv->bdrv_aio_pwritev(bs, offset, bytes, qiov, flags,
                                    bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
        }
        goto emulate_flags;
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
    assert(bytes <= BDRV_REQUEST_MAX_BYTES);

    assert(drv->bdrv_co_writev);
    ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov, flags);

emulate_flags:
    if (ret == 0 && emulate_fua) {
        ret = bdrv_co_flush(bs);
    }

    if (qiov == &local_qiov) {
        qemu_iovec_destroy(&local_qiov);
    }

    return ret;
}

static int coroutine_fn GRAPH_RDLOCK
bdrv_driver_pwritev_compressed(BlockDriverState *bs, int64_t offset,
                               int64_t bytes, QEMUIOVector *qiov,
                               size_t qiov_offset)
{
    BlockDriver *drv = bs->drv;
    QEMUIOVector local_qiov;
    int ret;
    assert_bdrv_graph_readable();

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (!block_driver_can_compress(drv)) {
        return -ENOTSUP;
    }

    if (drv->bdrv_co_pwritev_compressed_part) {
        return drv->bdrv_co_pwritev_compressed_part(bs, offset, bytes,
                                                    qiov, qiov_offset);
    }

    if (qiov_offset == 0) {
        return drv->bdrv_co_pwritev_compressed(bs, offset, bytes, qiov);
    }

    qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
    ret = drv->bdrv_co_pwritev_compressed(bs, offset, bytes, &local_qiov);
    qemu_iovec_destroy(&local_qiov);

    return ret;
}

static int coroutine_fn GRAPH_RDLOCK
bdrv_co_do_copy_on_readv(BdrvChild *child, int64_t offset, int64_t bytes,
                         QEMUIOVector *qiov, size_t qiov_offset, int flags)
{
    BlockDriverState *bs = child->bs;

    /* Perform I/O through a temporary buffer so that users who scribble over
     * their read buffer while the operation is in progress do not end up
     * modifying the image file.  This is critical for zero-copy guest I/O
     * where anything might happen inside guest memory.
     */
    void *bounce_buffer = NULL;

    BlockDriver *drv = bs->drv;
    int64_t align_offset;
    int64_t align_bytes;
    int64_t skip_bytes;
    int ret;
    int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer,
                                    BDRV_REQUEST_MAX_BYTES);
    int64_t progress = 0;
    bool skip_write;

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);

    if (!drv) {
        return -ENOMEDIUM;
    }

    /*
     * Do not write anything when the BDS is inactive.  That is not
     * allowed, and it would not help.
     */
    skip_write = (bs->open_flags & BDRV_O_INACTIVE);

    /* FIXME We cannot require callers to have write permissions when all they
     * are doing is a read request. If we did things right, write permissions
     * would be obtained anyway, but internally by the copy-on-read code. As
     * long as it is implemented here rather than in a separate filter driver,
     * the copy-on-read code doesn't have its own BdrvChild, however, for which
     * it could request permissions. Therefore we have to bypass the permission
     * system for the moment. */
    // assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));

    /* Cover entire cluster so no additional backing file I/O is required when
     * allocating cluster in the image file.  Note that this value may exceed
     * BDRV_REQUEST_MAX_BYTES (even when the original read did not), which
     * is one reason we loop rather than doing it all at once.
     */
    bdrv_round_to_subclusters(bs, offset, bytes, &align_offset, &align_bytes);
    skip_bytes = offset - align_offset;

    trace_bdrv_co_do_copy_on_readv(bs, offset, bytes,
                                   align_offset, align_bytes);

    while (align_bytes) {
        int64_t pnum;

        if (skip_write) {
            ret = 1; /* "already allocated", so nothing will be copied */
            pnum = MIN(align_bytes, max_transfer);
        } else {
            ret = bdrv_co_is_allocated(bs, align_offset,
                                       MIN(align_bytes, max_transfer), &pnum);
            if (ret < 0) {
                /*
                 * Safe to treat errors in querying allocation as if
                 * unallocated; we'll probably fail again soon on the
                 * read, but at least that will set a decent errno.
                 */
                pnum = MIN(align_bytes, max_transfer);
            }

            /* Stop at EOF if the image ends in the middle of the cluster */
            if (ret == 0 && pnum == 0) {
                assert(progress >= bytes);
                break;
            }

            assert(skip_bytes < pnum);
        }

        if (ret <= 0) {
            QEMUIOVector local_qiov;

            /* Must copy-on-read; use the bounce buffer */
            pnum = MIN(pnum, MAX_BOUNCE_BUFFER);
            if (!bounce_buffer) {
                int64_t max_we_need = MAX(pnum, align_bytes - pnum);
                int64_t max_allowed = MIN(max_transfer, MAX_BOUNCE_BUFFER);
                int64_t bounce_buffer_len = MIN(max_we_need, max_allowed);

                bounce_buffer = qemu_try_blockalign(bs, bounce_buffer_len);
                if (!bounce_buffer) {
                    ret = -ENOMEM;
                    goto err;
                }
            }
            qemu_iovec_init_buf(&local_qiov, bounce_buffer, pnum);

            ret = bdrv_driver_preadv(bs, align_offset, pnum,
                                     &local_qiov, 0, 0);
            if (ret < 0) {
                goto err;
            }

            bdrv_co_debug_event(bs, BLKDBG_COR_WRITE);
            if (drv->bdrv_co_pwrite_zeroes &&
                buffer_is_zero(bounce_buffer, pnum)) {
                /* FIXME: Should we (perhaps conditionally) be setting
                 * BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy
                 * that still correctly reads as zero? */
                ret = bdrv_co_do_pwrite_zeroes(bs, align_offset, pnum,
                                               BDRV_REQ_WRITE_UNCHANGED);
            } else {
                /* This does not change the data on the disk, it is not
                 * necessary to flush even in cache=writethrough mode.
                 */
                ret = bdrv_driver_pwritev(bs, align_offset, pnum,
                                          &local_qiov, 0,
                                          BDRV_REQ_WRITE_UNCHANGED);
            }

            if (ret < 0) {
                /* It might be okay to ignore write errors for guest
                 * requests.  If this is a deliberate copy-on-read
                 * then we don't want to ignore the error.  Simply
                 * report it in all cases.
                 */
                goto err;
            }

            if (!(flags & BDRV_REQ_PREFETCH)) {
                qemu_iovec_from_buf(qiov, qiov_offset + progress,
                                    bounce_buffer + skip_bytes,
                                    MIN(pnum - skip_bytes, bytes - progress));
            }
        } else if (!(flags & BDRV_REQ_PREFETCH)) {
            /* Read directly into the destination */
            ret = bdrv_driver_preadv(bs, offset + progress,
                                     MIN(pnum - skip_bytes, bytes - progress),
                                     qiov, qiov_offset + progress, 0);
            if (ret < 0) {
                goto err;
            }
        }

        align_offset += pnum;
        align_bytes -= pnum;
        progress += pnum - skip_bytes;
        skip_bytes = 0;
    }
    ret = 0;

err:
    qemu_vfree(bounce_buffer);
    return ret;
}

/*
 * Forwards an already correctly aligned request to the BlockDriver. This
 * handles copy on read, zeroing after EOF, and fragmentation of large
 * reads; any other features must be implemented by the caller.
 */
static int coroutine_fn GRAPH_RDLOCK
bdrv_aligned_preadv(BdrvChild *child, BdrvTrackedRequest *req,
                    int64_t offset, int64_t bytes, int64_t align,
                    QEMUIOVector *qiov, size_t qiov_offset, int flags)
{
    BlockDriverState *bs = child->bs;
    int64_t total_bytes, max_bytes;
    int ret = 0;
    int64_t bytes_remaining = bytes;
    int max_transfer;

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
    assert(is_power_of_2(align));
    assert((offset & (align - 1)) == 0);
    assert((bytes & (align - 1)) == 0);
    assert((bs->open_flags & BDRV_O_NO_IO) == 0);
    max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
                                   align);

    /*
     * TODO: We would need a per-BDS .supported_read_flags and
     * potential fallback support, if we ever implement any read flags
     * to pass through to drivers.  For now, there aren't any
     * passthrough flags except the BDRV_REQ_REGISTERED_BUF optimization hint.
     */
    assert(!(flags & ~(BDRV_REQ_COPY_ON_READ | BDRV_REQ_PREFETCH |
                       BDRV_REQ_REGISTERED_BUF)));

    /* Handle Copy on Read and associated serialisation */
    if (flags & BDRV_REQ_COPY_ON_READ) {
        /* If we touch the same cluster it counts as an overlap.  This
         * guarantees that allocating writes will be serialized and not race
         * with each other for the same cluster.  For example, in copy-on-read
         * it ensures that the CoR read and write operations are atomic and
         * guest writes cannot interleave between them. */
        bdrv_make_request_serialising(req, bdrv_get_cluster_size(bs));
    } else {
        bdrv_wait_serialising_requests(req);
    }

    if (flags & BDRV_REQ_COPY_ON_READ) {
        int64_t pnum;

        /* The flag BDRV_REQ_COPY_ON_READ has reached its addressee */
        flags &= ~BDRV_REQ_COPY_ON_READ;

        ret = bdrv_co_is_allocated(bs, offset, bytes, &pnum);
        if (ret < 0) {
            goto out;
        }

        if (!ret || pnum != bytes) {
            ret = bdrv_co_do_copy_on_readv(child, offset, bytes,
                                           qiov, qiov_offset, flags);
            goto out;
        } else if (flags & BDRV_REQ_PREFETCH) {
            goto out;
        }
    }

    /* Forward the request to the BlockDriver, possibly fragmenting it */
    total_bytes = bdrv_co_getlength(bs);
    if (total_bytes < 0) {
        ret = total_bytes;
        goto out;
    }

    assert(!(flags & ~(bs->supported_read_flags | BDRV_REQ_REGISTERED_BUF)));

    max_bytes = ROUND_UP(MAX(0, total_bytes - offset), align);
    if (bytes <= max_bytes && bytes <= max_transfer) {
        ret = bdrv_driver_preadv(bs, offset, bytes, qiov, qiov_offset, flags);
        goto out;
    }

    while (bytes_remaining) {
        int64_t num;

        if (max_bytes) {
            num = MIN(bytes_remaining, MIN(max_bytes, max_transfer));
            assert(num);

            ret = bdrv_driver_preadv(bs, offset + bytes - bytes_remaining,
                                     num, qiov,
                                     qiov_offset + bytes - bytes_remaining,
                                     flags);
            max_bytes -= num;
        } else {
            num = bytes_remaining;
            ret = qemu_iovec_memset(qiov, qiov_offset + bytes - bytes_remaining,
                                    0, bytes_remaining);
        }
        if (ret < 0) {
            goto out;
        }
        bytes_remaining -= num;
    }

out:
    return ret < 0 ? ret : 0;
}

/*
 * Request padding
 *
 *  |<---- align ----->|                     |<----- align ---->|
 *  |<- head ->|<------------- bytes ------------->|<-- tail -->|
 *  |          |       |                     |     |            |
 * -*----------$-------*-------- ... --------*-----$------------*---
 *  |          |       |                     |     |            |
 *  |          offset  |                     |     end          |
 *  ALIGN_DOWN(offset) ALIGN_UP(offset)      ALIGN_DOWN(end)   ALIGN_UP(end)
 *  [buf   ... )                             [tail_buf          )
 *
 * @buf is an aligned allocation needed to store @head and @tail paddings. @head
 * is placed at the beginning of @buf and @tail at the end.
 *
 * @tail_buf is a pointer to the sub-buffer corresponding to the align-sized
 * chunk around the tail, if a tail exists.
 *
 * @merge_reads is true for small requests, i.e. when
 * @buf_len == @head + bytes + @tail. In this case both head and tail may
 * exist even though @buf_len == align and @tail_buf == @buf.
 *
 * @write is true for write requests, false for read requests.
 *
 * If padding makes the vector too long (exceeding IOV_MAX), then we need to
 * merge existing vector elements into a single one.  @collapse_bounce_buf acts
 * as the bounce buffer in such cases.  @pre_collapse_qiov has the pre-collapse
 * I/O vector elements so for read requests, the data can be copied back after
 * the read is done.
 */
typedef struct BdrvRequestPadding {
    uint8_t *buf;
    size_t buf_len;
    uint8_t *tail_buf;
    size_t head;
    size_t tail;
    bool merge_reads;
    bool write;
    QEMUIOVector local_qiov;

    uint8_t *collapse_bounce_buf;
    size_t collapse_len;
    QEMUIOVector pre_collapse_qiov;
} BdrvRequestPadding;

static bool bdrv_init_padding(BlockDriverState *bs,
                              int64_t offset, int64_t bytes,
                              bool write,
                              BdrvRequestPadding *pad)
{
    int64_t align = bs->bl.request_alignment;
    int64_t sum;

    bdrv_check_request(offset, bytes, &error_abort);
    assert(align <= INT_MAX); /* documented in block/block_int.h */
    assert(align <= SIZE_MAX / 2); /* so we can allocate the buffer */

    memset(pad, 0, sizeof(*pad));

    pad->head = offset & (align - 1);
    pad->tail = ((offset + bytes) & (align - 1));
    if (pad->tail) {
        pad->tail = align - pad->tail;
    }

    if (!pad->head && !pad->tail) {
        return false;
    }

    assert(bytes); /* Nothing good in aligning zero-length requests */

    sum = pad->head + bytes + pad->tail;
    pad->buf_len = (sum > align && pad->head && pad->tail) ? 2 * align : align;
    pad->buf = qemu_blockalign(bs, pad->buf_len);
    pad->merge_reads = sum == pad->buf_len;
    if (pad->tail) {
        pad->tail_buf = pad->buf + pad->buf_len - align;
    }

    pad->write = write;

    return true;
}
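
/*
 * Worked example (illustrative numbers): with request_alignment = 512,
 * offset = 700 and bytes = 1000, head = 700 & 511 = 188 and
 * tail = 512 - (1700 & 511) = 348.  sum = 188 + 1000 + 348 = 1536 exceeds
 * align with both head and tail present, so buf_len = 2 * 512 = 1024,
 * merge_reads is false and tail_buf points at pad->buf + 512.
 */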

static int coroutine_fn GRAPH_RDLOCK
bdrv_padding_rmw_read(BdrvChild *child, BdrvTrackedRequest *req,
                      BdrvRequestPadding *pad, bool zero_middle)
{
    QEMUIOVector local_qiov;
    BlockDriverState *bs = child->bs;
    uint64_t align = bs->bl.request_alignment;
    int ret;

    assert(req->serialising && pad->buf);

    if (pad->head || pad->merge_reads) {
        int64_t bytes = pad->merge_reads ? pad->buf_len : align;

        qemu_iovec_init_buf(&local_qiov, pad->buf, bytes);

        if (pad->head) {
            bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
        }
        if (pad->merge_reads && pad->tail) {
            bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        }
        ret = bdrv_aligned_preadv(child, req, req->overlap_offset, bytes,
                                  align, &local_qiov, 0, 0);
        if (ret < 0) {
            return ret;
        }
        if (pad->head) {
            bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
        }
        if (pad->merge_reads && pad->tail) {
            bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
        }

        if (pad->merge_reads) {
            goto zero_mem;
        }
    }

    if (pad->tail) {
        qemu_iovec_init_buf(&local_qiov, pad->tail_buf, align);

        bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
        ret = bdrv_aligned_preadv(
                child, req,
                req->overlap_offset + req->overlap_bytes - align,
                align, align, &local_qiov, 0, 0);
        if (ret < 0) {
            return ret;
        }
        bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
    }

zero_mem:
    if (zero_middle) {
        memset(pad->buf + pad->head, 0, pad->buf_len - pad->head - pad->tail);
    }

    return 0;
}

/**
 * Free *pad's associated buffers, and perform any necessary finalization steps.
 */
static void bdrv_padding_finalize(BdrvRequestPadding *pad)
{
    if (pad->collapse_bounce_buf) {
        if (!pad->write) {
            /*
             * If padding required elements in the vector to be collapsed into a
             * bounce buffer, copy the bounce buffer content back
             */
            qemu_iovec_from_buf(&pad->pre_collapse_qiov, 0,
                                pad->collapse_bounce_buf, pad->collapse_len);
        }
        qemu_vfree(pad->collapse_bounce_buf);
        qemu_iovec_destroy(&pad->pre_collapse_qiov);
    }
    if (pad->buf) {
        qemu_vfree(pad->buf);
        qemu_iovec_destroy(&pad->local_qiov);
    }
    memset(pad, 0, sizeof(*pad));
}

/*
 * Create pad->local_qiov by wrapping @iov in the padding head and tail, while
 * ensuring that the resulting vector will not exceed IOV_MAX elements.
 *
 * To ensure this, when necessary, the first two or three elements of @iov are
 * merged into pad->collapse_bounce_buf and replaced by a reference to that
 * bounce buffer in pad->local_qiov.
 *
 * After performing a read request, the data from the bounce buffer must be
 * copied back into pad->pre_collapse_qiov (e.g. by bdrv_padding_finalize()).
 */
static int bdrv_create_padded_qiov(BlockDriverState *bs,
                                   BdrvRequestPadding *pad,
                                   struct iovec *iov, int niov,
                                   size_t iov_offset, size_t bytes)
{
    int padded_niov, surplus_count, collapse_count;

    /* Assert this invariant */
    assert(niov <= IOV_MAX);

    /*
     * Cannot pad if resulting length would exceed SIZE_MAX.  Returning an error
     * to the guest is not ideal, but there is little else we can do.  At least
     * this will practically never happen on 64-bit systems.
     */
    if (SIZE_MAX - pad->head < bytes ||
        SIZE_MAX - pad->head - bytes < pad->tail)
    {
        return -EINVAL;
    }

    /* Length of the resulting IOV if we just concatenated everything */
    padded_niov = !!pad->head + niov + !!pad->tail;

    qemu_iovec_init(&pad->local_qiov, MIN(padded_niov, IOV_MAX));

    if (pad->head) {
        qemu_iovec_add(&pad->local_qiov, pad->buf, pad->head);
    }

    /*
     * If padded_niov > IOV_MAX, we cannot just concatenate everything.
     * Instead, merge the first two or three elements of @iov to reduce the
     * number of vector elements as necessary.
     */
    if (padded_niov > IOV_MAX) {
        /*
         * Only head and tail can have led to the number of entries exceeding
         * IOV_MAX, so we can exceed it by the head and tail at most.  We need
         * to reduce the number of elements by `surplus_count`, so we merge that
         * many elements plus one into one element.
         */
        surplus_count = padded_niov - IOV_MAX;
        assert(surplus_count <= !!pad->head + !!pad->tail);
        collapse_count = surplus_count + 1;

        /*
         * Move the elements to collapse into `pad->pre_collapse_qiov`, then
         * advance `iov` (and associated variables) by those elements.
         */
        qemu_iovec_init(&pad->pre_collapse_qiov, collapse_count);
        qemu_iovec_concat_iov(&pad->pre_collapse_qiov, iov,
                              collapse_count, iov_offset, SIZE_MAX);
        iov += collapse_count;
        iov_offset = 0;
        niov -= collapse_count;
        bytes -= pad->pre_collapse_qiov.size;

        /*
         * Construct the bounce buffer to match the length of the to-collapse
         * vector elements, and for write requests, initialize it with the data
         * from those elements.  Then add it to `pad->local_qiov`.
         */
        pad->collapse_len = pad->pre_collapse_qiov.size;
        pad->collapse_bounce_buf = qemu_blockalign(bs, pad->collapse_len);
        if (pad->write) {
            qemu_iovec_to_buf(&pad->pre_collapse_qiov, 0,
                              pad->collapse_bounce_buf, pad->collapse_len);
        }
        qemu_iovec_add(&pad->local_qiov,
                       pad->collapse_bounce_buf, pad->collapse_len);
    }

    qemu_iovec_concat_iov(&pad->local_qiov, iov, niov, iov_offset, bytes);

    if (pad->tail) {
        qemu_iovec_add(&pad->local_qiov,
                       pad->buf + pad->buf_len - pad->tail, pad->tail);
    }

    assert(pad->local_qiov.niov == MIN(padded_niov, IOV_MAX));
    return 0;
}
1693 
1694 /*
1695  * bdrv_pad_request
1696  *
1697  * Exchange request parameters with the padded request if needed.  This does
1698  * not include the RMW read of the padding; bdrv_padding_rmw_read() should be
1699  * called separately if needed.
1700  *
1701  * @write is true for write requests, false for read requests.
1702  *
1703  * Request parameters (@qiov, @qiov_offset, @offset, @bytes) are in-out:
1704  *  - on entry, they describe the original request
1705  *  - on failure, or when no padding is needed, they are unchanged
1706  *  - on success, when padding is needed, they describe the padded request
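 *
 * Worked example (hypothetical values, assuming a request_alignment of 512):
 * a request at offset 700 with 512 bytes becomes a padded request at offset
 * 512 with 1024 bytes, where pad->head == 188 and pad->tail == 324.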
1707  */
1708 static int bdrv_pad_request(BlockDriverState *bs,
1709                             QEMUIOVector **qiov, size_t *qiov_offset,
1710                             int64_t *offset, int64_t *bytes,
1711                             bool write,
1712                             BdrvRequestPadding *pad, bool *padded,
1713                             BdrvRequestFlags *flags)
1714 {
1715     int ret;
1716     struct iovec *sliced_iov;
1717     int sliced_niov;
1718     size_t sliced_head, sliced_tail;
1719 
1720     /* Should have been checked by the caller already */
1721     ret = bdrv_check_request32(*offset, *bytes, *qiov, *qiov_offset);
1722     if (ret < 0) {
1723         return ret;
1724     }
1725 
1726     if (!bdrv_init_padding(bs, *offset, *bytes, write, pad)) {
1727         if (padded) {
1728             *padded = false;
1729         }
1730         return 0;
1731     }
1732 
1733     sliced_iov = qemu_iovec_slice(*qiov, *qiov_offset, *bytes,
1734                                   &sliced_head, &sliced_tail,
1735                                   &sliced_niov);
1736 
1737     /* Guaranteed by bdrv_check_request32() */
1738     assert(*bytes <= SIZE_MAX);
1739     ret = bdrv_create_padded_qiov(bs, pad, sliced_iov, sliced_niov,
1740                                   sliced_head, *bytes);
1741     if (ret < 0) {
1742         bdrv_padding_finalize(pad);
1743         return ret;
1744     }
1745     *bytes += pad->head + pad->tail;
1746     *offset -= pad->head;
1747     *qiov = &pad->local_qiov;
1748     *qiov_offset = 0;
1749     if (padded) {
1750         *padded = true;
1751     }
1752     if (flags) {
1753         /* Can't use optimization hint with bounce buffer */
1754         *flags &= ~BDRV_REQ_REGISTERED_BUF;
1755     }
1756 
1757     return 0;
1758 }
1759 
1760 int coroutine_fn bdrv_co_preadv(BdrvChild *child,
1761     int64_t offset, int64_t bytes, QEMUIOVector *qiov,
1762     BdrvRequestFlags flags)
1763 {
1764     IO_CODE();
1765     return bdrv_co_preadv_part(child, offset, bytes, qiov, 0, flags);
1766 }
1767 
1768 int coroutine_fn bdrv_co_preadv_part(BdrvChild *child,
1769     int64_t offset, int64_t bytes,
1770     QEMUIOVector *qiov, size_t qiov_offset,
1771     BdrvRequestFlags flags)
1772 {
1773     BlockDriverState *bs = child->bs;
1774     BdrvTrackedRequest req;
1775     BdrvRequestPadding pad;
1776     int ret;
1777     IO_CODE();
1778 
1779     trace_bdrv_co_preadv_part(bs, offset, bytes, flags);
1780 
1781     if (!bdrv_co_is_inserted(bs)) {
1782         return -ENOMEDIUM;
1783     }
1784 
1785     ret = bdrv_check_request32(offset, bytes, qiov, qiov_offset);
1786     if (ret < 0) {
1787         return ret;
1788     }
1789 
1790     if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) {
1791         /*
1792          * Padding a zero-length request is nonsense.  Even if the driver gives
1793          * zero-length requests special meaning (as qcow2_co_pwritev_compressed_part
1794          * does), we can't pass one to the driver due to request_alignment.
1795          *
1796          * Still, there is no reason to return an error if someone occasionally
1797          * issues an unaligned zero-length read.
1798          */
1799         return 0;
1800     }
1801 
1802     bdrv_inc_in_flight(bs);
1803 
1804     /* Don't do copy-on-read when reading data before a write operation */
1805     if (qatomic_read(&bs->copy_on_read)) {
1806         flags |= BDRV_REQ_COPY_ON_READ;
1807     }
1808 
1809     ret = bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, false,
1810                            &pad, NULL, &flags);
1811     if (ret < 0) {
1812         goto fail;
1813     }
1814 
1815     tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
1816     ret = bdrv_aligned_preadv(child, &req, offset, bytes,
1817                               bs->bl.request_alignment,
1818                               qiov, qiov_offset, flags);
1819     tracked_request_end(&req);
1820     bdrv_padding_finalize(&pad);
1821 
1822 fail:
1823     bdrv_dec_in_flight(bs);
1824 
1825     return ret;
1826 }
1827 
1828 static int coroutine_fn GRAPH_RDLOCK
1829 bdrv_co_do_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int64_t bytes,
1830                          BdrvRequestFlags flags)
1831 {
1832     BlockDriver *drv = bs->drv;
1833     QEMUIOVector qiov;
1834     void *buf = NULL;
1835     int ret = 0;
1836     bool need_flush = false;
1837     int head = 0;
1838     int tail = 0;
1839 
1840     int64_t max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes,
1841                                             INT64_MAX);
1842     int alignment = MAX(bs->bl.pwrite_zeroes_alignment,
1843                         bs->bl.request_alignment);
1844     int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer, MAX_BOUNCE_BUFFER);
1845 
1846     assert_bdrv_graph_readable();
1847     bdrv_check_request(offset, bytes, &error_abort);
1848 
1849     if (!drv) {
1850         return -ENOMEDIUM;
1851     }
1852 
1853     if ((flags & ~bs->supported_zero_flags) & BDRV_REQ_NO_FALLBACK) {
1854         return -ENOTSUP;
1855     }
1856 
1857     /* By definition there is no user buffer so this flag doesn't make sense */
1858     if (flags & BDRV_REQ_REGISTERED_BUF) {
1859         return -EINVAL;
1860     }
1861 
1862     /* Invalidate the cached block-status data range if this write overlaps */
1863     bdrv_bsc_invalidate_range(bs, offset, bytes);
1864 
1865     assert(alignment % bs->bl.request_alignment == 0);
1866     head = offset % alignment;
1867     tail = (offset + bytes) % alignment;
1868     max_write_zeroes = QEMU_ALIGN_DOWN(max_write_zeroes, alignment);
1869     assert(max_write_zeroes >= bs->bl.request_alignment);
1870 
1871     while (bytes > 0 && !ret) {
1872         int64_t num = bytes;
1873 
1874         /* Align request.  Block drivers can expect the "bulk" of the request
1875          * to be aligned, and that unaligned requests do not cross cluster
1876          * boundaries.
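         *
         * Hypothetical example (assuming max_transfer and max_write_zeroes
         * are large enough): with alignment 4096, offset 1024 and bytes
         * 65536, the loop issues a 3072-byte head up to offset 4096, then a
         * 61440-byte aligned middle, and finally a 1024-byte unaligned tail.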
1877          */
1878         if (head) {
1879             /* Make a small request up to the first aligned sector. For
1880              * convenience, limit this request to max_transfer even if
1881              * we don't need to fall back to writes.  */
1882             num = MIN(MIN(bytes, max_transfer), alignment - head);
1883             head = (head + num) % alignment;
1884             assert(num < max_write_zeroes);
1885         } else if (tail && num > alignment) {
1886             /* Shorten the request to the last aligned sector.  */
1887             num -= tail;
1888         }
1889 
1890         /* limit request size */
1891         if (num > max_write_zeroes) {
1892             num = max_write_zeroes;
1893         }
1894 
1895         ret = -ENOTSUP;
1896         /* First try the efficient write zeroes operation */
1897         if (drv->bdrv_co_pwrite_zeroes) {
1898             ret = drv->bdrv_co_pwrite_zeroes(bs, offset, num,
1899                                              flags & bs->supported_zero_flags);
1900             if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) &&
1901                 !(bs->supported_zero_flags & BDRV_REQ_FUA)) {
1902                 need_flush = true;
1903             }
1904         } else {
1905             assert(!bs->supported_zero_flags);
1906         }
1907 
1908         if (ret == -ENOTSUP && !(flags & BDRV_REQ_NO_FALLBACK)) {
1909             /* Fall back to bounce buffer if write zeroes is unsupported */
1910             BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE;
1911 
1912             if ((flags & BDRV_REQ_FUA) &&
1913                 !(bs->supported_write_flags & BDRV_REQ_FUA)) {
1914                 /* No need for bdrv_driver_pwrite() to do a fallback
1915                  * flush on each chunk; use just one at the end */
1916                 write_flags &= ~BDRV_REQ_FUA;
1917                 need_flush = true;
1918             }
1919             num = MIN(num, max_transfer);
1920             if (buf == NULL) {
1921                 buf = qemu_try_blockalign0(bs, num);
1922                 if (buf == NULL) {
1923                     ret = -ENOMEM;
1924                     goto fail;
1925                 }
1926             }
1927             qemu_iovec_init_buf(&qiov, buf, num);
1928 
1929             ret = bdrv_driver_pwritev(bs, offset, num, &qiov, 0, write_flags);
1930 
1931             /* Keep the bounce buffer around if it is big enough for
1932              * all future requests.
1933              */
1934             if (num < max_transfer) {
1935                 qemu_vfree(buf);
1936                 buf = NULL;
1937             }
1938         }
1939 
1940         offset += num;
1941         bytes -= num;
1942     }
1943 
1944 fail:
1945     if (ret == 0 && need_flush) {
1946         ret = bdrv_co_flush(bs);
1947     }
1948     qemu_vfree(buf);
1949     return ret;
1950 }
1951 
1952 static inline int coroutine_fn GRAPH_RDLOCK
1953 bdrv_co_write_req_prepare(BdrvChild *child, int64_t offset, int64_t bytes,
1954                           BdrvTrackedRequest *req, int flags)
1955 {
1956     BlockDriverState *bs = child->bs;
1957 
1958     bdrv_check_request(offset, bytes, &error_abort);
1959 
1960     if (bdrv_is_read_only(bs)) {
1961         return -EPERM;
1962     }
1963 
1964     assert(!(bs->open_flags & BDRV_O_INACTIVE));
1965     assert((bs->open_flags & BDRV_O_NO_IO) == 0);
1966     assert(!(flags & ~BDRV_REQ_MASK));
1967     assert(!((flags & BDRV_REQ_NO_WAIT) && !(flags & BDRV_REQ_SERIALISING)));
1968 
1969     if (flags & BDRV_REQ_SERIALISING) {
1970         QEMU_LOCK_GUARD(&bs->reqs_lock);
1971 
1972         tracked_request_set_serialising(req, bdrv_get_cluster_size(bs));
1973 
1974         if ((flags & BDRV_REQ_NO_WAIT) && bdrv_find_conflicting_request(req)) {
1975             return -EBUSY;
1976         }
1977 
1978         bdrv_wait_serialising_requests_locked(req);
1979     } else {
1980         bdrv_wait_serialising_requests(req);
1981     }
1982 
1983     assert(req->overlap_offset <= offset);
1984     assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);
1985     assert(offset + bytes <= bs->total_sectors * BDRV_SECTOR_SIZE ||
1986            child->perm & BLK_PERM_RESIZE);
1987 
1988     switch (req->type) {
1989     case BDRV_TRACKED_WRITE:
1990     case BDRV_TRACKED_DISCARD:
1991         if (flags & BDRV_REQ_WRITE_UNCHANGED) {
1992             assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));
1993         } else {
1994             assert(child->perm & BLK_PERM_WRITE);
1995         }
1996         bdrv_write_threshold_check_write(bs, offset, bytes);
1997         return 0;
1998     case BDRV_TRACKED_TRUNCATE:
1999         assert(child->perm & BLK_PERM_RESIZE);
2000         return 0;
2001     default:
2002         abort();
2003     }
2004 }
2005 
2006 static inline void coroutine_fn
2007 bdrv_co_write_req_finish(BdrvChild *child, int64_t offset, int64_t bytes,
2008                          BdrvTrackedRequest *req, int ret)
2009 {
2010     int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
2011     BlockDriverState *bs = child->bs;
2012 
2013     bdrv_check_request(offset, bytes, &error_abort);
2014 
2015     qatomic_inc(&bs->write_gen);
2016 
2017     /*
2018      * Discard cannot extend the image, but in error handling cases, such as
2019      * when reverting a qcow2 cluster allocation, the discarded range can pass
2020      * the end of image file, so we cannot assert about BDRV_TRACKED_DISCARD
2021      * here. Instead, just skip it, since semantically a discard request
2022      * beyond EOF cannot expand the image anyway.
2023      */
2024     if (ret == 0 &&
2025         (req->type == BDRV_TRACKED_TRUNCATE ||
2026          end_sector > bs->total_sectors) &&
2027         req->type != BDRV_TRACKED_DISCARD) {
2028         bs->total_sectors = end_sector;
2029         bdrv_parent_cb_resize(bs);
2030         bdrv_dirty_bitmap_truncate(bs, end_sector << BDRV_SECTOR_BITS);
2031     }
2032     if (req->bytes) {
2033         switch (req->type) {
2034         case BDRV_TRACKED_WRITE:
2035             stat64_max(&bs->wr_highest_offset, offset + bytes);
2036             /* fall through, to set dirty bits */
2037         case BDRV_TRACKED_DISCARD:
2038             bdrv_set_dirty(bs, offset, bytes);
2039             break;
2040         default:
2041             break;
2042         }
2043     }
2044 }
2045 
2046 /*
2047  * Forwards an already correctly aligned write request to the BlockDriver,
2048  * after possibly fragmenting it.
2049  */
2050 static int coroutine_fn GRAPH_RDLOCK
2051 bdrv_aligned_pwritev(BdrvChild *child, BdrvTrackedRequest *req,
2052                      int64_t offset, int64_t bytes, int64_t align,
2053                      QEMUIOVector *qiov, size_t qiov_offset,
2054                      BdrvRequestFlags flags)
2055 {
2056     BlockDriverState *bs = child->bs;
2057     BlockDriver *drv = bs->drv;
2058     int ret;
2059 
2060     int64_t bytes_remaining = bytes;
2061     int max_transfer;
2062 
2063     bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
2064 
2065     if (!drv) {
2066         return -ENOMEDIUM;
2067     }
2068 
2069     if (bdrv_has_readonly_bitmaps(bs)) {
2070         return -EPERM;
2071     }
2072 
2073     assert(is_power_of_2(align));
2074     assert((offset & (align - 1)) == 0);
2075     assert((bytes & (align - 1)) == 0);
2076     max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
2077                                    align);
2078 
2079     ret = bdrv_co_write_req_prepare(child, offset, bytes, req, flags);
2080 
2081     if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
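    /*
     * With detect-zeroes enabled, a write whose payload is entirely zero is
     * converted into a more efficient zero write below (and, with the
     * "unmap" setting, may additionally unmap the range).
     */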
2082         !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_pwrite_zeroes &&
2083         qemu_iovec_is_zero(qiov, qiov_offset, bytes)) {
2084         flags |= BDRV_REQ_ZERO_WRITE;
2085         if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
2086             flags |= BDRV_REQ_MAY_UNMAP;
2087         }
2088 
2089         /* Can't use optimization hint with bufferless zero write */
2090         flags &= ~BDRV_REQ_REGISTERED_BUF;
2091     }
2092 
2093     if (ret < 0) {
2094         /* Do nothing; bdrv_co_write_req_prepare() failed this request */
2095     } else if (flags & BDRV_REQ_ZERO_WRITE) {
2096         bdrv_co_debug_event(bs, BLKDBG_PWRITEV_ZERO);
2097         ret = bdrv_co_do_pwrite_zeroes(bs, offset, bytes, flags);
2098     } else if (flags & BDRV_REQ_WRITE_COMPRESSED) {
2099         ret = bdrv_driver_pwritev_compressed(bs, offset, bytes,
2100                                              qiov, qiov_offset);
2101     } else if (bytes <= max_transfer) {
2102         bdrv_co_debug_event(bs, BLKDBG_PWRITEV);
2103         ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, qiov_offset, flags);
2104     } else {
2105         bdrv_co_debug_event(bs, BLKDBG_PWRITEV);
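        /*
         * Fragment the request into max_transfer-sized chunks.  For example
         * (hypothetical value): with max_transfer == 64 KiB, a 200 KiB write
         * is issued as three 64 KiB chunks followed by one 8 KiB chunk.
         */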
2106         while (bytes_remaining) {
2107             int num = MIN(bytes_remaining, max_transfer);
2108             int local_flags = flags;
2109 
2110             assert(num);
2111             if (num < bytes_remaining && (flags & BDRV_REQ_FUA) &&
2112                 !(bs->supported_write_flags & BDRV_REQ_FUA)) {
2113                 /* If FUA is going to be emulated by flush, we only
2114                  * need to flush on the last iteration */
2115                 local_flags &= ~BDRV_REQ_FUA;
2116             }
2117 
2118             ret = bdrv_driver_pwritev(bs, offset + bytes - bytes_remaining,
2119                                       num, qiov,
2120                                       qiov_offset + bytes - bytes_remaining,
2121                                       local_flags);
2122             if (ret < 0) {
2123                 break;
2124             }
2125             bytes_remaining -= num;
2126         }
2127     }
2128     bdrv_co_debug_event(bs, BLKDBG_PWRITEV_DONE);
2129 
2130     if (ret >= 0) {
2131         ret = 0;
2132     }
2133     bdrv_co_write_req_finish(child, offset, bytes, req, ret);
2134 
2135     return ret;
2136 }
2137 
2138 static int coroutine_fn GRAPH_RDLOCK
2139 bdrv_co_do_zero_pwritev(BdrvChild *child, int64_t offset, int64_t bytes,
2140                         BdrvRequestFlags flags, BdrvTrackedRequest *req)
2141 {
2142     BlockDriverState *bs = child->bs;
2143     QEMUIOVector local_qiov;
2144     uint64_t align = bs->bl.request_alignment;
2145     int ret = 0;
2146     bool padding;
2147     BdrvRequestPadding pad;
2148 
2149     /* This flag doesn't make sense for padding or zero writes */
2150     flags &= ~BDRV_REQ_REGISTERED_BUF;
2151 
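    /*
     * Unaligned head and tail regions of a zero write must go through a
     * read-modify-write cycle: the data around the request is read into
     * pad.buf, the requested range within the buffer is zeroed, and the
     * merged buffer is written back below.
     */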
2152     padding = bdrv_init_padding(bs, offset, bytes, true, &pad);
2153     if (padding) {
2154         assert(!(flags & BDRV_REQ_NO_WAIT));
2155         bdrv_make_request_serialising(req, align);
2156 
2157         bdrv_padding_rmw_read(child, req, &pad, true);
2158 
2159         if (pad.head || pad.merge_reads) {
2160             int64_t aligned_offset = offset & ~(align - 1);
2161             int64_t write_bytes = pad.merge_reads ? pad.buf_len : align;
2162 
2163             qemu_iovec_init_buf(&local_qiov, pad.buf, write_bytes);
2164             ret = bdrv_aligned_pwritev(child, req, aligned_offset, write_bytes,
2165                                        align, &local_qiov, 0,
2166                                        flags & ~BDRV_REQ_ZERO_WRITE);
2167             if (ret < 0 || pad.merge_reads) {
2168                 /* Error or all work is done */
2169                 goto out;
2170             }
2171             offset += write_bytes - pad.head;
2172             bytes -= write_bytes - pad.head;
2173         }
2174     }
2175 
2176     assert(!bytes || (offset & (align - 1)) == 0);
2177     if (bytes >= align) {
2178         /* Write the aligned part in the middle. */
2179         int64_t aligned_bytes = bytes & ~(align - 1);
2180         ret = bdrv_aligned_pwritev(child, req, offset, aligned_bytes, align,
2181                                    NULL, 0, flags);
2182         if (ret < 0) {
2183             goto out;
2184         }
2185         bytes -= aligned_bytes;
2186         offset += aligned_bytes;
2187     }
2188 
2189     assert(!bytes || (offset & (align - 1)) == 0);
2190     if (bytes) {
2191         assert(align == pad.tail + bytes);
2192 
2193         qemu_iovec_init_buf(&local_qiov, pad.tail_buf, align);
2194         ret = bdrv_aligned_pwritev(child, req, offset, align, align,
2195                                    &local_qiov, 0,
2196                                    flags & ~BDRV_REQ_ZERO_WRITE);
2197     }
2198 
2199 out:
2200     bdrv_padding_finalize(&pad);
2201 
2202     return ret;
2203 }
2204 
2205 /*
2206  * Handle a write request in coroutine context
2207  */
2208 int coroutine_fn bdrv_co_pwritev(BdrvChild *child,
2209     int64_t offset, int64_t bytes, QEMUIOVector *qiov,
2210     BdrvRequestFlags flags)
2211 {
2212     IO_CODE();
2213     return bdrv_co_pwritev_part(child, offset, bytes, qiov, 0, flags);
2214 }
2215 
2216 int coroutine_fn bdrv_co_pwritev_part(BdrvChild *child,
2217     int64_t offset, int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset,
2218     BdrvRequestFlags flags)
2219 {
2220     BlockDriverState *bs = child->bs;
2221     BdrvTrackedRequest req;
2222     uint64_t align = bs->bl.request_alignment;
2223     BdrvRequestPadding pad;
2224     int ret;
2225     bool padded = false;
2226     IO_CODE();
2227 
2228     trace_bdrv_co_pwritev_part(child->bs, offset, bytes, flags);
2229 
2230     if (!bdrv_co_is_inserted(bs)) {
2231         return -ENOMEDIUM;
2232     }
2233 
2234     if (flags & BDRV_REQ_ZERO_WRITE) {
2235         ret = bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, NULL);
2236     } else {
2237         ret = bdrv_check_request32(offset, bytes, qiov, qiov_offset);
2238     }
2239     if (ret < 0) {
2240         return ret;
2241     }
2242 
2243     /* If the request is misaligned then we can't make it efficient */
2244     if ((flags & BDRV_REQ_NO_FALLBACK) &&
2245         !QEMU_IS_ALIGNED(offset | bytes, align))
2246     {
2247         return -ENOTSUP;
2248     }
2249 
2250     if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) {
2251         /*
2252          * Padding a zero-length request is nonsense.  Even if the driver gives
2253          * zero-length requests special meaning (as qcow2_co_pwritev_compressed_part
2254          * does), we can't pass one to the driver due to request_alignment.
2255          *
2256          * Still, there is no reason to return an error if someone occasionally
2257          * issues an unaligned zero-length write.
2258          */
2259         return 0;
2260     }
2261 
2262     if (!(flags & BDRV_REQ_ZERO_WRITE)) {
2263         /*
2264          * Pad the request for the following read-modify-write cycle.
2265          * bdrv_co_do_zero_pwritev() does its own alignment, so we only
2266          * pad here when the ZERO flag is absent.
2267          */
2268         ret = bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, true,
2269                                &pad, &padded, &flags);
2270         if (ret < 0) {
2271             return ret;
2272         }
2273     }
2274 
2275     bdrv_inc_in_flight(bs);
2276     tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE);
2277 
2278     if (flags & BDRV_REQ_ZERO_WRITE) {
2279         assert(!padded);
2280         ret = bdrv_co_do_zero_pwritev(child, offset, bytes, flags, &req);
2281         goto out;
2282     }
2283 
2284     if (padded) {
2285         /*
2286          * Request was unaligned to request_alignment and therefore
2287          * padded.  We are going to do read-modify-write, and must
2288          * serialize the request to prevent interactions of the
2289          * widened region with other transactions.
2290          */
2291         assert(!(flags & BDRV_REQ_NO_WAIT));
2292         bdrv_make_request_serialising(&req, align);
2293         bdrv_padding_rmw_read(child, &req, &pad, false);
2294     }
2295 
2296     ret = bdrv_aligned_pwritev(child, &req, offset, bytes, align,
2297                                qiov, qiov_offset, flags);
2298 
2299     bdrv_padding_finalize(&pad);
2300 
2301 out:
2302     tracked_request_end(&req);
2303     bdrv_dec_in_flight(bs);
2304 
2305     return ret;
2306 }
2307 
2308 int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
2309                                        int64_t bytes, BdrvRequestFlags flags)
2310 {
2311     IO_CODE();
2312     trace_bdrv_co_pwrite_zeroes(child->bs, offset, bytes, flags);
2313     assert_bdrv_graph_readable();
2314 
2315     if (!(child->bs->open_flags & BDRV_O_UNMAP)) {
2316         flags &= ~BDRV_REQ_MAY_UNMAP;
2317     }
2318 
2319     return bdrv_co_pwritev(child, offset, bytes, NULL,
2320                            BDRV_REQ_ZERO_WRITE | flags);
2321 }
2322 
2323 /*
2324  * Flush ALL BDSes regardless of whether they are reachable via a BlockBackend.
2325  */
2326 int bdrv_flush_all(void)
2327 {
2328     BdrvNextIterator it;
2329     BlockDriverState *bs = NULL;
2330     int result = 0;
2331 
2332     GLOBAL_STATE_CODE();
2333     GRAPH_RDLOCK_GUARD_MAINLOOP();
2334 
2335     /*
2336      * The bdrv queue is managed by record/replay; creating a new
2337      * flush request while stopping the VM may break the determinism
2338      * of replay.
2339      */
2340     if (replay_events_enabled()) {
2341         return result;
2342     }
2343 
2344     for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
2345         AioContext *aio_context = bdrv_get_aio_context(bs);
2346         int ret;
2347 
2348         aio_context_acquire(aio_context);
2349         ret = bdrv_flush(bs);
2350         if (ret < 0 && !result) {
2351             result = ret;
2352         }
2353         aio_context_release(aio_context);
2354     }
2355 
2356     return result;
2357 }
2358 
2359 /*
2360  * Returns the allocation status of the specified byte range.
2361  * Drivers not implementing the functionality are assumed to not support
2362  * backing files, hence the whole range is reported as allocated.
2363  *
2364  * If 'want_zero' is true, the caller is querying for mapping
2365  * purposes, with a focus on valid BDRV_BLOCK_OFFSET_VALID, _DATA, and
2366  * _ZERO where possible; otherwise, the result favors larger 'pnum',
2367  * with a focus on accurate BDRV_BLOCK_ALLOCATED.
2368  *
2369  * If 'offset' is beyond the end of the disk image the return value is
2370  * BDRV_BLOCK_EOF and 'pnum' is set to 0.
2371  *
2372  * 'bytes' is the max value 'pnum' should be set to.  If bytes goes
2373  * beyond the end of the disk image it will be clamped; if 'pnum' is set to
2374  * the end of the image, then the returned value will include BDRV_BLOCK_EOF.
2375  *
2376  * 'pnum' is set to the number of bytes (including and immediately
2377  * following the specified offset) that are easily known to be in the
2378  * same allocated/unallocated state.  Note that a second call starting
2379  * at the original offset plus returned pnum may have the same status.
2380  * The returned value is non-zero on success except at end-of-file.
2381  *
2382  * Returns negative errno on failure.  Otherwise, if the
2383  * BDRV_BLOCK_OFFSET_VALID bit is set, 'map' and 'file' (if non-NULL) are
2384  * set to the host mapping and BDS corresponding to the guest offset.
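 *
 * Hypothetical example: on a node whose first 64 KiB are allocated data,
 * querying offset 0 with bytes == 1 MiB might return
 * BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED with *pnum == 64 KiB; a second call
 * at offset 64 KiB then describes the following range.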
2385  */
2386 static int coroutine_fn GRAPH_RDLOCK
2387 bdrv_co_do_block_status(BlockDriverState *bs, bool want_zero,
2388                         int64_t offset, int64_t bytes,
2389                         int64_t *pnum, int64_t *map, BlockDriverState **file)
2390 {
2391     int64_t total_size;
2392     int64_t n; /* bytes */
2393     int ret;
2394     int64_t local_map = 0;
2395     BlockDriverState *local_file = NULL;
2396     int64_t aligned_offset, aligned_bytes;
2397     uint32_t align;
2398     bool has_filtered_child;
2399 
2400     assert(pnum);
2401     assert_bdrv_graph_readable();
2402     *pnum = 0;
2403     total_size = bdrv_co_getlength(bs);
2404     if (total_size < 0) {
2405         ret = total_size;
2406         goto early_out;
2407     }
2408 
2409     if (offset >= total_size) {
2410         ret = BDRV_BLOCK_EOF;
2411         goto early_out;
2412     }
2413     if (!bytes) {
2414         ret = 0;
2415         goto early_out;
2416     }
2417 
2418     n = total_size - offset;
2419     if (n < bytes) {
2420         bytes = n;
2421     }
2422 
2423     /* Must be non-NULL or bdrv_co_getlength() would have failed */
2424     assert(bs->drv);
2425     has_filtered_child = bdrv_filter_child(bs);
2426     if (!bs->drv->bdrv_co_block_status && !has_filtered_child) {
2427         *pnum = bytes;
2428         ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
2429         if (offset + bytes == total_size) {
2430             ret |= BDRV_BLOCK_EOF;
2431         }
2432         if (bs->drv->protocol_name) {
2433             ret |= BDRV_BLOCK_OFFSET_VALID;
2434             local_map = offset;
2435             local_file = bs;
2436         }
2437         goto early_out;
2438     }
2439 
2440     bdrv_inc_in_flight(bs);
2441 
2442     /* Round out to request_alignment boundaries */
2443     align = bs->bl.request_alignment;
2444     aligned_offset = QEMU_ALIGN_DOWN(offset, align);
2445     aligned_bytes = ROUND_UP(offset + bytes, align) - aligned_offset;
2446 
2447     if (bs->drv->bdrv_co_block_status) {
2448         /*
2449          * Use the block-status cache only for protocol nodes: Format
2450          * drivers are generally quick to inquire the status, but protocol
2451          * drivers often need to get information from outside of qemu, so
2452          * we do not have control over the actual implementation.  There
2453          * have been cases where inquiring the status took an unreasonably
2454          * long time, and we can do nothing in qemu to fix it.
2455          * This is especially problematic for images with large data areas,
2456          * because finding the few holes in them and giving them special
2457          * treatment does not gain much performance.  Therefore, we try to
2458          * cache the last-identified data region.
2459          *
2460          * In addition, limiting ourselves to protocol nodes allows us to assume
2461          * the block status for data regions to be DATA | OFFSET_VALID, and
2462          * that the host offset is the same as the guest offset.
2463          *
2464          * Note that it is possible that external writers zero parts of
2465          * the cached regions without the cache being invalidated, and so
2466          * we may report zeroes as data.  This is not catastrophic,
2467          * however, because reporting zeroes as data is fine.
2468          */
2469         if (QLIST_EMPTY(&bs->children) &&
2470             bdrv_bsc_is_data(bs, aligned_offset, pnum))
2471         {
2472             ret = BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID;
2473             local_file = bs;
2474             local_map = aligned_offset;
2475         } else {
2476             ret = bs->drv->bdrv_co_block_status(bs, want_zero, aligned_offset,
2477                                                 aligned_bytes, pnum, &local_map,
2478                                                 &local_file);
2479 
2480             /*
2481              * Note that checking QLIST_EMPTY(&bs->children) is also done when
2482              * the cache is queried above.  Technically, we do not need to check
2483              * it here; the worst that can happen is that we fill the cache for
2484              * non-protocol nodes, and then it is never used.  However, filling
2485              * the cache requires an RCU update, so double check here to avoid
2486              * such an update if possible.
2487              *
2488              * Check want_zero, because we only want to update the cache when we
2489              * have accurate information about what is zero and what is data.
2490              */
2491             if (want_zero &&
2492                 ret == (BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID) &&
2493                 QLIST_EMPTY(&bs->children))
2494             {
2495                 /*
2496                  * When a protocol driver reports BLOCK_OFFSET_VALID, the
2497                  * returned local_map value must be the same as the offset we
2498              * have passed (aligned_offset), and local_file must be the node
2499                  * itself.
2500                  * Assert this, because we follow this rule when reading from
2501                  * the cache (see the `local_file = bs` and
2502                  * `local_map = aligned_offset` assignments above), and the
2503                  * result the cache delivers must be the same as the driver
2504                  * would deliver.
2505                  */
2506                 assert(local_file == bs);
2507                 assert(local_map == aligned_offset);
2508                 bdrv_bsc_fill(bs, aligned_offset, *pnum);
2509             }
2510         }
2511     } else {
2512         /* Default code for filters */
2513 
2514         local_file = bdrv_filter_bs(bs);
2515         assert(local_file);
2516 
2517         *pnum = aligned_bytes;
2518         local_map = aligned_offset;
2519         ret = BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID;
2520     }
2521     if (ret < 0) {
2522         *pnum = 0;
2523         goto out;
2524     }
2525 
2526     /*
2527      * The driver's result must be a non-zero multiple of request_alignment.
2528      * Clamp pnum and adjust map to original request.
2529      */
2530     assert(*pnum && QEMU_IS_ALIGNED(*pnum, align) &&
2531            align > offset - aligned_offset);
2532     if (ret & BDRV_BLOCK_RECURSE) {
2533         assert(ret & BDRV_BLOCK_DATA);
2534         assert(ret & BDRV_BLOCK_OFFSET_VALID);
2535         assert(!(ret & BDRV_BLOCK_ZERO));
2536     }
2537 
2538     *pnum -= offset - aligned_offset;
2539     if (*pnum > bytes) {
2540         *pnum = bytes;
2541     }
2542     if (ret & BDRV_BLOCK_OFFSET_VALID) {
2543         local_map += offset - aligned_offset;
2544     }
2545 
2546     if (ret & BDRV_BLOCK_RAW) {
2547         assert(ret & BDRV_BLOCK_OFFSET_VALID && local_file);
2548         ret = bdrv_co_do_block_status(local_file, want_zero, local_map,
2549                                       *pnum, pnum, &local_map, &local_file);
2550         goto out;
2551     }
2552 
2553     if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
2554         ret |= BDRV_BLOCK_ALLOCATED;
2555     } else if (bs->drv->supports_backing) {
2556         BlockDriverState *cow_bs = bdrv_cow_bs(bs);
2557 
2558         if (!cow_bs) {
2559             ret |= BDRV_BLOCK_ZERO;
2560         } else if (want_zero) {
2561             int64_t size2 = bdrv_co_getlength(cow_bs);
2562 
2563             if (size2 >= 0 && offset >= size2) {
2564                 ret |= BDRV_BLOCK_ZERO;
2565             }
2566         }
2567     }
2568 
2569     if (want_zero && ret & BDRV_BLOCK_RECURSE &&
2570         local_file && local_file != bs &&
2571         (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
2572         (ret & BDRV_BLOCK_OFFSET_VALID)) {
2573         int64_t file_pnum;
2574         int ret2;
2575 
2576         ret2 = bdrv_co_do_block_status(local_file, want_zero, local_map,
2577                                        *pnum, &file_pnum, NULL, NULL);
2578         if (ret2 >= 0) {
2579             /* Ignore errors.  This is just providing extra information, it
2580              * is useful but not necessary.
2581              */
2582             if (ret2 & BDRV_BLOCK_EOF &&
2583                 (!file_pnum || ret2 & BDRV_BLOCK_ZERO)) {
2584                 /*
2585                  * It is valid for the format block driver to read
2586                  * beyond the end of the underlying file's current
2587                  * size; such areas read as zero.
2588                  */
2589                 ret |= BDRV_BLOCK_ZERO;
2590             } else {
2591                 /* Limit request to the range reported by the protocol driver */
2592                 *pnum = file_pnum;
2593                 ret |= (ret2 & BDRV_BLOCK_ZERO);
2594             }
2595         }
2596     }
2597 
2598 out:
2599     bdrv_dec_in_flight(bs);
2600     if (ret >= 0 && offset + *pnum == total_size) {
2601         ret |= BDRV_BLOCK_EOF;
2602     }
2603 early_out:
2604     if (file) {
2605         *file = local_file;
2606     }
2607     if (map) {
2608         *map = local_map;
2609     }
2610     return ret;
2611 }
2612 
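/*
 * Walk the backing chain from @bs towards @base (excluding @base unless
 * @include_base is true), stopping at the first layer that reports the range
 * as allocated.  If @depth is non-NULL, it is set to the number of layers
 * queried, counting @bs itself as 1.
 */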
2613 int coroutine_fn
2614 bdrv_co_common_block_status_above(BlockDriverState *bs,
2615                                   BlockDriverState *base,
2616                                   bool include_base,
2617                                   bool want_zero,
2618                                   int64_t offset,
2619                                   int64_t bytes,
2620                                   int64_t *pnum,
2621                                   int64_t *map,
2622                                   BlockDriverState **file,
2623                                   int *depth)
2624 {
2625     int ret;
2626     BlockDriverState *p;
2627     int64_t eof = 0;
2628     int dummy;
2629     IO_CODE();
2630 
2631     assert(!include_base || base); /* Can't include NULL base */
2632     assert_bdrv_graph_readable();
2633 
2634     if (!depth) {
2635         depth = &dummy;
2636     }
2637     *depth = 0;
2638 
2639     if (!include_base && bs == base) {
2640         *pnum = bytes;
2641         return 0;
2642     }
2643 
2644     ret = bdrv_co_do_block_status(bs, want_zero, offset, bytes, pnum,
2645                                   map, file);
2646     ++*depth;
2647     if (ret < 0 || *pnum == 0 || ret & BDRV_BLOCK_ALLOCATED || bs == base) {
2648         return ret;
2649     }
2650 
2651     if (ret & BDRV_BLOCK_EOF) {
2652         eof = offset + *pnum;
2653     }
2654 
2655     assert(*pnum <= bytes);
2656     bytes = *pnum;
2657 
2658     for (p = bdrv_filter_or_cow_bs(bs); include_base || p != base;
2659          p = bdrv_filter_or_cow_bs(p))
2660     {
2661         ret = bdrv_co_do_block_status(p, want_zero, offset, bytes, pnum,
2662                                       map, file);
2663         ++*depth;
2664         if (ret < 0) {
2665             return ret;
2666         }
2667         if (*pnum == 0) {
2668             /*
2669              * The top layer deferred to this layer, and because this layer is
2670              * short, any zeroes that we synthesize beyond EOF behave as if they
2671              * were allocated at this layer.
2672              *
2673              * We don't include BDRV_BLOCK_EOF in ret, as the upper layer
2674              * may be larger.  We'll add BDRV_BLOCK_EOF if needed at the end
2675              * of the function, see below.
2676              */
2677             assert(ret & BDRV_BLOCK_EOF);
2678             *pnum = bytes;
2679             if (file) {
2680                 *file = p;
2681             }
2682             ret = BDRV_BLOCK_ZERO | BDRV_BLOCK_ALLOCATED;
2683             break;
2684         }
2685         if (ret & BDRV_BLOCK_ALLOCATED) {
2686             /*
2687              * We've found the node and the status, we must break.
2688              *
2689              * Drop BDRV_BLOCK_EOF, as it does not apply to the upper layer,
2690              * which may be larger.  We'll add BDRV_BLOCK_EOF if needed at
2691              * the end of the function, see below.
2692              */
2693             ret &= ~BDRV_BLOCK_EOF;
2694             break;
2695         }
2696 
2697         if (p == base) {
2698             assert(include_base);
2699             break;
2700         }
2701 
2702         /*
2703          * OK, the [offset, offset + *pnum) region is unallocated on this
2704          * layer, so continue diving down the chain.
2705          */
2706         assert(*pnum <= bytes);
2707         bytes = *pnum;
2708     }
2709 
2710     if (offset + *pnum == eof) {
2711         ret |= BDRV_BLOCK_EOF;
2712     }
2713 
2714     return ret;
2715 }
2716 
2717 int coroutine_fn bdrv_co_block_status_above(BlockDriverState *bs,
2718                                             BlockDriverState *base,
2719                                             int64_t offset, int64_t bytes,
2720                                             int64_t *pnum, int64_t *map,
2721                                             BlockDriverState **file)
2722 {
2723     IO_CODE();
2724     return bdrv_co_common_block_status_above(bs, base, false, true, offset,
2725                                              bytes, pnum, map, file, NULL);
2726 }
2727 
2728 int coroutine_fn bdrv_co_block_status(BlockDriverState *bs, int64_t offset,
2729                                       int64_t bytes, int64_t *pnum,
2730                                       int64_t *map, BlockDriverState **file)
2731 {
2732     IO_CODE();
2733     return bdrv_co_block_status_above(bs, bdrv_filter_or_cow_bs(bs),
2734                                       offset, bytes, pnum, map, file);
2735 }
2736 
2737 /*
2738  * Check @bs (and its backing chain) to see if the range defined
2739  * by @offset and @bytes is known to read as zeroes.
2740  * Return 1 if that is the case, 0 otherwise and -errno on error.
2741  * This test is meant to be fast rather than accurate so returning 0
2742  * does not guarantee non-zero data.
2743  */
2744 int coroutine_fn bdrv_co_is_zero_fast(BlockDriverState *bs, int64_t offset,
2745                                       int64_t bytes)
2746 {
2747     int ret;
2748     int64_t pnum = bytes;
2749     IO_CODE();
2750 
2751     if (!bytes) {
2752         return 1;
2753     }
2754 
2755     ret = bdrv_co_common_block_status_above(bs, NULL, false, false, offset,
2756                                             bytes, &pnum, NULL, NULL, NULL);
2757 
2758     if (ret < 0) {
2759         return ret;
2760     }
2761 
2762     return (pnum == bytes) && (ret & BDRV_BLOCK_ZERO);
2763 }
2764 
2765 int coroutine_fn bdrv_co_is_allocated(BlockDriverState *bs, int64_t offset,
2766                                       int64_t bytes, int64_t *pnum)
2767 {
2768     int ret;
2769     int64_t dummy;
2770     IO_CODE();
2771 
2772     ret = bdrv_co_common_block_status_above(bs, bs, true, false, offset,
2773                                             bytes, pnum ? pnum : &dummy, NULL,
2774                                             NULL, NULL);
2775     if (ret < 0) {
2776         return ret;
2777     }
2778     return !!(ret & BDRV_BLOCK_ALLOCATED);
2779 }
2780 
2781 /*
2782  * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
2783  *
2784  * Return a positive depth if (a prefix of) the given range is allocated
2785  * in any image between BASE and TOP (BASE is only included if include_base
2786  * is set).  Depth 1 is TOP, 2 is the first backing layer, and so forth.
2787  * BASE can be NULL to check if the given offset is allocated in any
2788  * image of the chain.  Return 0 otherwise, or negative errno on
2789  * failure.
2790  *
2791  * 'pnum' is set to the number of bytes (including and immediately
2792  * following the specified offset) that are known to be in the same
2793  * allocated/unallocated state.  Note that a subsequent call starting
2794  * at 'offset + *pnum' may return the same allocation status (in other
2795  * words, the result is not necessarily the maximum possible range);
2796  * but 'pnum' will only be 0 when end of file is reached.
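 *
 * For example, in the chain [BASE] -> [INTER1] -> [INTER2] -> [TOP], a range
 * allocated only in INTER2 yields a return value of 2 when querying from TOP.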
2797  */
2798 int coroutine_fn bdrv_co_is_allocated_above(BlockDriverState *bs,
2799                                             BlockDriverState *base,
2800                                             bool include_base, int64_t offset,
2801                                             int64_t bytes, int64_t *pnum)
2802 {
2803     int depth;
2804     int ret;
2805     IO_CODE();
2806 
2807     ret = bdrv_co_common_block_status_above(bs, base, include_base, false,
2808                                             offset, bytes, pnum, NULL, NULL,
2809                                             &depth);
2810     if (ret < 0) {
2811         return ret;
2812     }
2813 
2814     if (ret & BDRV_BLOCK_ALLOCATED) {
2815         return depth;
2816     }
2817     return 0;
2818 }
2819 
2820 int coroutine_fn
2821 bdrv_co_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
2822 {
2823     BlockDriver *drv = bs->drv;
2824     BlockDriverState *child_bs = bdrv_primary_bs(bs);
2825     int ret;
2826     IO_CODE();
2827     assert_bdrv_graph_readable();
2828 
2829     ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL);
2830     if (ret < 0) {
2831         return ret;
2832     }
2833 
2834     if (!drv) {
2835         return -ENOMEDIUM;
2836     }
2837 
2838     bdrv_inc_in_flight(bs);
2839 
2840     if (drv->bdrv_co_load_vmstate) {
2841         ret = drv->bdrv_co_load_vmstate(bs, qiov, pos);
2842     } else if (child_bs) {
2843         ret = bdrv_co_readv_vmstate(child_bs, qiov, pos);
2844     } else {
2845         ret = -ENOTSUP;
2846     }
2847 
2848     bdrv_dec_in_flight(bs);
2849 
2850     return ret;
2851 }
2852 
2853 int coroutine_fn
2854 bdrv_co_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
2855 {
2856     BlockDriver *drv = bs->drv;
2857     BlockDriverState *child_bs = bdrv_primary_bs(bs);
2858     int ret;
2859     IO_CODE();
2860     assert_bdrv_graph_readable();
2861 
2862     ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL);
2863     if (ret < 0) {
2864         return ret;
2865     }
2866 
2867     if (!drv) {
2868         return -ENOMEDIUM;
2869     }
2870 
2871     bdrv_inc_in_flight(bs);
2872 
2873     if (drv->bdrv_co_save_vmstate) {
2874         ret = drv->bdrv_co_save_vmstate(bs, qiov, pos);
2875     } else if (child_bs) {
2876         ret = bdrv_co_writev_vmstate(child_bs, qiov, pos);
2877     } else {
2878         ret = -ENOTSUP;
2879     }
2880 
2881     bdrv_dec_in_flight(bs);
2882 
2883     return ret;
2884 }
2885 
2886 int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
2887                       int64_t pos, int size)
2888 {
2889     QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
2890     int ret = bdrv_writev_vmstate(bs, &qiov, pos);
2891     IO_CODE();
2892 
2893     return ret < 0 ? ret : size;
2894 }
2895 
2896 int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
2897                       int64_t pos, int size)
2898 {
2899     QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
2900     int ret = bdrv_readv_vmstate(bs, &qiov, pos);
2901     IO_CODE();
2902 
2903     return ret < 0 ? ret : size;
2904 }
2905 
2906 /**************************************************************/
2907 /* async I/Os */
2908 
2909 /**
2910  * Synchronously cancels an acb. Must be called with the BQL held and the acb
2911  * must be processed with the BQL held too (IOThreads are not allowed).
2912  *
2913  * Use bdrv_aio_cancel_async() instead when possible.
2914  */
2915 void bdrv_aio_cancel(BlockAIOCB *acb)
2916 {
2917     GLOBAL_STATE_CODE();
2918     qemu_aio_ref(acb);
2919     bdrv_aio_cancel_async(acb);
2920     AIO_WAIT_WHILE_UNLOCKED(NULL, acb->refcnt > 1);
2921     qemu_aio_unref(acb);
2922 }
2923 
2924 /* Async version of aio cancel.  The caller is not blocked if the acb
2925  * implements cancel_async; otherwise we do nothing and let the request
2926  * complete normally.  In either case the completion callback must be called. */
2927 void bdrv_aio_cancel_async(BlockAIOCB *acb)
2928 {
2929     IO_CODE();
2930     if (acb->aiocb_info->cancel_async) {
2931         acb->aiocb_info->cancel_async(acb);
2932     }
2933 }
2934 
2935 /**************************************************************/
2936 /* Coroutine block device emulation */
2937 
2938 int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
2939 {
2940     BdrvChild *primary_child = bdrv_primary_child(bs);
2941     BdrvChild *child;
2942     int current_gen;
2943     int ret = 0;
2944     IO_CODE();
2945 
2946     assert_bdrv_graph_readable();
2947     bdrv_inc_in_flight(bs);
2948 
2949     if (!bdrv_co_is_inserted(bs) || bdrv_is_read_only(bs) ||
2950         bdrv_is_sg(bs)) {
2951         goto early_exit;
2952     }
2953 
2954     qemu_mutex_lock(&bs->reqs_lock);
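    /*
     * Snapshot the current write generation: if it still equals flushed_gen
     * below, no write has completed since the last successful flush, and the
     * flush-to-disk step can be skipped.
     */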
2955     current_gen = qatomic_read(&bs->write_gen);
2956 
2957     /* Wait until any previous flushes are completed */
2958     while (bs->active_flush_req) {
2959         qemu_co_queue_wait(&bs->flush_queue, &bs->reqs_lock);
2960     }
2961 
2962     /* Flushes reach this point in nondecreasing current_gen order.  */
2963     bs->active_flush_req = true;
2964     qemu_mutex_unlock(&bs->reqs_lock);
2965 
2966     /* Write back all layers by calling one driver function */
2967     if (bs->drv->bdrv_co_flush) {
2968         ret = bs->drv->bdrv_co_flush(bs);
2969         goto out;
2970     }
2971 
2972     /* Write back cached data to the OS even with cache=unsafe */
2973     BLKDBG_CO_EVENT(primary_child, BLKDBG_FLUSH_TO_OS);
2974     if (bs->drv->bdrv_co_flush_to_os) {
2975         ret = bs->drv->bdrv_co_flush_to_os(bs);
2976         if (ret < 0) {
2977             goto out;
2978         }
2979     }
2980 
2981     /* But don't actually force it to the disk with cache=unsafe */
2982     if (bs->open_flags & BDRV_O_NO_FLUSH) {
2983         goto flush_children;
2984     }
2985 
2986     /* Check if we really need to flush anything */
2987     if (bs->flushed_gen == current_gen) {
2988         goto flush_children;
2989     }
2990 
2991     BLKDBG_CO_EVENT(primary_child, BLKDBG_FLUSH_TO_DISK);
2992     if (!bs->drv) {
2993         /* bs->drv->bdrv_co_flush() might have ejected the BDS
2994          * (even in case of apparent success) */
2995         ret = -ENOMEDIUM;
2996         goto out;
2997     }
2998     if (bs->drv->bdrv_co_flush_to_disk) {
2999         ret = bs->drv->bdrv_co_flush_to_disk(bs);
3000     } else if (bs->drv->bdrv_aio_flush) {
3001         BlockAIOCB *acb;
3002         CoroutineIOCompletion co = {
3003             .coroutine = qemu_coroutine_self(),
3004         };
3005 
3006         acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
3007         if (acb == NULL) {
3008             ret = -EIO;
3009         } else {
3010             qemu_coroutine_yield();
3011             ret = co.ret;
3012         }
3013     } else {
3014         /*
3015          * Some block drivers always operate in either writethrough or unsafe
3016          * mode and therefore don't support bdrv_flush.  Usually qemu doesn't
3017          * know how the server works (because the behaviour is hardcoded or
3018          * depends on server-side configuration), so we can't ensure that
3019          * everything is safe on disk.  Returning an error doesn't work because
3020          * that would break guests even if the server operates in writethrough
3021          * mode.
3022          *
3023          * Let's hope the user knows what they're doing.
3024          */
3025         ret = 0;
3026     }
3027 
3028     if (ret < 0) {
3029         goto out;
3030     }
3031 
3032     /* Now flush the underlying protocol.  It will also have BDRV_O_NO_FLUSH
3033      * in the case of cache=unsafe, so there are no useless flushes.
3034      */
3035 flush_children:
3036     ret = 0;
3037     QLIST_FOREACH(child, &bs->children, next) {
3038         if (child->perm & (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED)) {
3039             int this_child_ret = bdrv_co_flush(child->bs);
3040             if (!ret) {
3041                 ret = this_child_ret;
3042             }
3043         }
3044     }
3045 
3046 out:
3047     /* Notify any pending flushes that we have completed */
3048     if (ret == 0) {
3049         bs->flushed_gen = current_gen;
3050     }
3051 
3052     qemu_mutex_lock(&bs->reqs_lock);
3053     bs->active_flush_req = false;
3054     /* Return value is ignored - it's ok if wait queue is empty */
3055     qemu_co_queue_next(&bs->flush_queue);
3056     qemu_mutex_unlock(&bs->reqs_lock);
3057 
3058 early_exit:
3059     bdrv_dec_in_flight(bs);
3060     return ret;
3061 }
3062 
3063 int coroutine_fn bdrv_co_pdiscard(BdrvChild *child, int64_t offset,
3064                                   int64_t bytes)
3065 {
3066     BdrvTrackedRequest req;
3067     int ret;
3068     int64_t max_pdiscard;
3069     int head, tail, align;
3070     BlockDriverState *bs = child->bs;
3071     IO_CODE();
3072     assert_bdrv_graph_readable();
3073 
3074     if (!bs || !bs->drv || !bdrv_co_is_inserted(bs)) {
3075         return -ENOMEDIUM;
3076     }
3077 
3078     if (bdrv_has_readonly_bitmaps(bs)) {
3079         return -EPERM;
3080     }
3081 
3082     ret = bdrv_check_request(offset, bytes, NULL);
3083     if (ret < 0) {
3084         return ret;
3085     }
3086 
3087     /* Do nothing if disabled.  */
3088     if (!(bs->open_flags & BDRV_O_UNMAP)) {
3089         return 0;
3090     }
3091 
3092     if (!bs->drv->bdrv_co_pdiscard && !bs->drv->bdrv_aio_pdiscard) {
3093         return 0;
3094     }
3095 
3096     /* Invalidate the cached block-status data range if this discard overlaps */
3097     bdrv_bsc_invalidate_range(bs, offset, bytes);
3098 
3099     /* Discard is advisory, but some devices track and coalesce
3100      * unaligned requests, so we must pass everything down rather than
3101      * round here.  Still, most devices will just silently ignore
3102      * unaligned requests (by returning -ENOTSUP), so we must fragment
3103      * the request accordingly.  */
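    /*
     * Worked example (hypothetical limits, assuming max_pdiscard is large
     * enough): with pdiscard_alignment == 64 KiB, request_alignment == 512,
     * offset == 1 KiB and bytes == 192 KiB, the loop issues a 63 KiB head
     * fragment, a 128 KiB aligned middle fragment, and a 1 KiB tail.
     */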
3104     align = MAX(bs->bl.pdiscard_alignment, bs->bl.request_alignment);
3105     assert(align % bs->bl.request_alignment == 0);
3106     head = offset % align;
3107     tail = (offset + bytes) % align;
3108 
3109     bdrv_inc_in_flight(bs);
3110     tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_DISCARD);
3111 
3112     ret = bdrv_co_write_req_prepare(child, offset, bytes, &req, 0);
3113     if (ret < 0) {
3114         goto out;
3115     }
3116 
3117     max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT64_MAX),
3118                                    align);
3119     assert(max_pdiscard >= bs->bl.request_alignment);
3120 
3121     while (bytes > 0) {
3122         int64_t num = bytes;
3123 
3124         if (head) {
3125             /* Make small requests to get to alignment boundaries. */
3126             num = MIN(bytes, align - head);
3127             if (!QEMU_IS_ALIGNED(num, bs->bl.request_alignment)) {
3128                 num %= bs->bl.request_alignment;
3129             }
3130             head = (head + num) % align;
3131             assert(num < max_pdiscard);
3132         } else if (tail) {
3133             if (num > align) {
3134                 /* Shorten the request to the last aligned cluster.  */
3135                 num -= tail;
3136             } else if (!QEMU_IS_ALIGNED(tail, bs->bl.request_alignment) &&
3137                        tail > bs->bl.request_alignment) {
3138                 tail %= bs->bl.request_alignment;
3139                 num -= tail;
3140             }
3141         }
3142         /* limit request size */
3143         if (num > max_pdiscard) {
3144             num = max_pdiscard;
3145         }
3146 
3147         if (!bs->drv) {
3148             ret = -ENOMEDIUM;
3149             goto out;
3150         }
3151         if (bs->drv->bdrv_co_pdiscard) {
3152             ret = bs->drv->bdrv_co_pdiscard(bs, offset, num);
3153         } else {
3154             BlockAIOCB *acb;
3155             CoroutineIOCompletion co = {
3156                 .coroutine = qemu_coroutine_self(),
3157             };
3158 
3159             acb = bs->drv->bdrv_aio_pdiscard(bs, offset, num,
3160                                              bdrv_co_io_em_complete, &co);
3161             if (acb == NULL) {
3162                 ret = -EIO;
3163                 goto out;
3164             } else {
3165                 qemu_coroutine_yield();
3166                 ret = co.ret;
3167             }
3168         }
3169         if (ret && ret != -ENOTSUP) {
3170             goto out;
3171         }
3172 
3173         offset += num;
3174         bytes -= num;
3175     }
3176     ret = 0;
3177 out:
3178     bdrv_co_write_req_finish(child, req.offset, req.bytes, &req, ret);
3179     tracked_request_end(&req);
3180     bdrv_dec_in_flight(bs);
3181     return ret;
3182 }
3183 
3184 int coroutine_fn bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf)
3185 {
3186     BlockDriver *drv = bs->drv;
3187     CoroutineIOCompletion co = {
3188         .coroutine = qemu_coroutine_self(),
3189     };
3190     BlockAIOCB *acb;
3191     IO_CODE();
3192     assert_bdrv_graph_readable();
3193 
3194     bdrv_inc_in_flight(bs);
3195     if (!drv || (!drv->bdrv_aio_ioctl && !drv->bdrv_co_ioctl)) {
3196         co.ret = -ENOTSUP;
3197         goto out;
3198     }
3199 
3200     if (drv->bdrv_co_ioctl) {
3201         co.ret = drv->bdrv_co_ioctl(bs, req, buf);
3202     } else {
3203         acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co);
3204         if (!acb) {
3205             co.ret = -ENOTSUP;
3206             goto out;
3207         }
3208         qemu_coroutine_yield();
3209     }
3210 out:
3211     bdrv_dec_in_flight(bs);
3212     return co.ret;
3213 }
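
/*
 * Pattern note: unlike the discard path above, which turns a NULL ACB into
 * -EIO, a NULL ACB here becomes -ENOTSUP.  Ioctls are optional by nature,
 * so a driver that cannot even start one looks the same to the caller as a
 * driver that does not implement the callback at all.
 */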
3214 
3215 int coroutine_fn bdrv_co_zone_report(BlockDriverState *bs, int64_t offset,
3216                         unsigned int *nr_zones,
3217                         BlockZoneDescriptor *zones)
3218 {
3219     BlockDriver *drv = bs->drv;
3220     CoroutineIOCompletion co = {
3221             .coroutine = qemu_coroutine_self(),
3222     };
3223     IO_CODE();
3224 
3225     bdrv_inc_in_flight(bs);
3226     if (!drv || !drv->bdrv_co_zone_report || bs->bl.zoned == BLK_Z_NONE) {
3227         co.ret = -ENOTSUP;
3228         goto out;
3229     }
3230     co.ret = drv->bdrv_co_zone_report(bs, offset, nr_zones, zones);
3231 out:
3232     bdrv_dec_in_flight(bs);
3233     return co.ret;
3234 }
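
/*
 * Illustrative caller sketch (hypothetical, not part of this file):
 * *nr_zones is in/out -- on entry it caps how many descriptors fit into
 * @zones, on return it holds the number actually filled in:
 *
 *     unsigned int nr_zones = 16;
 *     BlockZoneDescriptor zones[16];
 *     int ret = bdrv_co_zone_report(bs, 0, &nr_zones, zones);
 *     // on success, zones[0 .. nr_zones - 1] describe zones from offset 0
 */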
3235 
3236 int coroutine_fn bdrv_co_zone_mgmt(BlockDriverState *bs, BlockZoneOp op,
3237         int64_t offset, int64_t len)
3238 {
3239     BlockDriver *drv = bs->drv;
3240     CoroutineIOCompletion co = {
3241             .coroutine = qemu_coroutine_self(),
3242     };
3243     IO_CODE();
3244 
3245     bdrv_inc_in_flight(bs);
3246     if (!drv || !drv->bdrv_co_zone_mgmt || bs->bl.zoned == BLK_Z_NONE) {
3247         co.ret = -ENOTSUP;
3248         goto out;
3249     }
3250     co.ret = drv->bdrv_co_zone_mgmt(bs, op, offset, len);
3251 out:
3252     bdrv_dec_in_flight(bs);
3253     return co.ret;
3254 }
3255 
3256 int coroutine_fn bdrv_co_zone_append(BlockDriverState *bs, int64_t *offset,
3257                         QEMUIOVector *qiov,
3258                         BdrvRequestFlags flags)
3259 {
3260     int ret;
3261     BlockDriver *drv = bs->drv;
3262     CoroutineIOCompletion co = {
3263             .coroutine = qemu_coroutine_self(),
3264     };
3265     IO_CODE();
3266 
3267     ret = bdrv_check_qiov_request(*offset, qiov->size, qiov, 0, NULL);
3268     if (ret < 0) {
3269         return ret;
3270     }
3271 
3272     bdrv_inc_in_flight(bs);
3273     if (!drv || !drv->bdrv_co_zone_append || bs->bl.zoned == BLK_Z_NONE) {
3274         co.ret = -ENOTSUP;
3275         goto out;
3276     }
3277     co.ret = drv->bdrv_co_zone_append(bs, offset, qiov, flags);
3278 out:
3279     bdrv_dec_in_flight(bs);
3280     return co.ret;
3281 }
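
/*
 * Illustrative caller sketch (hypothetical): with zone append the device
 * chooses the write position inside the zone, so the caller passes the
 * zone's start offset in and reads the actual write position back out:
 *
 *     int64_t offset = zone_start;    // byte offset of an open zone
 *     int ret = bdrv_co_zone_append(bs, &offset, &qiov, 0);
 *     // on success, offset holds where the data was actually written
 */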
3282 
3283 void *qemu_blockalign(BlockDriverState *bs, size_t size)
3284 {
3285     IO_CODE();
3286     return qemu_memalign(bdrv_opt_mem_align(bs), size);
3287 }
3288 
3289 void *qemu_blockalign0(BlockDriverState *bs, size_t size)
3290 {
3291     IO_CODE();
3292     return memset(qemu_blockalign(bs, size), 0, size);
3293 }
3294 
3295 void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
3296 {
3297     size_t align = bdrv_opt_mem_align(bs);
3298     IO_CODE();
3299 
3300     /* Ensure that NULL is never returned on success */
3301     assert(align > 0);
3302     if (size == 0) {
3303         size = align;
3304     }
3305 
3306     return qemu_try_memalign(align, size);
3307 }
3308 
3309 void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
3310 {
3311     void *mem = qemu_try_blockalign(bs, size);
3312     IO_CODE();
3313 
3314     if (mem) {
3315         memset(mem, 0, size);
3316     }
3317 
3318     return mem;
3319 }
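
/*
 * Illustrative usage (hypothetical caller): bounce buffers for O_DIRECT
 * style backends must honour bdrv_opt_mem_align(), which is exactly what
 * these helpers guarantee.  The memory comes from qemu_memalign() and
 * therefore must be released with qemu_vfree(), not free():
 *
 *     uint8_t *bounce = qemu_try_blockalign(bs, 64 * 1024);
 *     if (!bounce) {
 *         return -ENOMEM;
 *     }
 *     ...
 *     qemu_vfree(bounce);
 */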
3320 
3321 /* Helper that undoes bdrv_register_buf() when it fails partway through */
3322 static void GRAPH_RDLOCK
3323 bdrv_register_buf_rollback(BlockDriverState *bs, void *host, size_t size,
3324                            BdrvChild *final_child)
3325 {
3326     BdrvChild *child;
3327 
3328     GLOBAL_STATE_CODE();
3329     assert_bdrv_graph_readable();
3330 
3331     QLIST_FOREACH(child, &bs->children, next) {
3332         if (child == final_child) {
3333             break;
3334         }
3335 
3336         bdrv_unregister_buf(child->bs, host, size);
3337     }
3338 
3339     if (bs->drv && bs->drv->bdrv_unregister_buf) {
3340         bs->drv->bdrv_unregister_buf(bs, host, size);
3341     }
3342 }
3343 
3344 bool bdrv_register_buf(BlockDriverState *bs, void *host, size_t size,
3345                        Error **errp)
3346 {
3347     BdrvChild *child;
3348 
3349     GLOBAL_STATE_CODE();
3350     GRAPH_RDLOCK_GUARD_MAINLOOP();
3351 
3352     if (bs->drv && bs->drv->bdrv_register_buf) {
3353         if (!bs->drv->bdrv_register_buf(bs, host, size, errp)) {
3354             return false;
3355         }
3356     }
3357     QLIST_FOREACH(child, &bs->children, next) {
3358         if (!bdrv_register_buf(child->bs, host, size, errp)) {
3359             bdrv_register_buf_rollback(bs, host, size, child);
3360             return false;
3361         }
3362     }
3363     return true;
3364 }
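
/*
 * Note the ordering above: a node registers its own driver buffer first and
 * then recurses into each child.  If child N fails, the rollback helper
 * unwinds children 0..N-1 and the node's own registration, so the caller
 * sees all-or-nothing behaviour for the whole subtree.
 */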
3365 
3366 void bdrv_unregister_buf(BlockDriverState *bs, void *host, size_t size)
3367 {
3368     BdrvChild *child;
3369 
3370     GLOBAL_STATE_CODE();
3371     GRAPH_RDLOCK_GUARD_MAINLOOP();
3372 
3373     if (bs->drv && bs->drv->bdrv_unregister_buf) {
3374         bs->drv->bdrv_unregister_buf(bs, host, size);
3375     }
3376     QLIST_FOREACH(child, &bs->children, next) {
3377         bdrv_unregister_buf(child->bs, host, size);
3378     }
3379 }
3380 
3381 static int coroutine_fn GRAPH_RDLOCK bdrv_co_copy_range_internal(
3382         BdrvChild *src, int64_t src_offset, BdrvChild *dst,
3383         int64_t dst_offset, int64_t bytes,
3384         BdrvRequestFlags read_flags, BdrvRequestFlags write_flags,
3385         bool recurse_src)
3386 {
3387     BdrvTrackedRequest req;
3388     int ret;
3389     assert_bdrv_graph_readable();
3390 
3391     /* TODO We can support BDRV_REQ_NO_FALLBACK here */
3392     assert(!(read_flags & BDRV_REQ_NO_FALLBACK));
3393     assert(!(write_flags & BDRV_REQ_NO_FALLBACK));
3394     assert(!(read_flags & BDRV_REQ_NO_WAIT));
3395     assert(!(write_flags & BDRV_REQ_NO_WAIT));
3396 
3397     if (!dst || !dst->bs || !bdrv_co_is_inserted(dst->bs)) {
3398         return -ENOMEDIUM;
3399     }
3400     ret = bdrv_check_request32(dst_offset, bytes, NULL, 0);
3401     if (ret) {
3402         return ret;
3403     }
3404     if (write_flags & BDRV_REQ_ZERO_WRITE) {
3405         return bdrv_co_pwrite_zeroes(dst, dst_offset, bytes, write_flags);
3406     }
3407 
3408     if (!src || !src->bs || !bdrv_co_is_inserted(src->bs)) {
3409         return -ENOMEDIUM;
3410     }
3411     ret = bdrv_check_request32(src_offset, bytes, NULL, 0);
3412     if (ret) {
3413         return ret;
3414     }
3415 
3416     if (!src->bs->drv->bdrv_co_copy_range_from
3417         || !dst->bs->drv->bdrv_co_copy_range_to
3418         || src->bs->encrypted || dst->bs->encrypted) {
3419         return -ENOTSUP;
3420     }
3421 
3422     if (recurse_src) {
3423         bdrv_inc_in_flight(src->bs);
3424         tracked_request_begin(&req, src->bs, src_offset, bytes,
3425                               BDRV_TRACKED_READ);
3426 
3427         /* BDRV_REQ_SERIALISING is only for write operations */
3428         assert(!(read_flags & BDRV_REQ_SERIALISING));
3429         bdrv_wait_serialising_requests(&req);
3430 
3431         ret = src->bs->drv->bdrv_co_copy_range_from(src->bs,
3432                                                     src, src_offset,
3433                                                     dst, dst_offset,
3434                                                     bytes,
3435                                                     read_flags, write_flags);
3436 
3437         tracked_request_end(&req);
3438         bdrv_dec_in_flight(src->bs);
3439     } else {
3440         bdrv_inc_in_flight(dst->bs);
3441         tracked_request_begin(&req, dst->bs, dst_offset, bytes,
3442                               BDRV_TRACKED_WRITE);
3443         ret = bdrv_co_write_req_prepare(dst, dst_offset, bytes, &req,
3444                                         write_flags);
3445         if (!ret) {
3446             ret = dst->bs->drv->bdrv_co_copy_range_to(dst->bs,
3447                                                       src, src_offset,
3448                                                       dst, dst_offset,
3449                                                       bytes,
3450                                                       read_flags, write_flags);
3451         }
3452         bdrv_co_write_req_finish(dst, dst_offset, bytes, &req, ret);
3453         tracked_request_end(&req);
3454         bdrv_dec_in_flight(dst->bs);
3455     }
3456 
3457     return ret;
3458 }
3459 
3460 /* Copy range from @src to @dst.
3461  *
3462  * See the comment on bdrv_co_copy_range() for the parameter and return
3463  * value semantics. */
3464 int coroutine_fn bdrv_co_copy_range_from(BdrvChild *src, int64_t src_offset,
3465                                          BdrvChild *dst, int64_t dst_offset,
3466                                          int64_t bytes,
3467                                          BdrvRequestFlags read_flags,
3468                                          BdrvRequestFlags write_flags)
3469 {
3470     IO_CODE();
3471     assert_bdrv_graph_readable();
3472     trace_bdrv_co_copy_range_from(src, src_offset, dst, dst_offset, bytes,
3473                                   read_flags, write_flags);
3474     return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
3475                                        bytes, read_flags, write_flags, true);
3476 }
3477 
3478 /* Copy range from @src to @dst.
3479  *
3480  * See the comment on bdrv_co_copy_range() for the parameter and return
3481  * value semantics. */
3482 int coroutine_fn bdrv_co_copy_range_to(BdrvChild *src, int64_t src_offset,
3483                                        BdrvChild *dst, int64_t dst_offset,
3484                                        int64_t bytes,
3485                                        BdrvRequestFlags read_flags,
3486                                        BdrvRequestFlags write_flags)
3487 {
3488     IO_CODE();
3489     assert_bdrv_graph_readable();
3490     trace_bdrv_co_copy_range_to(src, src_offset, dst, dst_offset, bytes,
3491                                 read_flags, write_flags);
3492     return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
3493                                        bytes, read_flags, write_flags, false);
3494 }
3495 
3496 int coroutine_fn bdrv_co_copy_range(BdrvChild *src, int64_t src_offset,
3497                                     BdrvChild *dst, int64_t dst_offset,
3498                                     int64_t bytes, BdrvRequestFlags read_flags,
3499                                     BdrvRequestFlags write_flags)
3500 {
3501     IO_CODE();
3502     assert_bdrv_graph_readable();
3503 
3504     return bdrv_co_copy_range_from(src, src_offset,
3505                                    dst, dst_offset,
3506                                    bytes, read_flags, write_flags);
3507 }
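
/*
 * Illustrative caller sketch (hypothetical filter driver; s->src_child and
 * s->dst_child are assumed fields): offloaded copy between two children,
 * with an explicit fallback when the drivers cannot negotiate it:
 *
 *     ret = bdrv_co_copy_range(s->src_child, 0, s->dst_child, 0,
 *                              1024 * 1024, 0, 0);
 *     if (ret == -ENOTSUP) {
 *         // e.g. read into a qemu_blockalign() bounce buffer, then write
 *     }
 */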
3508 
3509 static void bdrv_parent_cb_resize(BlockDriverState *bs)
3510 {
3511     BdrvChild *c;
3512     QLIST_FOREACH(c, &bs->parents, next_parent) {
3513         if (c->klass->resize) {
3514             c->klass->resize(c);
3515         }
3516     }
3517 }
3518 
3519 /**
3520  * Truncate file to 'offset' bytes (needed only for file protocols)
3521  *
3522  * If 'exact' is true, the file must be resized to exactly the given
3523  * 'offset'.  Otherwise, it is sufficient for the node to be at least
3524  * 'offset' bytes in length.
3525  */
3526 int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset, bool exact,
3527                                   PreallocMode prealloc, BdrvRequestFlags flags,
3528                                   Error **errp)
3529 {
3530     BlockDriverState *bs = child->bs;
3531     BdrvChild *filtered, *backing;
3532     BlockDriver *drv = bs->drv;
3533     BdrvTrackedRequest req;
3534     int64_t old_size, new_bytes;
3535     int ret;
3536     IO_CODE();
3537     assert_bdrv_graph_readable();
3538 
3539     /* if bs->drv == NULL, bs is closed, so there's nothing to do here */
3540     if (!drv) {
3541         error_setg(errp, "No medium inserted");
3542         return -ENOMEDIUM;
3543     }
3544     if (offset < 0) {
3545         error_setg(errp, "Image size cannot be negative");
3546         return -EINVAL;
3547     }
3548 
3549     ret = bdrv_check_request(offset, 0, errp);
3550     if (ret < 0) {
3551         return ret;
3552     }
3553 
3554     old_size = bdrv_co_getlength(bs);
3555     if (old_size < 0) {
3556         error_setg_errno(errp, -old_size, "Failed to get old image size");
3557         return old_size;
3558     }
3559 
3560     if (bdrv_is_read_only(bs)) {
3561         error_setg(errp, "Image is read-only");
3562         return -EACCES;
3563     }
3564 
3565     if (offset > old_size) {
3566         new_bytes = offset - old_size;
3567     } else {
3568         new_bytes = 0;
3569     }
3570 
3571     bdrv_inc_in_flight(bs);
3572     tracked_request_begin(&req, bs, offset - new_bytes, new_bytes,
3573                           BDRV_TRACKED_TRUNCATE);
3574 
3575     /* If we are growing the image and potentially using preallocation for the
3576      * new area, we need to make sure that no write requests are made to it
3577      * concurrently or they might be overwritten by preallocation. */
3578     if (new_bytes) {
3579         bdrv_make_request_serialising(&req, 1);
3580     }
3581     ret = bdrv_co_write_req_prepare(child, offset - new_bytes, new_bytes, &req,
3582                                     0);
3583     if (ret < 0) {
3584         error_setg_errno(errp, -ret,
3585                          "Failed to prepare request for truncation");
3586         goto out;
3587     }
3588 
3589     filtered = bdrv_filter_child(bs);
3590     backing = bdrv_cow_child(bs);
3591 
3592     /*
3593      * If the image has a backing file that is large enough that it would
3594      * provide data for the new area, we cannot leave it unallocated because
3595      * then the backing file content would become visible. Instead, zero-fill
3596      * the new area.
3597      * Note that if the image has a backing file on disk but was opened
3598      * without it, keeping things consistent with that backing file is the
3599      * user's responsibility.
3600      * file is the user's responsibility.
3601      */
3602     if (new_bytes && backing) {
3603         int64_t backing_len;
3604 
3605         backing_len = bdrv_co_getlength(backing->bs);
3606         if (backing_len < 0) {
3607             ret = backing_len;
3608             error_setg_errno(errp, -ret, "Could not get backing file size");
3609             goto out;
3610         }
3611 
3612         if (backing_len > old_size) {
3613             flags |= BDRV_REQ_ZERO_WRITE;
3614         }
3615     }
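    /*
     * Example (illustrative sizes): old_size = 1 GiB, backing file of
     * 2 GiB, truncation to 3 GiB.  Without BDRV_REQ_ZERO_WRITE, reads in
     * [1 GiB, 2 GiB) of the new area would suddenly return backing file
     * data instead of zeroes, so the whole new area gets zeroed instead.
     */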
3616 
3617     if (drv->bdrv_co_truncate) {
3618         if (flags & ~bs->supported_truncate_flags) {
3619             error_setg(errp, "Block driver does not support requested flags");
3620             ret = -ENOTSUP;
3621             goto out;
3622         }
3623         ret = drv->bdrv_co_truncate(bs, offset, exact, prealloc, flags, errp);
3624     } else if (filtered) {
3625         ret = bdrv_co_truncate(filtered, offset, exact, prealloc, flags, errp);
3626     } else {
3627         error_setg(errp, "Image format driver does not support resize");
3628         ret = -ENOTSUP;
3629         goto out;
3630     }
3631     if (ret < 0) {
3632         goto out;
3633     }
3634 
3635     ret = bdrv_co_refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
3636     if (ret < 0) {
3637         error_setg_errno(errp, -ret, "Could not refresh total sector count");
3638     } else {
3639         offset = bs->total_sectors * BDRV_SECTOR_SIZE;
3640     }
3641     /*
3642      * It's possible that truncation succeeded but bdrv_co_refresh_total_sectors()
3643      * failed; the latter doesn't affect how we should finish the request.
3644      * Pass 0 as the last parameter so that dirty bitmaps etc. are handled.
3645      */
3646     bdrv_co_write_req_finish(child, offset - new_bytes, new_bytes, &req, 0);
3647 
3648 out:
3649     tracked_request_end(&req);
3650     bdrv_dec_in_flight(bs);
3651 
3652     return ret;
3653 }
3654 
3655 void bdrv_cancel_in_flight(BlockDriverState *bs)
3656 {
3657     GLOBAL_STATE_CODE();
3658     if (!bs || !bs->drv) {
3659         return;
3660     }
3661 
3662     if (bs->drv->bdrv_cancel_in_flight) {
3663         bs->drv->bdrv_cancel_in_flight(bs);
3664     }
3665 }
3666 
3667 int coroutine_fn
3668 bdrv_co_preadv_snapshot(BdrvChild *child, int64_t offset, int64_t bytes,
3669                         QEMUIOVector *qiov, size_t qiov_offset)
3670 {
3671     BlockDriverState *bs = child->bs;
3672     BlockDriver *drv = bs->drv;
3673     int ret;
3674     IO_CODE();
3675     assert_bdrv_graph_readable();
3676 
3677     if (!drv) {
3678         return -ENOMEDIUM;
3679     }
3680 
3681     if (!drv->bdrv_co_preadv_snapshot) {
3682         return -ENOTSUP;
3683     }
3684 
3685     bdrv_inc_in_flight(bs);
3686     ret = drv->bdrv_co_preadv_snapshot(bs, offset, bytes, qiov, qiov_offset);
3687     bdrv_dec_in_flight(bs);
3688 
3689     return ret;
3690 }
3691 
3692 int coroutine_fn
3693 bdrv_co_snapshot_block_status(BlockDriverState *bs,
3694                               bool want_zero, int64_t offset, int64_t bytes,
3695                               int64_t *pnum, int64_t *map,
3696                               BlockDriverState **file)
3697 {
3698     BlockDriver *drv = bs->drv;
3699     int ret;
3700     IO_CODE();
3701     assert_bdrv_graph_readable();
3702 
3703     if (!drv) {
3704         return -ENOMEDIUM;
3705     }
3706 
3707     if (!drv->bdrv_co_snapshot_block_status) {
3708         return -ENOTSUP;
3709     }
3710 
3711     bdrv_inc_in_flight(bs);
3712     ret = drv->bdrv_co_snapshot_block_status(bs, want_zero, offset, bytes,
3713                                              pnum, map, file);
3714     bdrv_dec_in_flight(bs);
3715 
3716     return ret;
3717 }
3718 
3719 int coroutine_fn
3720 bdrv_co_pdiscard_snapshot(BlockDriverState *bs, int64_t offset, int64_t bytes)
3721 {
3722     BlockDriver *drv = bs->drv;
3723     int ret;
3724     IO_CODE();
3725     assert_bdrv_graph_readable();
3726 
3727     if (!drv) {
3728         return -ENOMEDIUM;
3729     }
3730 
3731     if (!drv->bdrv_co_pdiscard_snapshot) {
3732         return -ENOTSUP;
3733     }
3734 
3735     bdrv_inc_in_flight(bs);
3736     ret = drv->bdrv_co_pdiscard_snapshot(bs, offset, bytes);
3737     bdrv_dec_in_flight(bs);
3738 
3739     return ret;
3740 }
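
/*
 * The three snapshot-access wrappers above share one shape: gate on the
 * driver callback, then bracket the call with the in-flight counter so that
 * drained sections wait for it.  A wrapper for some hypothetical future
 * callback would follow the same sketch:
 *
 *     if (!drv) {
 *         return -ENOMEDIUM;
 *     }
 *     if (!drv->bdrv_co_foo_snapshot) {   // hypothetical callback
 *         return -ENOTSUP;
 *     }
 *     bdrv_inc_in_flight(bs);
 *     ret = drv->bdrv_co_foo_snapshot(bs, offset, bytes);
 *     bdrv_dec_in_flight(bs);
 *     return ret;
 */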
3741