xref: /openbmc/qemu/block/io.c (revision 1580b897)
/*
 * Block layer I/O functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "trace.h"
#include "sysemu/block-backend.h"
#include "block/aio-wait.h"
#include "block/blockjob.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "block/coroutines.h"
#include "block/write-threshold.h"
#include "qemu/cutils.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "sysemu/replay.h"

/* Maximum bounce buffer for copy-on-read and write zeroes, in bytes */
#define MAX_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS)
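/* With BDRV_SECTOR_BITS == 9 (512-byte sectors), this comes to 16 MiB. */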

static void bdrv_parent_cb_resize(BlockDriverState *bs);
static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int64_t bytes, BdrvRequestFlags flags);

static void bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore,
                                      bool ignore_bds_parents)
{
    BdrvChild *c, *next;

    QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
        if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) {
            continue;
        }
        bdrv_parent_drained_begin_single(c, false);
    }
}

static void bdrv_parent_drained_end_single_no_poll(BdrvChild *c,
                                                   int *drained_end_counter)
{
    assert(c->parent_quiesce_counter > 0);
    c->parent_quiesce_counter--;
    if (c->klass->drained_end) {
        c->klass->drained_end(c, drained_end_counter);
    }
}

void bdrv_parent_drained_end_single(BdrvChild *c)
{
    int drained_end_counter = 0;
    bdrv_parent_drained_end_single_no_poll(c, &drained_end_counter);
    BDRV_POLL_WHILE(c->bs, qatomic_read(&drained_end_counter) > 0);
}

static void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore,
                                    bool ignore_bds_parents,
                                    int *drained_end_counter)
{
    BdrvChild *c;

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) {
            continue;
        }
        bdrv_parent_drained_end_single_no_poll(c, drained_end_counter);
    }
}

static bool bdrv_parent_drained_poll_single(BdrvChild *c)
{
    if (c->klass->drained_poll) {
        return c->klass->drained_poll(c);
    }
    return false;
}

static bool bdrv_parent_drained_poll(BlockDriverState *bs, BdrvChild *ignore,
                                     bool ignore_bds_parents)
{
    BdrvChild *c, *next;
    bool busy = false;

    QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
        if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) {
            continue;
        }
        busy |= bdrv_parent_drained_poll_single(c);
    }

    return busy;
}

void bdrv_parent_drained_begin_single(BdrvChild *c, bool poll)
{
    c->parent_quiesce_counter++;
    if (c->klass->drained_begin) {
        c->klass->drained_begin(c);
    }
    if (poll) {
        BDRV_POLL_WHILE(c->bs, bdrv_parent_drained_poll_single(c));
    }
}

static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src)
{
    dst->pdiscard_alignment = MAX(dst->pdiscard_alignment,
                                  src->pdiscard_alignment);
    dst->opt_transfer = MAX(dst->opt_transfer, src->opt_transfer);
    dst->max_transfer = MIN_NON_ZERO(dst->max_transfer, src->max_transfer);
    dst->max_hw_transfer = MIN_NON_ZERO(dst->max_hw_transfer,
                                        src->max_hw_transfer);
    dst->opt_mem_alignment = MAX(dst->opt_mem_alignment,
                                 src->opt_mem_alignment);
    dst->min_mem_alignment = MAX(dst->min_mem_alignment,
                                 src->min_mem_alignment);
    dst->max_iov = MIN_NON_ZERO(dst->max_iov, src->max_iov);
}
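
/*
 * For example, merging a parent whose max_transfer is still 0 (unlimited)
 * with a child whose max_transfer is 64 KiB leaves dst->max_transfer at
 * 64 KiB, because MIN_NON_ZERO() treats 0 as "no limit"; opt_transfer
 * instead takes the larger of the two values.
 */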

typedef struct BdrvRefreshLimitsState {
    BlockDriverState *bs;
    BlockLimits old_bl;
} BdrvRefreshLimitsState;

static void bdrv_refresh_limits_abort(void *opaque)
{
    BdrvRefreshLimitsState *s = opaque;

    s->bs->bl = s->old_bl;
}

static TransactionActionDrv bdrv_refresh_limits_drv = {
    .abort = bdrv_refresh_limits_abort,
    .clean = g_free,
};

/* @tran is allowed to be NULL, in this case no rollback is possible. */
void bdrv_refresh_limits(BlockDriverState *bs, Transaction *tran, Error **errp)
{
    ERRP_GUARD();
    BlockDriver *drv = bs->drv;
    BdrvChild *c;
    bool have_limits;

    if (tran) {
        BdrvRefreshLimitsState *s = g_new(BdrvRefreshLimitsState, 1);
        *s = (BdrvRefreshLimitsState) {
            .bs = bs,
            .old_bl = bs->bl,
        };
        tran_add(tran, &bdrv_refresh_limits_drv, s);
    }

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return;
    }

    /* Default alignment based on whether driver has byte interface */
    bs->bl.request_alignment = (drv->bdrv_co_preadv ||
                                drv->bdrv_aio_preadv ||
                                drv->bdrv_co_preadv_part) ? 1 : 512;

    /* Take some limits from the children as a default */
    have_limits = false;
    QLIST_FOREACH(c, &bs->children, next) {
        if (c->role & (BDRV_CHILD_DATA | BDRV_CHILD_FILTERED | BDRV_CHILD_COW))
        {
            bdrv_refresh_limits(c->bs, tran, errp);
            if (*errp) {
                return;
            }
            bdrv_merge_limits(&bs->bl, &c->bs->bl);
            have_limits = true;
        }
    }

    if (!have_limits) {
        bs->bl.min_mem_alignment = 512;
        bs->bl.opt_mem_alignment = qemu_real_host_page_size;

        /* Safe default since most protocols use readv()/writev()/etc */
        bs->bl.max_iov = IOV_MAX;
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        drv->bdrv_refresh_limits(bs, errp);
        if (*errp) {
            return;
        }
    }

    if (bs->bl.request_alignment > BDRV_MAX_ALIGNMENT) {
        error_setg(errp, "Driver requires too large request alignment");
    }
}
/**
 * The copy-on-read flag is actually a reference count, so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have disabled it again.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    qatomic_inc(&bs->copy_on_read);
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    int old = qatomic_fetch_dec(&bs->copy_on_read);
    assert(old >= 1);
}
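
/*
 * A sketch of the intended pairing (with a hypothetical caller): each user
 * enables the feature independently, and it only goes away once every user
 * has disabled it again:
 *
 *     bdrv_enable_copy_on_read(bs);     // user A
 *     bdrv_enable_copy_on_read(bs);     // user B
 *     bdrv_disable_copy_on_read(bs);    // user A done, still enabled
 *     bdrv_disable_copy_on_read(bs);    // user B done, now disabled
 */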

typedef struct {
    Coroutine *co;
    BlockDriverState *bs;
    bool done;
    bool begin;
    bool recursive;
    bool poll;
    BdrvChild *parent;
    bool ignore_bds_parents;
    int *drained_end_counter;
} BdrvCoDrainData;

static void coroutine_fn bdrv_drain_invoke_entry(void *opaque)
{
    BdrvCoDrainData *data = opaque;
    BlockDriverState *bs = data->bs;

    if (data->begin) {
        bs->drv->bdrv_co_drain_begin(bs);
    } else {
        bs->drv->bdrv_co_drain_end(bs);
    }

    /* Set data->done and decrement drained_end_counter before bdrv_wakeup() */
    qatomic_mb_set(&data->done, true);
    if (!data->begin) {
        qatomic_dec(data->drained_end_counter);
    }
    bdrv_dec_in_flight(bs);

    g_free(data);
}

/* Recursively call BlockDriver.bdrv_co_drain_begin/end callbacks */
static void bdrv_drain_invoke(BlockDriverState *bs, bool begin,
                              int *drained_end_counter)
{
    BdrvCoDrainData *data;

    if (!bs->drv || (begin && !bs->drv->bdrv_co_drain_begin) ||
            (!begin && !bs->drv->bdrv_co_drain_end)) {
        return;
    }

    data = g_new(BdrvCoDrainData, 1);
    *data = (BdrvCoDrainData) {
        .bs = bs,
        .done = false,
        .begin = begin,
        .drained_end_counter = drained_end_counter,
    };

    if (!begin) {
        qatomic_inc(drained_end_counter);
    }

    /* Make sure the driver callback completes during the polling phase for
     * drain_begin. */
    bdrv_inc_in_flight(bs);
    data->co = qemu_coroutine_create(bdrv_drain_invoke_entry, data);
    aio_co_schedule(bdrv_get_aio_context(bs), data->co);
}

/* Returns true if BDRV_POLL_WHILE() should go into a blocking aio_poll() */
bool bdrv_drain_poll(BlockDriverState *bs, bool recursive,
                     BdrvChild *ignore_parent, bool ignore_bds_parents)
{
    BdrvChild *child, *next;

    if (bdrv_parent_drained_poll(bs, ignore_parent, ignore_bds_parents)) {
        return true;
    }

    if (qatomic_read(&bs->in_flight)) {
        return true;
    }

    if (recursive) {
        assert(!ignore_bds_parents);
        QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
            if (bdrv_drain_poll(child->bs, recursive, child, false)) {
                return true;
            }
        }
    }

    return false;
}

static bool bdrv_drain_poll_top_level(BlockDriverState *bs, bool recursive,
                                      BdrvChild *ignore_parent)
{
    return bdrv_drain_poll(bs, recursive, ignore_parent, false);
}

static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
                                  BdrvChild *parent, bool ignore_bds_parents,
                                  bool poll);
static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
                                BdrvChild *parent, bool ignore_bds_parents,
                                int *drained_end_counter);

static void bdrv_co_drain_bh_cb(void *opaque)
{
    BdrvCoDrainData *data = opaque;
    Coroutine *co = data->co;
    BlockDriverState *bs = data->bs;

    if (bs) {
        AioContext *ctx = bdrv_get_aio_context(bs);
        aio_context_acquire(ctx);
        bdrv_dec_in_flight(bs);
        if (data->begin) {
            assert(!data->drained_end_counter);
            bdrv_do_drained_begin(bs, data->recursive, data->parent,
                                  data->ignore_bds_parents, data->poll);
        } else {
            assert(!data->poll);
            bdrv_do_drained_end(bs, data->recursive, data->parent,
                                data->ignore_bds_parents,
                                data->drained_end_counter);
        }
        aio_context_release(ctx);
    } else {
        assert(data->begin);
        bdrv_drain_all_begin();
    }

    data->done = true;
    aio_co_wake(co);
}

static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
                                                bool begin, bool recursive,
                                                BdrvChild *parent,
                                                bool ignore_bds_parents,
                                                bool poll,
                                                int *drained_end_counter)
{
    BdrvCoDrainData data;
    Coroutine *self = qemu_coroutine_self();
    AioContext *ctx = bdrv_get_aio_context(bs);
    AioContext *co_ctx = qemu_coroutine_get_aio_context(self);

    /* Calling bdrv_drain() from a BH ensures the current coroutine yields and
     * other coroutines run if they were queued by aio_co_enter(). */

    assert(qemu_in_coroutine());
    data = (BdrvCoDrainData) {
        .co = self,
        .bs = bs,
        .done = false,
        .begin = begin,
        .recursive = recursive,
        .parent = parent,
        .ignore_bds_parents = ignore_bds_parents,
        .poll = poll,
        .drained_end_counter = drained_end_counter,
    };

    if (bs) {
        bdrv_inc_in_flight(bs);
    }

    /*
     * Temporarily drop the lock across yield or we would get deadlocks.
     * bdrv_co_drain_bh_cb() reacquires the lock as needed.
     *
     * When we yield below, the lock for the current context will be
     * released, so if this is actually the lock that protects bs, don't drop
     * it a second time.
     */
    if (ctx != co_ctx) {
        aio_context_release(ctx);
    }
    replay_bh_schedule_oneshot_event(ctx, bdrv_co_drain_bh_cb, &data);

    qemu_coroutine_yield();
    /* If we are resumed from some other event (such as an aio completion or a
     * timer callback), it is a bug in the caller that should be fixed. */
    assert(data.done);

    /* Reacquire the AioContext of bs if we dropped it */
    if (ctx != co_ctx) {
        aio_context_acquire(ctx);
    }
}

void bdrv_do_drained_begin_quiesce(BlockDriverState *bs,
                                   BdrvChild *parent, bool ignore_bds_parents)
{
    assert(!qemu_in_coroutine());

    /* Stop things in parent-to-child order */
    if (qatomic_fetch_inc(&bs->quiesce_counter) == 0) {
        aio_disable_external(bdrv_get_aio_context(bs));
    }

    bdrv_parent_drained_begin(bs, parent, ignore_bds_parents);
    bdrv_drain_invoke(bs, true, NULL);
}

static void bdrv_do_drained_begin(BlockDriverState *bs, bool recursive,
                                  BdrvChild *parent, bool ignore_bds_parents,
                                  bool poll)
{
    BdrvChild *child, *next;

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs, true, recursive, parent, ignore_bds_parents,
                               poll, NULL);
        return;
    }

    bdrv_do_drained_begin_quiesce(bs, parent, ignore_bds_parents);

    if (recursive) {
        assert(!ignore_bds_parents);
        bs->recursive_quiesce_counter++;
        QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
            bdrv_do_drained_begin(child->bs, true, child, ignore_bds_parents,
                                  false);
        }
    }

    /*
     * Wait for drained requests to finish.
     *
     * Calling BDRV_POLL_WHILE() only once for the top-level node is okay: The
     * call is needed so things in this AioContext can make progress even
     * though we don't return to the main AioContext loop - this automatically
     * includes other nodes in the same AioContext and therefore all child
     * nodes.
     */
    if (poll) {
        assert(!ignore_bds_parents);
        BDRV_POLL_WHILE(bs, bdrv_drain_poll_top_level(bs, recursive, parent));
    }
}

void bdrv_drained_begin(BlockDriverState *bs)
{
    bdrv_do_drained_begin(bs, false, NULL, false, true);
}

void bdrv_subtree_drained_begin(BlockDriverState *bs)
{
    bdrv_do_drained_begin(bs, true, NULL, false, true);
}

/**
 * This function does not poll, nor must any of its recursively called
 * functions.  The *drained_end_counter pointee will be incremented
 * once for every background operation scheduled, and decremented once
 * the operation settles.  Therefore, the pointer must remain valid
 * until the pointee reaches 0.  That implies that whoever sets up the
 * pointee has to poll until it is 0.
 *
 * We use atomic operations to access *drained_end_counter, because
 * (1) when called from bdrv_set_aio_context_ignore(), the subgraph of
 *     @bs may contain nodes in different AioContexts,
 * (2) bdrv_drain_all_end() uses the same counter for all nodes,
 *     regardless of which AioContext they are in.
 */
static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive,
                                BdrvChild *parent, bool ignore_bds_parents,
                                int *drained_end_counter)
{
    BdrvChild *child;
    int old_quiesce_counter;

    assert(drained_end_counter != NULL);

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs, false, recursive, parent, ignore_bds_parents,
                               false, drained_end_counter);
        return;
    }
    assert(bs->quiesce_counter > 0);

    /* Re-enable things in child-to-parent order */
    bdrv_drain_invoke(bs, false, drained_end_counter);
    bdrv_parent_drained_end(bs, parent, ignore_bds_parents,
                            drained_end_counter);

    old_quiesce_counter = qatomic_fetch_dec(&bs->quiesce_counter);
    if (old_quiesce_counter == 1) {
        aio_enable_external(bdrv_get_aio_context(bs));
    }

    if (recursive) {
        assert(!ignore_bds_parents);
        bs->recursive_quiesce_counter--;
        QLIST_FOREACH(child, &bs->children, next) {
            bdrv_do_drained_end(child->bs, true, child, ignore_bds_parents,
                                drained_end_counter);
        }
    }
}

void bdrv_drained_end(BlockDriverState *bs)
{
    int drained_end_counter = 0;
    bdrv_do_drained_end(bs, false, NULL, false, &drained_end_counter);
    BDRV_POLL_WHILE(bs, qatomic_read(&drained_end_counter) > 0);
}

void bdrv_drained_end_no_poll(BlockDriverState *bs, int *drained_end_counter)
{
    bdrv_do_drained_end(bs, false, NULL, false, drained_end_counter);
}

void bdrv_subtree_drained_end(BlockDriverState *bs)
{
    int drained_end_counter = 0;
    bdrv_do_drained_end(bs, true, NULL, false, &drained_end_counter);
    BDRV_POLL_WHILE(bs, qatomic_read(&drained_end_counter) > 0);
}

void bdrv_apply_subtree_drain(BdrvChild *child, BlockDriverState *new_parent)
{
    int i;

    for (i = 0; i < new_parent->recursive_quiesce_counter; i++) {
        bdrv_do_drained_begin(child->bs, true, child, false, true);
    }
}

void bdrv_unapply_subtree_drain(BdrvChild *child, BlockDriverState *old_parent)
{
    int drained_end_counter = 0;
    int i;

    for (i = 0; i < old_parent->recursive_quiesce_counter; i++) {
        bdrv_do_drained_end(child->bs, true, child, false,
                            &drained_end_counter);
    }

    BDRV_POLL_WHILE(child->bs, qatomic_read(&drained_end_counter) > 0);
}

/*
 * Wait for pending requests to complete on a single BlockDriverState subtree,
 * and suspend the block driver's internal I/O until the next request arrives.
 *
 * Note that unlike bdrv_drain_all(), the caller must hold the
 * BlockDriverState's AioContext.
 */
void coroutine_fn bdrv_co_drain(BlockDriverState *bs)
{
    assert(qemu_in_coroutine());
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}

void bdrv_drain(BlockDriverState *bs)
{
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}
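
/*
 * A minimal sketch of a drained section (hypothetical caller, error
 * handling omitted); between begin and end, parents send no new requests
 * and previously in-flight requests have completed:
 *
 *     bdrv_drained_begin(bs);
 *     ... modify the graph or other state that must not race with I/O ...
 *     bdrv_drained_end(bs);
 */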

static void bdrv_drain_assert_idle(BlockDriverState *bs)
{
    BdrvChild *child, *next;

    assert(qatomic_read(&bs->in_flight) == 0);
    QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
        bdrv_drain_assert_idle(child->bs);
    }
}

unsigned int bdrv_drain_all_count = 0;

static bool bdrv_drain_all_poll(void)
{
    BlockDriverState *bs = NULL;
    bool result = false;

    /* bdrv_drain_poll() can't make changes to the graph and we are holding the
     * main AioContext lock, so iterating bdrv_next_all_states() is safe. */
    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);
        aio_context_acquire(aio_context);
        result |= bdrv_drain_poll(bs, false, NULL, true);
        aio_context_release(aio_context);
    }

    return result;
}

/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 *
 * This pauses all block jobs and disables external clients. It must
 * be paired with bdrv_drain_all_end().
 *
 * NOTE: no new block jobs or BlockDriverStates can be created between
 * the bdrv_drain_all_begin() and bdrv_drain_all_end() calls.
 */
void bdrv_drain_all_begin(void)
{
    BlockDriverState *bs = NULL;

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(NULL, true, false, NULL, true, true, NULL);
        return;
    }

    /*
     * The bdrv queue is managed by record/replay; waiting for the
     * in-flight I/O requests to finish could block forever.
     */
    if (replay_events_enabled()) {
        return;
    }

    /* AIO_WAIT_WHILE() with a NULL context can only be called from the main
     * loop AioContext, so make sure we're in the main context. */
    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    assert(bdrv_drain_all_count < INT_MAX);
    bdrv_drain_all_count++;

    /* Quiesce all nodes, without polling in-flight requests yet. The graph
     * cannot change during this loop. */
    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_do_drained_begin(bs, false, NULL, true, false);
        aio_context_release(aio_context);
    }

    /* Now poll the in-flight requests */
    AIO_WAIT_WHILE(NULL, bdrv_drain_all_poll());

    while ((bs = bdrv_next_all_states(bs))) {
        bdrv_drain_assert_idle(bs);
    }
}

void bdrv_drain_all_end_quiesce(BlockDriverState *bs)
{
    int drained_end_counter = 0;

    g_assert(bs->quiesce_counter > 0);
    g_assert(!bs->refcnt);

    while (bs->quiesce_counter) {
        bdrv_do_drained_end(bs, false, NULL, true, &drained_end_counter);
    }
    BDRV_POLL_WHILE(bs, qatomic_read(&drained_end_counter) > 0);
}

void bdrv_drain_all_end(void)
{
    BlockDriverState *bs = NULL;
    int drained_end_counter = 0;

    /*
     * The bdrv queue is managed by record/replay; waiting for the
     * in-flight I/O requests to finish could block forever.
     */
    if (replay_events_enabled()) {
        return;
    }

    while ((bs = bdrv_next_all_states(bs))) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        bdrv_do_drained_end(bs, false, NULL, true, &drained_end_counter);
        aio_context_release(aio_context);
    }

    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    AIO_WAIT_WHILE(NULL, qatomic_read(&drained_end_counter) > 0);

    assert(bdrv_drain_all_count > 0);
    bdrv_drain_all_count--;
}

void bdrv_drain_all(void)
{
    bdrv_drain_all_begin();
    bdrv_drain_all_end();
}

/**
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void tracked_request_end(BdrvTrackedRequest *req)
{
    if (req->serialising) {
        qatomic_dec(&req->bs->serialising_in_flight);
    }

    qemu_co_mutex_lock(&req->bs->reqs_lock);
    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
    qemu_co_mutex_unlock(&req->bs->reqs_lock);
}

/**
 * Add an active request to the tracked requests list
 */
static void tracked_request_begin(BdrvTrackedRequest *req,
                                  BlockDriverState *bs,
                                  int64_t offset,
                                  int64_t bytes,
                                  enum BdrvTrackedRequestType type)
{
    bdrv_check_request(offset, bytes, &error_abort);

    *req = (BdrvTrackedRequest){
        .bs = bs,
        .offset         = offset,
        .bytes          = bytes,
        .type           = type,
        .co             = qemu_coroutine_self(),
        .serialising    = false,
        .overlap_offset = offset,
        .overlap_bytes  = bytes,
    };

    qemu_co_queue_init(&req->wait_queue);

    qemu_co_mutex_lock(&bs->reqs_lock);
    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
    qemu_co_mutex_unlock(&bs->reqs_lock);
}

static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t offset, int64_t bytes)
{
    bdrv_check_request(offset, bytes, &error_abort);

    /*        aaaa   bbbb */
    if (offset >= req->overlap_offset + req->overlap_bytes) {
        return false;
    }
    /* bbbb   aaaa        */
    if (req->overlap_offset >= offset + bytes) {
        return false;
    }
    return true;
}
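
/*
 * Worked example: a request with overlap_offset == 4096 and
 * overlap_bytes == 4096 covers [4096, 8192).  A probe at offset == 8192
 * fails the first test (8192 >= 8192) and does not overlap, while
 * offset == 8191 with bytes == 1 passes both tests and does.
 */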

/* Called with self->bs->reqs_lock held */
static BdrvTrackedRequest *
bdrv_find_conflicting_request(BdrvTrackedRequest *self)
{
    BdrvTrackedRequest *req;

    QLIST_FOREACH(req, &self->bs->tracked_requests, list) {
        if (req == self || (!req->serialising && !self->serialising)) {
            continue;
        }
        if (tracked_request_overlaps(req, self->overlap_offset,
                                     self->overlap_bytes))
        {
            /*
             * Hitting this means there was a reentrant request, for
             * example, a block driver issuing nested requests.  This must
             * never happen since it means deadlock.
             */
            assert(qemu_coroutine_self() != req->co);

            /*
             * If the request is already (indirectly) waiting for us, or
             * will wait for us as soon as it wakes up, then just go on
             * (instead of producing a deadlock in the former case).
             */
            if (!req->waiting_for) {
                return req;
            }
        }
    }

    return NULL;
}

/* Called with self->bs->reqs_lock held */
static bool coroutine_fn
bdrv_wait_serialising_requests_locked(BdrvTrackedRequest *self)
{
    BdrvTrackedRequest *req;
    bool waited = false;

    while ((req = bdrv_find_conflicting_request(self))) {
        self->waiting_for = req;
        qemu_co_queue_wait(&req->wait_queue, &self->bs->reqs_lock);
        self->waiting_for = NULL;
        waited = true;
    }

    return waited;
}

/* Called with req->bs->reqs_lock held */
static void tracked_request_set_serialising(BdrvTrackedRequest *req,
                                            uint64_t align)
{
    int64_t overlap_offset = req->offset & ~(align - 1);
    int64_t overlap_bytes =
        ROUND_UP(req->offset + req->bytes, align) - overlap_offset;

    bdrv_check_request(req->offset, req->bytes, &error_abort);

    if (!req->serialising) {
        qatomic_inc(&req->bs->serialising_in_flight);
        req->serialising = true;
    }

    req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
    req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
}
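
/*
 * For example, with align == 512, a request at offset == 1300 with
 * bytes == 100 is widened to overlap_offset == 1024 and overlap_bytes ==
 * ROUND_UP(1400, 512) - 1024 == 512, so the serialising window covers the
 * whole aligned chunk [1024, 1536).
 */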

/**
 * Return the tracked request on @bs for the current coroutine, or
 * NULL if there is none.
 */
BdrvTrackedRequest *coroutine_fn bdrv_co_get_self_request(BlockDriverState *bs)
{
    BdrvTrackedRequest *req;
    Coroutine *self = qemu_coroutine_self();

    QLIST_FOREACH(req, &bs->tracked_requests, list) {
        if (req->co == self) {
            return req;
        }
    }

    return NULL;
}

/**
 * Round a region to cluster boundaries
 */
void bdrv_round_to_clusters(BlockDriverState *bs,
                            int64_t offset, int64_t bytes,
                            int64_t *cluster_offset,
                            int64_t *cluster_bytes)
{
    BlockDriverInfo bdi;

    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
        *cluster_offset = offset;
        *cluster_bytes = bytes;
    } else {
        int64_t c = bdi.cluster_size;
        *cluster_offset = QEMU_ALIGN_DOWN(offset, c);
        *cluster_bytes = QEMU_ALIGN_UP(offset - *cluster_offset + bytes, c);
    }
}
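
/*
 * For example, with a 64 KiB cluster size, offset == 70000 and
 * bytes == 1000 round to *cluster_offset == 65536 and *cluster_bytes ==
 * QEMU_ALIGN_UP(70000 - 65536 + 1000, 65536) == 65536, i.e. exactly the
 * one cluster containing the request.
 */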

static int bdrv_get_cluster_size(BlockDriverState *bs)
{
    BlockDriverInfo bdi;
    int ret;

    ret = bdrv_get_info(bs, &bdi);
    if (ret < 0 || bdi.cluster_size == 0) {
        return bs->bl.request_alignment;
    } else {
        return bdi.cluster_size;
    }
}

void bdrv_inc_in_flight(BlockDriverState *bs)
{
    qatomic_inc(&bs->in_flight);
}

void bdrv_wakeup(BlockDriverState *bs)
{
    aio_wait_kick();
}

void bdrv_dec_in_flight(BlockDriverState *bs)
{
    qatomic_dec(&bs->in_flight);
    bdrv_wakeup(bs);
}

static bool coroutine_fn bdrv_wait_serialising_requests(BdrvTrackedRequest *self)
{
    BlockDriverState *bs = self->bs;
    bool waited = false;

    if (!qatomic_read(&bs->serialising_in_flight)) {
        return false;
    }

    qemu_co_mutex_lock(&bs->reqs_lock);
    waited = bdrv_wait_serialising_requests_locked(self);
    qemu_co_mutex_unlock(&bs->reqs_lock);

    return waited;
}

bool coroutine_fn bdrv_make_request_serialising(BdrvTrackedRequest *req,
                                                uint64_t align)
{
    bool waited;

    qemu_co_mutex_lock(&req->bs->reqs_lock);

    tracked_request_set_serialising(req, align);
    waited = bdrv_wait_serialising_requests_locked(req);

    qemu_co_mutex_unlock(&req->bs->reqs_lock);

    return waited;
}

static int bdrv_check_qiov_request(int64_t offset, int64_t bytes,
                                   QEMUIOVector *qiov, size_t qiov_offset,
                                   Error **errp)
{
    /*
     * Check generic offset/bytes correctness
     */

    if (offset < 0) {
        error_setg(errp, "offset is negative: %" PRIi64, offset);
        return -EIO;
    }

    if (bytes < 0) {
        error_setg(errp, "bytes is negative: %" PRIi64, bytes);
        return -EIO;
    }

    if (bytes > BDRV_MAX_LENGTH) {
        error_setg(errp, "bytes(%" PRIi64 ") exceeds maximum(%" PRIi64 ")",
                   bytes, BDRV_MAX_LENGTH);
        return -EIO;
    }

    if (offset > BDRV_MAX_LENGTH) {
        error_setg(errp, "offset(%" PRIi64 ") exceeds maximum(%" PRIi64 ")",
                   offset, BDRV_MAX_LENGTH);
        return -EIO;
    }

    if (offset > BDRV_MAX_LENGTH - bytes) {
        error_setg(errp, "sum of offset(%" PRIi64 ") and bytes(%" PRIi64 ") "
                   "exceeds maximum(%" PRIi64 ")", offset, bytes,
                   BDRV_MAX_LENGTH);
        return -EIO;
    }

    if (!qiov) {
        return 0;
    }

    /*
     * Check qiov and qiov_offset
     */

    if (qiov_offset > qiov->size) {
        error_setg(errp, "qiov_offset(%zu) overflow io vector size(%zu)",
                   qiov_offset, qiov->size);
        return -EIO;
    }

    if (bytes > qiov->size - qiov_offset) {
        error_setg(errp, "bytes(%" PRIi64 ") + qiov_offset(%zu) overflow io "
                   "vector size(%zu)", bytes, qiov_offset, qiov->size);
        return -EIO;
    }

    return 0;
}

int bdrv_check_request(int64_t offset, int64_t bytes, Error **errp)
{
    return bdrv_check_qiov_request(offset, bytes, NULL, 0, errp);
}

static int bdrv_check_request32(int64_t offset, int64_t bytes,
                                QEMUIOVector *qiov, size_t qiov_offset)
{
    int ret = bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, NULL);
    if (ret < 0) {
        return ret;
    }

    if (bytes > BDRV_REQUEST_MAX_BYTES) {
        return -EIO;
    }

    return 0;
}

int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
                       int64_t bytes, BdrvRequestFlags flags)
{
    return bdrv_pwritev(child, offset, bytes, NULL,
                        BDRV_REQ_ZERO_WRITE | flags);
}

/*
 * Completely zero out a block device with the help of bdrv_pwrite_zeroes.
 * The operation is sped up by checking the block status and only writing
 * zeroes to the device if they currently do not return zeroes. Optional
 * flags are passed through to bdrv_pwrite_zeroes (e.g. BDRV_REQ_MAY_UNMAP,
 * BDRV_REQ_FUA).
 *
 * Returns < 0 on error, 0 on success. For error codes see bdrv_pwrite().
 */
int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags)
{
    int ret;
    int64_t target_size, bytes, offset = 0;
    BlockDriverState *bs = child->bs;

    target_size = bdrv_getlength(bs);
    if (target_size < 0) {
        return target_size;
    }

    for (;;) {
        bytes = MIN(target_size - offset, BDRV_REQUEST_MAX_BYTES);
        if (bytes <= 0) {
            return 0;
        }
        ret = bdrv_block_status(bs, offset, bytes, &bytes, NULL, NULL);
        if (ret < 0) {
            return ret;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            offset += bytes;
            continue;
        }
        ret = bdrv_pwrite_zeroes(child, offset, bytes, flags);
        if (ret < 0) {
            return ret;
        }
        offset += bytes;
    }
}

/* See bdrv_pwrite() for the return codes */
int bdrv_pread(BdrvChild *child, int64_t offset, void *buf, int64_t bytes)
{
    int ret;
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);

    if (bytes < 0) {
        return -EINVAL;
    }

    ret = bdrv_preadv(child, offset, bytes, &qiov, 0);

    return ret < 0 ? ret : bytes;
}
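
/*
 * A minimal usage sketch (hypothetical caller) reading a 512-byte header
 * from the start of the image:
 *
 *     uint8_t header[512];
 *     int ret = bdrv_pread(child, 0, header, sizeof(header));
 *     if (ret < 0) {
 *         ... handle error ...
 *     }
 */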

/* Return the number of bytes written on success or < 0 on error. Important
   errors are:
  -EIO         generic I/O error (may happen for all errors)
  -ENOMEDIUM   no media inserted
  -EINVAL      invalid offset or number of bytes
  -EACCES      trying to write to a read-only device
*/
int bdrv_pwrite(BdrvChild *child, int64_t offset, const void *buf,
                int64_t bytes)
{
    int ret;
    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);

    if (bytes < 0) {
        return -EINVAL;
    }

    ret = bdrv_pwritev(child, offset, bytes, &qiov, 0);

    return ret < 0 ? ret : bytes;
}

/*
 * Writes to the file and ensures that no writes are reordered across this
 * request (acts as a barrier)
 *
 * Returns 0 on success, -errno in error cases.
 */
int bdrv_pwrite_sync(BdrvChild *child, int64_t offset,
                     const void *buf, int64_t count)
{
    int ret;

    ret = bdrv_pwrite(child, offset, buf, count);
    if (ret < 0) {
        return ret;
    }

    ret = bdrv_flush(child->bs);
    if (ret < 0) {
        return ret;
    }

    return 0;
}
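
/*
 * A sketch of typical use (hypothetical caller): persisting metadata that
 * later updates must not overtake, e.g. a dirty flag in an image header:
 *
 *     ret = bdrv_pwrite_sync(child, 0, &header, sizeof(header));
 *     if (ret < 0) {
 *         return ret;
 *     }
 *     // writes submitted after this point cannot be reordered before it
 */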
1146 
1147 typedef struct CoroutineIOCompletion {
1148     Coroutine *coroutine;
1149     int ret;
1150 } CoroutineIOCompletion;
1151 
1152 static void bdrv_co_io_em_complete(void *opaque, int ret)
1153 {
1154     CoroutineIOCompletion *co = opaque;
1155 
1156     co->ret = ret;
1157     aio_co_wake(co->coroutine);
1158 }
1159 
1160 static int coroutine_fn bdrv_driver_preadv(BlockDriverState *bs,
1161                                            int64_t offset, int64_t bytes,
1162                                            QEMUIOVector *qiov,
1163                                            size_t qiov_offset, int flags)
1164 {
1165     BlockDriver *drv = bs->drv;
1166     int64_t sector_num;
1167     unsigned int nb_sectors;
1168     QEMUIOVector local_qiov;
1169     int ret;
1170 
1171     bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
1172     assert(!(flags & ~BDRV_REQ_MASK));
1173     assert(!(flags & BDRV_REQ_NO_FALLBACK));
1174 
1175     if (!drv) {
1176         return -ENOMEDIUM;
1177     }
1178 
1179     if (drv->bdrv_co_preadv_part) {
1180         return drv->bdrv_co_preadv_part(bs, offset, bytes, qiov, qiov_offset,
1181                                         flags);
1182     }
1183 
1184     if (qiov_offset > 0 || bytes != qiov->size) {
1185         qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
1186         qiov = &local_qiov;
1187     }
1188 
1189     if (drv->bdrv_co_preadv) {
1190         ret = drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags);
1191         goto out;
1192     }
1193 
1194     if (drv->bdrv_aio_preadv) {
1195         BlockAIOCB *acb;
1196         CoroutineIOCompletion co = {
1197             .coroutine = qemu_coroutine_self(),
1198         };
1199 
1200         acb = drv->bdrv_aio_preadv(bs, offset, bytes, qiov, flags,
1201                                    bdrv_co_io_em_complete, &co);
1202         if (acb == NULL) {
1203             ret = -EIO;
1204             goto out;
1205         } else {
1206             qemu_coroutine_yield();
1207             ret = co.ret;
1208             goto out;
1209         }
1210     }
1211 
1212     sector_num = offset >> BDRV_SECTOR_BITS;
1213     nb_sectors = bytes >> BDRV_SECTOR_BITS;
1214 
1215     assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
1216     assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
1217     assert(bytes <= BDRV_REQUEST_MAX_BYTES);
1218     assert(drv->bdrv_co_readv);
1219 
1220     ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
1221 
1222 out:
1223     if (qiov == &local_qiov) {
1224         qemu_iovec_destroy(&local_qiov);
1225     }
1226 
1227     return ret;
1228 }
1229 
1230 static int coroutine_fn bdrv_driver_pwritev(BlockDriverState *bs,
1231                                             int64_t offset, int64_t bytes,
1232                                             QEMUIOVector *qiov,
1233                                             size_t qiov_offset, int flags)
1234 {
1235     BlockDriver *drv = bs->drv;
1236     int64_t sector_num;
1237     unsigned int nb_sectors;
1238     QEMUIOVector local_qiov;
1239     int ret;
1240 
1241     bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
1242     assert(!(flags & ~BDRV_REQ_MASK));
1243     assert(!(flags & BDRV_REQ_NO_FALLBACK));
1244 
1245     if (!drv) {
1246         return -ENOMEDIUM;
1247     }
1248 
1249     if (drv->bdrv_co_pwritev_part) {
1250         ret = drv->bdrv_co_pwritev_part(bs, offset, bytes, qiov, qiov_offset,
1251                                         flags & bs->supported_write_flags);
1252         flags &= ~bs->supported_write_flags;
1253         goto emulate_flags;
1254     }
1255 
1256     if (qiov_offset > 0 || bytes != qiov->size) {
1257         qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
1258         qiov = &local_qiov;
1259     }
1260 
1261     if (drv->bdrv_co_pwritev) {
1262         ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov,
1263                                    flags & bs->supported_write_flags);
1264         flags &= ~bs->supported_write_flags;
1265         goto emulate_flags;
1266     }
1267 
1268     if (drv->bdrv_aio_pwritev) {
1269         BlockAIOCB *acb;
1270         CoroutineIOCompletion co = {
1271             .coroutine = qemu_coroutine_self(),
1272         };
1273 
1274         acb = drv->bdrv_aio_pwritev(bs, offset, bytes, qiov,
1275                                     flags & bs->supported_write_flags,
1276                                     bdrv_co_io_em_complete, &co);
1277         flags &= ~bs->supported_write_flags;
1278         if (acb == NULL) {
1279             ret = -EIO;
1280         } else {
1281             qemu_coroutine_yield();
1282             ret = co.ret;
1283         }
1284         goto emulate_flags;
1285     }
1286 
1287     sector_num = offset >> BDRV_SECTOR_BITS;
1288     nb_sectors = bytes >> BDRV_SECTOR_BITS;
1289 
1290     assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
1291     assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
1292     assert(bytes <= BDRV_REQUEST_MAX_BYTES);
1293 
1294     assert(drv->bdrv_co_writev);
1295     ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov,
1296                               flags & bs->supported_write_flags);
1297     flags &= ~bs->supported_write_flags;
1298 
1299 emulate_flags:
1300     if (ret == 0 && (flags & BDRV_REQ_FUA)) {
1301         ret = bdrv_co_flush(bs);
1302     }
1303 
1304     if (qiov == &local_qiov) {
1305         qemu_iovec_destroy(&local_qiov);
1306     }
1307 
1308     return ret;
1309 }
1310 
1311 static int coroutine_fn
1312 bdrv_driver_pwritev_compressed(BlockDriverState *bs, int64_t offset,
1313                                int64_t bytes, QEMUIOVector *qiov,
1314                                size_t qiov_offset)
1315 {
1316     BlockDriver *drv = bs->drv;
1317     QEMUIOVector local_qiov;
1318     int ret;
1319 
1320     bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
1321 
1322     if (!drv) {
1323         return -ENOMEDIUM;
1324     }
1325 
1326     if (!block_driver_can_compress(drv)) {
1327         return -ENOTSUP;
1328     }
1329 
1330     if (drv->bdrv_co_pwritev_compressed_part) {
1331         return drv->bdrv_co_pwritev_compressed_part(bs, offset, bytes,
1332                                                     qiov, qiov_offset);
1333     }
1334 
1335     if (qiov_offset == 0) {
1336         return drv->bdrv_co_pwritev_compressed(bs, offset, bytes, qiov);
1337     }
1338 
1339     qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
1340     ret = drv->bdrv_co_pwritev_compressed(bs, offset, bytes, &local_qiov);
1341     qemu_iovec_destroy(&local_qiov);
1342 
1343     return ret;
1344 }
1345 
1346 static int coroutine_fn bdrv_co_do_copy_on_readv(BdrvChild *child,
1347         int64_t offset, int64_t bytes, QEMUIOVector *qiov,
1348         size_t qiov_offset, int flags)
1349 {
1350     BlockDriverState *bs = child->bs;
1351 
1352     /* Perform I/O through a temporary buffer so that users who scribble over
1353      * their read buffer while the operation is in progress do not end up
1354      * modifying the image file.  This is critical for zero-copy guest I/O
1355      * where anything might happen inside guest memory.
1356      */
1357     void *bounce_buffer = NULL;
1358 
1359     BlockDriver *drv = bs->drv;
1360     int64_t cluster_offset;
1361     int64_t cluster_bytes;
1362     int64_t skip_bytes;
1363     int ret;
1364     int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer,
1365                                     BDRV_REQUEST_MAX_BYTES);
1366     int64_t progress = 0;
1367     bool skip_write;
1368 
1369     bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
1370 
1371     if (!drv) {
1372         return -ENOMEDIUM;
1373     }
1374 
1375     /*
1376      * Do not write anything when the BDS is inactive.  That is not
1377      * allowed, and it would not help.
1378      */
1379     skip_write = (bs->open_flags & BDRV_O_INACTIVE);
1380 
1381     /* FIXME We cannot require callers to have write permissions when all they
1382      * are doing is a read request. If we did things right, write permissions
1383      * would be obtained anyway, but internally by the copy-on-read code. As
1384      * long as it is implemented here rather than in a separate filter driver,
1385      * the copy-on-read code doesn't have its own BdrvChild, however, for which
1386      * it could request permissions. Therefore we have to bypass the permission
1387      * system for the moment. */
1388     // assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));
1389 
1390     /* Cover entire cluster so no additional backing file I/O is required when
1391      * allocating cluster in the image file.  Note that this value may exceed
1392      * BDRV_REQUEST_MAX_BYTES (even when the original read did not), which
1393      * is one reason we loop rather than doing it all at once.
1394      */
1395     bdrv_round_to_clusters(bs, offset, bytes, &cluster_offset, &cluster_bytes);
1396     skip_bytes = offset - cluster_offset;
1397 
1398     trace_bdrv_co_do_copy_on_readv(bs, offset, bytes,
1399                                    cluster_offset, cluster_bytes);
1400 
1401     while (cluster_bytes) {
1402         int64_t pnum;
1403 
1404         if (skip_write) {
1405             ret = 1; /* "already allocated", so nothing will be copied */
1406             pnum = MIN(cluster_bytes, max_transfer);
1407         } else {
1408             ret = bdrv_is_allocated(bs, cluster_offset,
1409                                     MIN(cluster_bytes, max_transfer), &pnum);
1410             if (ret < 0) {
1411                 /*
1412                  * Safe to treat errors in querying allocation as if
1413                  * unallocated; we'll probably fail again soon on the
1414                  * read, but at least that will set a decent errno.
1415                  */
1416                 pnum = MIN(cluster_bytes, max_transfer);
1417             }
1418 
1419             /* Stop at EOF if the image ends in the middle of the cluster */
1420             if (ret == 0 && pnum == 0) {
1421                 assert(progress >= bytes);
1422                 break;
1423             }
1424 
1425             assert(skip_bytes < pnum);
1426         }
1427 
1428         if (ret <= 0) {
1429             QEMUIOVector local_qiov;
1430 
1431             /* Must copy-on-read; use the bounce buffer */
1432             pnum = MIN(pnum, MAX_BOUNCE_BUFFER);
1433             if (!bounce_buffer) {
1434                 int64_t max_we_need = MAX(pnum, cluster_bytes - pnum);
1435                 int64_t max_allowed = MIN(max_transfer, MAX_BOUNCE_BUFFER);
1436                 int64_t bounce_buffer_len = MIN(max_we_need, max_allowed);
1437 
1438                 bounce_buffer = qemu_try_blockalign(bs, bounce_buffer_len);
1439                 if (!bounce_buffer) {
1440                     ret = -ENOMEM;
1441                     goto err;
1442                 }
1443             }
1444             qemu_iovec_init_buf(&local_qiov, bounce_buffer, pnum);
1445 
1446             ret = bdrv_driver_preadv(bs, cluster_offset, pnum,
1447                                      &local_qiov, 0, 0);
1448             if (ret < 0) {
1449                 goto err;
1450             }
1451 
1452             bdrv_debug_event(bs, BLKDBG_COR_WRITE);
1453             if (drv->bdrv_co_pwrite_zeroes &&
1454                 buffer_is_zero(bounce_buffer, pnum)) {
1455                 /* FIXME: Should we (perhaps conditionally) be setting
1456                  * BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy
1457                  * that still correctly reads as zero? */
1458                 ret = bdrv_co_do_pwrite_zeroes(bs, cluster_offset, pnum,
1459                                                BDRV_REQ_WRITE_UNCHANGED);
1460             } else {
1461                 /* This does not change the data on the disk, it is not
1462                  * necessary to flush even in cache=writethrough mode.
1463                  */
1464                 ret = bdrv_driver_pwritev(bs, cluster_offset, pnum,
1465                                           &local_qiov, 0,
1466                                           BDRV_REQ_WRITE_UNCHANGED);
1467             }
1468 
1469             if (ret < 0) {
1470                 /* It might be okay to ignore write errors for guest
1471                  * requests.  If this is a deliberate copy-on-read
1472                  * then we don't want to ignore the error.  Simply
1473                  * report it in all cases.
1474                  */
1475                 goto err;
1476             }
1477 
1478             if (!(flags & BDRV_REQ_PREFETCH)) {
1479                 qemu_iovec_from_buf(qiov, qiov_offset + progress,
1480                                     bounce_buffer + skip_bytes,
1481                                     MIN(pnum - skip_bytes, bytes - progress));
1482             }
1483         } else if (!(flags & BDRV_REQ_PREFETCH)) {
1484             /* Read directly into the destination */
1485             ret = bdrv_driver_preadv(bs, offset + progress,
1486                                      MIN(pnum - skip_bytes, bytes - progress),
1487                                      qiov, qiov_offset + progress, 0);
1488             if (ret < 0) {
1489                 goto err;
1490             }
1491         }
1492 
1493         cluster_offset += pnum;
1494         cluster_bytes -= pnum;
1495         progress += pnum - skip_bytes;
1496         skip_bytes = 0;
1497     }
1498     ret = 0;
1499 
1500 err:
1501     qemu_vfree(bounce_buffer);
1502     return ret;
1503 }
1504 
1505 /*
1506  * Forwards an already correctly aligned request to the BlockDriver. This
1507  * handles copy on read, zeroing after EOF, and fragmentation of large
1508  * reads; any other features must be implemented by the caller.
1509  */
1510 static int coroutine_fn bdrv_aligned_preadv(BdrvChild *child,
1511     BdrvTrackedRequest *req, int64_t offset, int64_t bytes,
1512     int64_t align, QEMUIOVector *qiov, size_t qiov_offset, int flags)
1513 {
1514     BlockDriverState *bs = child->bs;
1515     int64_t total_bytes, max_bytes;
1516     int ret = 0;
1517     int64_t bytes_remaining = bytes;
1518     int max_transfer;
1519 
1520     bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
1521     assert(is_power_of_2(align));
1522     assert((offset & (align - 1)) == 0);
1523     assert((bytes & (align - 1)) == 0);
1524     assert((bs->open_flags & BDRV_O_NO_IO) == 0);
1525     max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
1526                                    align);
1527 
1528     /* TODO: We would need a per-BDS .supported_read_flags and
1529      * potential fallback support, if we ever implement any read flags
1530      * to pass through to drivers.  For now, there aren't any
1531      * passthrough flags.  */
1532     assert(!(flags & ~(BDRV_REQ_COPY_ON_READ | BDRV_REQ_PREFETCH)));
1533 
1534     /* Handle Copy on Read and associated serialisation */
1535     if (flags & BDRV_REQ_COPY_ON_READ) {
1536         /* If we touch the same cluster it counts as an overlap.  This
1537          * guarantees that allocating writes will be serialized and not race
1538          * with each other for the same cluster.  For example, in copy-on-read
1539          * it ensures that the CoR read and write operations are atomic and
1540          * guest writes cannot interleave between them. */
1541         bdrv_make_request_serialising(req, bdrv_get_cluster_size(bs));
1542     } else {
1543         bdrv_wait_serialising_requests(req);
1544     }
1545 
1546     if (flags & BDRV_REQ_COPY_ON_READ) {
1547         int64_t pnum;
1548 
1549         /* The flag BDRV_REQ_COPY_ON_READ has reached its addressee */
1550         flags &= ~BDRV_REQ_COPY_ON_READ;
1551 
1552         ret = bdrv_is_allocated(bs, offset, bytes, &pnum);
1553         if (ret < 0) {
1554             goto out;
1555         }
1556 
1557         if (!ret || pnum != bytes) {
1558             ret = bdrv_co_do_copy_on_readv(child, offset, bytes,
1559                                            qiov, qiov_offset, flags);
1560             goto out;
1561         } else if (flags & BDRV_REQ_PREFETCH) {
1562             goto out;
1563         }
1564     }
1565 
1566     /* Forward the request to the BlockDriver, possibly fragmenting it */
1567     total_bytes = bdrv_getlength(bs);
1568     if (total_bytes < 0) {
1569         ret = total_bytes;
1570         goto out;
1571     }
1572 
1573     assert(!(flags & ~bs->supported_read_flags));
1574 
1575     max_bytes = ROUND_UP(MAX(0, total_bytes - offset), align);
1576     if (bytes <= max_bytes && bytes <= max_transfer) {
1577         ret = bdrv_driver_preadv(bs, offset, bytes, qiov, qiov_offset, flags);
1578         goto out;
1579     }
1580 
1581     while (bytes_remaining) {
1582         int64_t num;
1583 
1584         if (max_bytes) {
1585             num = MIN(bytes_remaining, MIN(max_bytes, max_transfer));
1586             assert(num);
1587 
1588             ret = bdrv_driver_preadv(bs, offset + bytes - bytes_remaining,
1589                                      num, qiov,
1590                                      qiov_offset + bytes - bytes_remaining,
1591                                      flags);
1592             max_bytes -= num;
1593         } else {
1594             num = bytes_remaining;
1595             ret = qemu_iovec_memset(qiov, qiov_offset + bytes - bytes_remaining,
1596                                     0, bytes_remaining);
1597         }
1598         if (ret < 0) {
1599             goto out;
1600         }
1601         bytes_remaining -= num;
1602     }
1603 
1604 out:
1605     return ret < 0 ? ret : 0;
1606 }
1607 
1608 /*
1609  * Request padding
1610  *
1611  *  |<---- align ----->|                     |<----- align ---->|
1612  *  |<- head ->|<------------- bytes ------------->|<-- tail -->|
1613  *  |          |       |                     |     |            |
1614  * -*----------$-------*-------- ... --------*-----$------------*---
1615  *  |          |       |                     |     |            |
1616  *  |          offset  |                     |     end          |
1617  *  ALIGN_DOWN(offset) ALIGN_UP(offset)      ALIGN_DOWN(end)   ALIGN_UP(end)
1618  *  [buf   ... )                             [tail_buf          )
1619  *
1620  * @buf is an aligned allocation needed to store the @head and @tail paddings.
1621  * @head is placed at the beginning of @buf and @tail at its end.
1622  *
1623  * @tail_buf is a pointer to the sub-buffer corresponding to the align-sized
1624  * chunk around the tail, if a tail exists.
1625  *
1626  * @merge_reads is true for small requests, i.e. when
1627  * @buf_len == @head + bytes + @tail. In this case both head and tail may
1628  * exist while @buf_len == align and @tail_buf == @buf.
1629  */
1630 typedef struct BdrvRequestPadding {
1631     uint8_t *buf;
1632     size_t buf_len;
1633     uint8_t *tail_buf;
1634     size_t head;
1635     size_t tail;
1636     bool merge_reads;
1637     QEMUIOVector local_qiov;
1638 } BdrvRequestPadding;
1639 
1640 static bool bdrv_init_padding(BlockDriverState *bs,
1641                               int64_t offset, int64_t bytes,
1642                               BdrvRequestPadding *pad)
1643 {
1644     int64_t align = bs->bl.request_alignment;
1645     int64_t sum;
1646 
1647     bdrv_check_request(offset, bytes, &error_abort);
1648     assert(align <= INT_MAX); /* documented in block/block_int.h */
1649     assert(align <= SIZE_MAX / 2); /* so we can allocate the buffer */
1650 
1651     memset(pad, 0, sizeof(*pad));
1652 
1653     pad->head = offset & (align - 1);
1654     pad->tail = ((offset + bytes) & (align - 1));
1655     if (pad->tail) {
1656         pad->tail = align - pad->tail;
1657     }
1658 
1659     if (!pad->head && !pad->tail) {
1660         return false;
1661     }
1662 
1663     assert(bytes); /* Nothing good in aligning zero-length requests */
1664 
1665     sum = pad->head + bytes + pad->tail;
1666     pad->buf_len = (sum > align && pad->head && pad->tail) ? 2 * align : align;
1667     pad->buf = qemu_blockalign(bs, pad->buf_len);
1668     pad->merge_reads = sum == pad->buf_len;
1669     if (pad->tail) {
1670         pad->tail_buf = pad->buf + pad->buf_len - align;
1671     }
1672 
1673     return true;
1674 }
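/*
 * Worked example (illustrative numbers only): with align = 512,
 * offset = 1000 and bytes = 2000, bdrv_init_padding() computes
 *
 *   head    = 1000 % 512                   = 488
 *   tail    = 512 - ((1000 + 2000) % 512)  = 512 - 440 = 72
 *   sum     = 488 + 2000 + 72              = 2560
 *   buf_len = 2 * align                    = 1024  (sum > align, head && tail)
 *   merge_reads = (sum == buf_len)         = false
 *
 * so @buf holds the 488-byte head at its start, and @tail_buf is the
 * second 512-byte half of @buf, whose last 72 bytes are the tail.
 */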
1675 
1676 static int bdrv_padding_rmw_read(BdrvChild *child,
1677                                  BdrvTrackedRequest *req,
1678                                  BdrvRequestPadding *pad,
1679                                  bool zero_middle)
1680 {
1681     QEMUIOVector local_qiov;
1682     BlockDriverState *bs = child->bs;
1683     uint64_t align = bs->bl.request_alignment;
1684     int ret;
1685 
1686     assert(req->serialising && pad->buf);
1687 
1688     if (pad->head || pad->merge_reads) {
1689         int64_t bytes = pad->merge_reads ? pad->buf_len : align;
1690 
1691         qemu_iovec_init_buf(&local_qiov, pad->buf, bytes);
1692 
1693         if (pad->head) {
1694             bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
1695         }
1696         if (pad->merge_reads && pad->tail) {
1697             bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
1698         }
1699         ret = bdrv_aligned_preadv(child, req, req->overlap_offset, bytes,
1700                                   align, &local_qiov, 0, 0);
1701         if (ret < 0) {
1702             return ret;
1703         }
1704         if (pad->head) {
1705             bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
1706         }
1707         if (pad->merge_reads && pad->tail) {
1708             bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
1709         }
1710 
1711         if (pad->merge_reads) {
1712             goto zero_mem;
1713         }
1714     }
1715 
1716     if (pad->tail) {
1717         qemu_iovec_init_buf(&local_qiov, pad->tail_buf, align);
1718 
1719         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
1720         ret = bdrv_aligned_preadv(
1721                 child, req,
1722                 req->overlap_offset + req->overlap_bytes - align,
1723                 align, align, &local_qiov, 0, 0);
1724         if (ret < 0) {
1725             return ret;
1726         }
1727         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
1728     }
1729 
1730 zero_mem:
1731     if (zero_middle) {
1732         memset(pad->buf + pad->head, 0, pad->buf_len - pad->head - pad->tail);
1733     }
1734 
1735     return 0;
1736 }
1737 
1738 static void bdrv_padding_destroy(BdrvRequestPadding *pad)
1739 {
1740     if (pad->buf) {
1741         qemu_vfree(pad->buf);
1742         qemu_iovec_destroy(&pad->local_qiov);
1743     }
1744     memset(pad, 0, sizeof(*pad));
1745 }
1746 
1747 /*
1748  * bdrv_pad_request
1749  *
1750  * Exchange the request parameters for a padded request if needed. This does
1751  * not include the RMW read of the padding; bdrv_padding_rmw_read() should be
1752  * called separately if needed.
1753  *
1754  * The request parameters (@qiov, @qiov_offset, @offset, @bytes) are in-out:
1755  *  - on entry they describe the original request
1756  *  - on failure, or when no padding is needed, they are left unchanged
1757  *  - on success, when padding is needed, they describe the padded request
1758  */
1759 static int bdrv_pad_request(BlockDriverState *bs,
1760                             QEMUIOVector **qiov, size_t *qiov_offset,
1761                             int64_t *offset, int64_t *bytes,
1762                             BdrvRequestPadding *pad, bool *padded)
1763 {
1764     int ret;
1765 
1766     bdrv_check_qiov_request(*offset, *bytes, *qiov, *qiov_offset, &error_abort);
1767 
1768     if (!bdrv_init_padding(bs, *offset, *bytes, pad)) {
1769         if (padded) {
1770             *padded = false;
1771         }
1772         return 0;
1773     }
1774 
1775     ret = qemu_iovec_init_extended(&pad->local_qiov, pad->buf, pad->head,
1776                                    *qiov, *qiov_offset, *bytes,
1777                                    pad->buf + pad->buf_len - pad->tail,
1778                                    pad->tail);
1779     if (ret < 0) {
1780         bdrv_padding_destroy(pad);
1781         return ret;
1782     }
1783     *bytes += pad->head + pad->tail;
1784     *offset -= pad->head;
1785     *qiov = &pad->local_qiov;
1786     *qiov_offset = 0;
1787     if (padded) {
1788         *padded = true;
1789     }
1790 
1791     return 0;
1792 }
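/*
 * Continuing the numbers used above (for illustration only): a call with
 * *offset = 1000, *bytes = 2000 and align = 512 returns with
 * *offset = 512, *bytes = 2560, *qiov = &pad->local_qiov and
 * *qiov_offset = 0, where local_qiov chains the 488-byte head buffer,
 * the caller's original 2000 bytes and the 72-byte tail buffer into one
 * aligned vector via qemu_iovec_init_extended().
 */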
1793 
1794 int coroutine_fn bdrv_co_preadv(BdrvChild *child,
1795     int64_t offset, int64_t bytes, QEMUIOVector *qiov,
1796     BdrvRequestFlags flags)
1797 {
1798     return bdrv_co_preadv_part(child, offset, bytes, qiov, 0, flags);
1799 }
1800 
1801 int coroutine_fn bdrv_co_preadv_part(BdrvChild *child,
1802     int64_t offset, int64_t bytes,
1803     QEMUIOVector *qiov, size_t qiov_offset,
1804     BdrvRequestFlags flags)
1805 {
1806     BlockDriverState *bs = child->bs;
1807     BdrvTrackedRequest req;
1808     BdrvRequestPadding pad;
1809     int ret;
1810 
1811     trace_bdrv_co_preadv_part(bs, offset, bytes, flags);
1812 
1813     if (!bdrv_is_inserted(bs)) {
1814         return -ENOMEDIUM;
1815     }
1816 
1817     ret = bdrv_check_request32(offset, bytes, qiov, qiov_offset);
1818     if (ret < 0) {
1819         return ret;
1820     }
1821 
1822     if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) {
1823         /*
1824          * Aligning a zero-length request is nonsense. Even if a driver
1825          * assigns special meaning to zero-length requests (like
1826          * qcow2_co_pwritev_compressed_part), we can't pass such a request
1827          * to the driver due to request_alignment. Still, there is no
1828          * reason to return an error if someone does an unaligned
1829          * zero-length read occasionally.
1830          */
1831         return 0;
1832     }
1833 
1834     bdrv_inc_in_flight(bs);
1835 
1836     /* Don't do copy-on-read if we read data before write operation */
1837     if (qatomic_read(&bs->copy_on_read)) {
1838         flags |= BDRV_REQ_COPY_ON_READ;
1839     }
1840 
1841     ret = bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, &pad,
1842                            NULL);
1843     if (ret < 0) {
             bdrv_dec_in_flight(bs); /* undo bdrv_inc_in_flight() above on error */
1844         return ret;
1845     }
1846 
1847     tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
1848     ret = bdrv_aligned_preadv(child, &req, offset, bytes,
1849                               bs->bl.request_alignment,
1850                               qiov, qiov_offset, flags);
1851     tracked_request_end(&req);
1852     bdrv_dec_in_flight(bs);
1853 
1854     bdrv_padding_destroy(&pad);
1855 
1856     return ret;
1857 }
1858 
1859 static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
1860     int64_t offset, int64_t bytes, BdrvRequestFlags flags)
1861 {
1862     BlockDriver *drv = bs->drv;
1863     QEMUIOVector qiov;
1864     void *buf = NULL;
1865     int ret = 0;
1866     bool need_flush = false;
1867     int head = 0;
1868     int tail = 0;
1869 
1870     int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes, INT_MAX);
1871     int alignment = MAX(bs->bl.pwrite_zeroes_alignment,
1872                         bs->bl.request_alignment);
1873     int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer, MAX_BOUNCE_BUFFER);
1874 
1875     bdrv_check_request(offset, bytes, &error_abort);
1876 
1877     if (!drv) {
1878         return -ENOMEDIUM;
1879     }
1880 
1881     if ((flags & ~bs->supported_zero_flags) & BDRV_REQ_NO_FALLBACK) {
1882         return -ENOTSUP;
1883     }
1884 
1885     assert(alignment % bs->bl.request_alignment == 0);
1886     head = offset % alignment;
1887     tail = (offset + bytes) % alignment;
1888     max_write_zeroes = QEMU_ALIGN_DOWN(max_write_zeroes, alignment);
1889     assert(max_write_zeroes >= bs->bl.request_alignment);
1890 
1891     while (bytes > 0 && !ret) {
1892         int64_t num = bytes;
1893 
1894         /* Align request.  Block drivers can expect the "bulk" of the request
1895          * to be aligned, and that unaligned requests do not cross cluster
1896          * boundaries.
1897          */
1898         if (head) {
1899             /* Make a small request up to the first aligned sector. For
1900              * convenience, limit this request to max_transfer even if
1901              * we don't need to fall back to writes.  */
1902             num = MIN(MIN(bytes, max_transfer), alignment - head);
1903             head = (head + num) % alignment;
1904             assert(num < max_write_zeroes);
1905         } else if (tail && num > alignment) {
1906             /* Shorten the request to the last aligned sector.  */
1907             num -= tail;
1908         }
1909 
1910         /* limit request size */
1911         if (num > max_write_zeroes) {
1912             num = max_write_zeroes;
1913         }
1914 
1915         ret = -ENOTSUP;
1916         /* First try the efficient write zeroes operation */
1917         if (drv->bdrv_co_pwrite_zeroes) {
1918             ret = drv->bdrv_co_pwrite_zeroes(bs, offset, num,
1919                                              flags & bs->supported_zero_flags);
1920             if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) &&
1921                 !(bs->supported_zero_flags & BDRV_REQ_FUA)) {
1922                 need_flush = true;
1923             }
1924         } else {
1925             assert(!bs->supported_zero_flags);
1926         }
1927 
1928         if (ret == -ENOTSUP && !(flags & BDRV_REQ_NO_FALLBACK)) {
1929             /* Fall back to bounce buffer if write zeroes is unsupported */
1930             BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE;
1931 
1932             if ((flags & BDRV_REQ_FUA) &&
1933                 !(bs->supported_write_flags & BDRV_REQ_FUA)) {
1934                 /* No need for bdrv_driver_pwrite() to do a fallback
1935                  * flush on each chunk; use just one at the end */
1936                 write_flags &= ~BDRV_REQ_FUA;
1937                 need_flush = true;
1938             }
1939             num = MIN(num, max_transfer);
1940             if (buf == NULL) {
1941                 buf = qemu_try_blockalign0(bs, num);
1942                 if (buf == NULL) {
1943                     ret = -ENOMEM;
1944                     goto fail;
1945                 }
1946             }
1947             qemu_iovec_init_buf(&qiov, buf, num);
1948 
1949             ret = bdrv_driver_pwritev(bs, offset, num, &qiov, 0, write_flags);
1950 
1951             /* Keep the bounce buffer around if it is big enough for
1952              * all future requests.
1953              */
1954             if (num < max_transfer) {
1955                 qemu_vfree(buf);
1956                 buf = NULL;
1957             }
1958         }
1959 
1960         offset += num;
1961         bytes -= num;
1962     }
1963 
1964 fail:
1965     if (ret == 0 && need_flush) {
1966         ret = bdrv_co_flush(bs);
1967     }
1968     qemu_vfree(buf);
1969     return ret;
1970 }
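/*
 * Illustrative walk-through with hypothetical limits (alignment = 4096,
 * max_transfer = 65536, generous max_write_zeroes): a request with
 * offset = 4000 and bytes = 12000 is split by the loop above into
 *
 *   1. head:   offset  4000, num = MIN(12000, 65536, 4096 - 4000) =   96
 *   2. middle: offset  4096, num = 11904 - tail(3712)             = 8192
 *   3. tail:   offset 12288, num                                  = 3712
 *
 * Only the middle piece is aligned; the head and tail pieces may be
 * served by the driver's unaligned path or by the bounce-buffer
 * fallback.
 */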
1971 
1972 static inline int coroutine_fn
1973 bdrv_co_write_req_prepare(BdrvChild *child, int64_t offset, int64_t bytes,
1974                           BdrvTrackedRequest *req, int flags)
1975 {
1976     BlockDriverState *bs = child->bs;
1977 
1978     bdrv_check_request(offset, bytes, &error_abort);
1979 
1980     if (bdrv_is_read_only(bs)) {
1981         return -EPERM;
1982     }
1983 
1984     assert(!(bs->open_flags & BDRV_O_INACTIVE));
1985     assert((bs->open_flags & BDRV_O_NO_IO) == 0);
1986     assert(!(flags & ~BDRV_REQ_MASK));
1987     assert(!((flags & BDRV_REQ_NO_WAIT) && !(flags & BDRV_REQ_SERIALISING)));
1988 
1989     if (flags & BDRV_REQ_SERIALISING) {
1990         QEMU_LOCK_GUARD(&bs->reqs_lock);
1991 
1992         tracked_request_set_serialising(req, bdrv_get_cluster_size(bs));
1993 
1994         if ((flags & BDRV_REQ_NO_WAIT) && bdrv_find_conflicting_request(req)) {
1995             return -EBUSY;
1996         }
1997 
1998         bdrv_wait_serialising_requests_locked(req);
1999     } else {
2000         bdrv_wait_serialising_requests(req);
2001     }
2002 
2003     assert(req->overlap_offset <= offset);
2004     assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);
2005     assert(offset + bytes <= bs->total_sectors * BDRV_SECTOR_SIZE ||
2006            child->perm & BLK_PERM_RESIZE);
2007 
2008     switch (req->type) {
2009     case BDRV_TRACKED_WRITE:
2010     case BDRV_TRACKED_DISCARD:
2011         if (flags & BDRV_REQ_WRITE_UNCHANGED) {
2012             assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));
2013         } else {
2014             assert(child->perm & BLK_PERM_WRITE);
2015         }
2016         bdrv_write_threshold_check_write(bs, offset, bytes);
2017         return 0;
2018     case BDRV_TRACKED_TRUNCATE:
2019         assert(child->perm & BLK_PERM_RESIZE);
2020         return 0;
2021     default:
2022         abort();
2023     }
2024 }
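/*
 * Illustration of the BDRV_REQ_NO_WAIT contract asserted above (a
 * hypothetical caller, for illustration only): a writer that must not
 * block on overlapping requests passes both flags together,
 *
 *     ret = bdrv_co_write_req_prepare(child, offset, bytes, &req,
 *                                     BDRV_REQ_SERIALISING |
 *                                     BDRV_REQ_NO_WAIT);
 *     if (ret == -EBUSY) {
 *         ... back off instead of waiting ...
 *     }
 *
 * BDRV_REQ_NO_WAIT without BDRV_REQ_SERIALISING trips the assertion,
 * since without serialisation there is nothing to refuse to wait for.
 */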
2025 
2026 static inline void coroutine_fn
2027 bdrv_co_write_req_finish(BdrvChild *child, int64_t offset, int64_t bytes,
2028                          BdrvTrackedRequest *req, int ret)
2029 {
2030     int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
2031     BlockDriverState *bs = child->bs;
2032 
2033     bdrv_check_request(offset, bytes, &error_abort);
2034 
2035     qatomic_inc(&bs->write_gen);
2036 
2037     /*
2038      * Discard cannot extend the image, but in error handling cases, such
2039      * as when reverting a qcow2 cluster allocation, the discarded range
2040      * can extend past the end of the image file, so we cannot assert about
2041      * BDRV_TRACKED_DISCARD here. Instead, just skip it, since semantically
2042      * a discard request beyond EOF cannot expand the image anyway.
2043      */
2044     if (ret == 0 &&
2045         (req->type == BDRV_TRACKED_TRUNCATE ||
2046          end_sector > bs->total_sectors) &&
2047         req->type != BDRV_TRACKED_DISCARD) {
2048         bs->total_sectors = end_sector;
2049         bdrv_parent_cb_resize(bs);
2050         bdrv_dirty_bitmap_truncate(bs, end_sector << BDRV_SECTOR_BITS);
2051     }
2052     if (req->bytes) {
2053         switch (req->type) {
2054         case BDRV_TRACKED_WRITE:
2055             stat64_max(&bs->wr_highest_offset, offset + bytes);
2056             /* fall through, to set dirty bits */
2057         case BDRV_TRACKED_DISCARD:
2058             bdrv_set_dirty(bs, offset, bytes);
2059             break;
2060         default:
2061             break;
2062         }
2063     }
2064 }
2065 
2066 /*
2067  * Forwards an already correctly aligned write request to the BlockDriver,
2068  * after possibly fragmenting it.
2069  */
2070 static int coroutine_fn bdrv_aligned_pwritev(BdrvChild *child,
2071     BdrvTrackedRequest *req, int64_t offset, int64_t bytes,
2072     int64_t align, QEMUIOVector *qiov, size_t qiov_offset, int flags)
2073 {
2074     BlockDriverState *bs = child->bs;
2075     BlockDriver *drv = bs->drv;
2076     int ret;
2077 
2078     int64_t bytes_remaining = bytes;
2079     int max_transfer;
2080 
2081     bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
2082 
2083     if (!drv) {
2084         return -ENOMEDIUM;
2085     }
2086 
2087     if (bdrv_has_readonly_bitmaps(bs)) {
2088         return -EPERM;
2089     }
2090 
2091     assert(is_power_of_2(align));
2092     assert((offset & (align - 1)) == 0);
2093     assert((bytes & (align - 1)) == 0);
2094     max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
2095                                    align);
2096 
2097     ret = bdrv_co_write_req_prepare(child, offset, bytes, req, flags);
2098 
2099     if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
2100         !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_pwrite_zeroes &&
2101         qemu_iovec_is_zero(qiov, qiov_offset, bytes)) {
2102         flags |= BDRV_REQ_ZERO_WRITE;
2103         if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
2104             flags |= BDRV_REQ_MAY_UNMAP;
2105         }
2106     }
2107 
2108     if (ret < 0) {
2109         /* Do nothing, bdrv_co_write_req_prepare() failed the request */
2110     } else if (flags & BDRV_REQ_ZERO_WRITE) {
2111         bdrv_debug_event(bs, BLKDBG_PWRITEV_ZERO);
2112         ret = bdrv_co_do_pwrite_zeroes(bs, offset, bytes, flags);
2113     } else if (flags & BDRV_REQ_WRITE_COMPRESSED) {
2114         ret = bdrv_driver_pwritev_compressed(bs, offset, bytes,
2115                                              qiov, qiov_offset);
2116     } else if (bytes <= max_transfer) {
2117         bdrv_debug_event(bs, BLKDBG_PWRITEV);
2118         ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, qiov_offset, flags);
2119     } else {
2120         bdrv_debug_event(bs, BLKDBG_PWRITEV);
2121         while (bytes_remaining) {
2122             int num = MIN(bytes_remaining, max_transfer);
2123             int local_flags = flags;
2124 
2125             assert(num);
2126             if (num < bytes_remaining && (flags & BDRV_REQ_FUA) &&
2127                 !(bs->supported_write_flags & BDRV_REQ_FUA)) {
2128                 /* If FUA is going to be emulated by flush, we only
2129                  * need to flush on the last iteration */
2130                 local_flags &= ~BDRV_REQ_FUA;
2131             }
2132 
2133             ret = bdrv_driver_pwritev(bs, offset + bytes - bytes_remaining,
2134                                       num, qiov,
2135                                       qiov_offset + bytes - bytes_remaining,
2136                                       local_flags);
2137             if (ret < 0) {
2138                 break;
2139             }
2140             bytes_remaining -= num;
2141         }
2142     }
2143     bdrv_debug_event(bs, BLKDBG_PWRITEV_DONE);
2144 
2145     if (ret >= 0) {
2146         ret = 0;
2147     }
2148     bdrv_co_write_req_finish(child, offset, bytes, req, ret);
2149 
2150     return ret;
2151 }
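/*
 * Worked example of the FUA handling in the fragmentation loop above
 * (hypothetical limits): with max_transfer = 65536, bytes = 196608 and
 * BDRV_REQ_FUA set on a driver without native FUA support, the first
 * two 64 KiB chunks are written with local_flags stripped of
 * BDRV_REQ_FUA; only the last chunk keeps the flag, so the FUA
 * emulation in bdrv_driver_pwritev() issues a single flush at the end
 * instead of one per chunk.
 */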
2152 
2153 static int coroutine_fn bdrv_co_do_zero_pwritev(BdrvChild *child,
2154                                                 int64_t offset,
2155                                                 int64_t bytes,
2156                                                 BdrvRequestFlags flags,
2157                                                 BdrvTrackedRequest *req)
2158 {
2159     BlockDriverState *bs = child->bs;
2160     QEMUIOVector local_qiov;
2161     uint64_t align = bs->bl.request_alignment;
2162     int ret = 0;
2163     bool padding;
2164     BdrvRequestPadding pad;
2165 
2166     padding = bdrv_init_padding(bs, offset, bytes, &pad);
2167     if (padding) {
2168         bdrv_make_request_serialising(req, align);
2169 
2170         bdrv_padding_rmw_read(child, req, &pad, true);
2171 
2172         if (pad.head || pad.merge_reads) {
2173             int64_t aligned_offset = offset & ~(align - 1);
2174             int64_t write_bytes = pad.merge_reads ? pad.buf_len : align;
2175 
2176             qemu_iovec_init_buf(&local_qiov, pad.buf, write_bytes);
2177             ret = bdrv_aligned_pwritev(child, req, aligned_offset, write_bytes,
2178                                        align, &local_qiov, 0,
2179                                        flags & ~BDRV_REQ_ZERO_WRITE);
2180             if (ret < 0 || pad.merge_reads) {
2181                 /* Error or all work is done */
2182                 goto out;
2183             }
2184             offset += write_bytes - pad.head;
2185             bytes -= write_bytes - pad.head;
2186         }
2187     }
2188 
2189     assert(!bytes || (offset & (align - 1)) == 0);
2190     if (bytes >= align) {
2191         /* Write the aligned part in the middle. */
2192         int64_t aligned_bytes = bytes & ~(align - 1);
2193         ret = bdrv_aligned_pwritev(child, req, offset, aligned_bytes, align,
2194                                    NULL, 0, flags);
2195         if (ret < 0) {
2196             goto out;
2197         }
2198         bytes -= aligned_bytes;
2199         offset += aligned_bytes;
2200     }
2201 
2202     assert(!bytes || (offset & (align - 1)) == 0);
2203     if (bytes) {
2204         assert(align == pad.tail + bytes);
2205 
2206         qemu_iovec_init_buf(&local_qiov, pad.tail_buf, align);
2207         ret = bdrv_aligned_pwritev(child, req, offset, align, align,
2208                                    &local_qiov, 0,
2209                                    flags & ~BDRV_REQ_ZERO_WRITE);
2210     }
2211 
2212 out:
2213     bdrv_padding_destroy(&pad);
2214 
2215     return ret;
2216 }
2217 
2218 /*
2219  * Handle a write request in coroutine context
2220  */
2221 int coroutine_fn bdrv_co_pwritev(BdrvChild *child,
2222     int64_t offset, int64_t bytes, QEMUIOVector *qiov,
2223     BdrvRequestFlags flags)
2224 {
2225     return bdrv_co_pwritev_part(child, offset, bytes, qiov, 0, flags);
2226 }
2227 
2228 int coroutine_fn bdrv_co_pwritev_part(BdrvChild *child,
2229     int64_t offset, int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset,
2230     BdrvRequestFlags flags)
2231 {
2232     BlockDriverState *bs = child->bs;
2233     BdrvTrackedRequest req;
2234     uint64_t align = bs->bl.request_alignment;
2235     BdrvRequestPadding pad;
2236     int ret;
2237     bool padded = false;
2238 
2239     trace_bdrv_co_pwritev_part(child->bs, offset, bytes, flags);
2240 
2241     if (!bdrv_is_inserted(bs)) {
2242         return -ENOMEDIUM;
2243     }
2244 
2245     ret = bdrv_check_request32(offset, bytes, qiov, qiov_offset);
2246     if (ret < 0) {
2247         return ret;
2248     }
2249 
2250     /* If the request is misaligned then we can't make it efficient */
2251     if ((flags & BDRV_REQ_NO_FALLBACK) &&
2252         !QEMU_IS_ALIGNED(offset | bytes, align))
2253     {
2254         return -ENOTSUP;
2255     }
2256 
2257     if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) {
2258         /*
2259          * Aligning a zero-length request is nonsense. Even if a driver
2260          * assigns special meaning to zero-length requests (like
2261          * qcow2_co_pwritev_compressed_part), we can't pass such a request
2262          * to the driver due to request_alignment. Still, there is no
2263          * reason to return an error if someone does an unaligned
2264          * zero-length write occasionally.
2265          */
2266         return 0;
2267     }
2268 
2269     if (!(flags & BDRV_REQ_ZERO_WRITE)) {
2270         /*
2271          * Pad the request for the following read-modify-write cycle.
2272          * bdrv_co_do_zero_pwritev() does its own aligning, so we only
2273          * do the alignment here when there is no ZERO flag.
2274          */
2275         ret = bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, &pad,
2276                                &padded);
2277         if (ret < 0) {
2278             return ret;
2279         }
2280     }
2281 
2282     bdrv_inc_in_flight(bs);
2283     tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE);
2284 
2285     if (flags & BDRV_REQ_ZERO_WRITE) {
2286         assert(!padded);
2287         ret = bdrv_co_do_zero_pwritev(child, offset, bytes, flags, &req);
2288         goto out;
2289     }
2290 
2291     if (padded) {
2292         /*
2293          * Request was unaligned to request_alignment and therefore
2294          * padded.  We are going to do read-modify-write, and must
2295          * serialize the request to prevent interactions of the
2296          * widened region with other transactions.
2297          */
2298         bdrv_make_request_serialising(&req, align);
2299         bdrv_padding_rmw_read(child, &req, &pad, false);
2300     }
2301 
2302     ret = bdrv_aligned_pwritev(child, &req, offset, bytes, align,
2303                                qiov, qiov_offset, flags);
2304 
2305     bdrv_padding_destroy(&pad);
2306 
2307 out:
2308     tracked_request_end(&req);
2309     bdrv_dec_in_flight(bs);
2310 
2311     return ret;
2312 }
2313 
2314 int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
2315                                        int64_t bytes, BdrvRequestFlags flags)
2316 {
2317     trace_bdrv_co_pwrite_zeroes(child->bs, offset, bytes, flags);
2318 
2319     if (!(child->bs->open_flags & BDRV_O_UNMAP)) {
2320         flags &= ~BDRV_REQ_MAY_UNMAP;
2321     }
2322 
2323     return bdrv_co_pwritev(child, offset, bytes, NULL,
2324                            BDRV_REQ_ZERO_WRITE | flags);
2325 }
2326 
2327 /*
2328  * Flush ALL BDSes, regardless of whether they are reachable via a BlockBackend.
2329  */
2330 int bdrv_flush_all(void)
2331 {
2332     BdrvNextIterator it;
2333     BlockDriverState *bs = NULL;
2334     int result = 0;
2335 
2336     /*
2337      * The bdrv queue is managed by record/replay;
2338      * creating a new flush request for stopping
2339      * the VM may break determinism.
2340      */
2341     if (replay_events_enabled()) {
2342         return result;
2343     }
2344 
2345     for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
2346         AioContext *aio_context = bdrv_get_aio_context(bs);
2347         int ret;
2348 
2349         aio_context_acquire(aio_context);
2350         ret = bdrv_flush(bs);
2351         if (ret < 0 && !result) {
2352             result = ret;
2353         }
2354         aio_context_release(aio_context);
2355     }
2356 
2357     return result;
2358 }
2359 
2360 /*
2361  * Returns the allocation status of the specified byte range.
2362  * Drivers not implementing the functionality are assumed to not support
2363  * backing files, hence all their bytes are reported as allocated.
2364  *
2365  * If 'want_zero' is true, the caller is querying for mapping
2366  * purposes, with a focus on valid BDRV_BLOCK_OFFSET_VALID, _DATA, and
2367  * _ZERO where possible; otherwise, the result favors larger 'pnum',
2368  * with a focus on accurate BDRV_BLOCK_ALLOCATED.
2369  *
2370  * If 'offset' is beyond the end of the disk image the return value is
2371  * BDRV_BLOCK_EOF and 'pnum' is set to 0.
2372  *
2373  * 'bytes' is the max value 'pnum' should be set to.  If bytes goes
2374  * beyond the end of the disk image it will be clamped; if 'pnum' is set to
2375  * the end of the image, then the returned value will include BDRV_BLOCK_EOF.
2376  *
2377  * 'pnum' is set to the number of bytes (including and immediately
2378  * following the specified offset) that are easily known to be in the
2379  * same allocated/unallocated state.  Note that a second call starting
2380  * at the original offset plus returned pnum may have the same status.
2381  * The returned value is non-zero on success except at end-of-file.
2382  *
2383  * Returns negative errno on failure.  Otherwise, if the
2384  * BDRV_BLOCK_OFFSET_VALID bit is set, 'map' and 'file' (if non-NULL) are
2385  * set to the host mapping and BDS corresponding to the guest offset.
2386  */
2387 static int coroutine_fn bdrv_co_block_status(BlockDriverState *bs,
2388                                              bool want_zero,
2389                                              int64_t offset, int64_t bytes,
2390                                              int64_t *pnum, int64_t *map,
2391                                              BlockDriverState **file)
2392 {
2393     int64_t total_size;
2394     int64_t n; /* bytes */
2395     int ret;
2396     int64_t local_map = 0;
2397     BlockDriverState *local_file = NULL;
2398     int64_t aligned_offset, aligned_bytes;
2399     uint32_t align;
2400     bool has_filtered_child;
2401 
2402     assert(pnum);
2403     *pnum = 0;
2404     total_size = bdrv_getlength(bs);
2405     if (total_size < 0) {
2406         ret = total_size;
2407         goto early_out;
2408     }
2409 
2410     if (offset >= total_size) {
2411         ret = BDRV_BLOCK_EOF;
2412         goto early_out;
2413     }
2414     if (!bytes) {
2415         ret = 0;
2416         goto early_out;
2417     }
2418 
2419     n = total_size - offset;
2420     if (n < bytes) {
2421         bytes = n;
2422     }
2423 
2424     /* Must be non-NULL or bdrv_getlength() would have failed */
2425     assert(bs->drv);
2426     has_filtered_child = bdrv_filter_child(bs);
2427     if (!bs->drv->bdrv_co_block_status && !has_filtered_child) {
2428         *pnum = bytes;
2429         ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
2430         if (offset + bytes == total_size) {
2431             ret |= BDRV_BLOCK_EOF;
2432         }
2433         if (bs->drv->protocol_name) {
2434             ret |= BDRV_BLOCK_OFFSET_VALID;
2435             local_map = offset;
2436             local_file = bs;
2437         }
2438         goto early_out;
2439     }
2440 
2441     bdrv_inc_in_flight(bs);
2442 
2443     /* Round out to request_alignment boundaries */
2444     align = bs->bl.request_alignment;
2445     aligned_offset = QEMU_ALIGN_DOWN(offset, align);
2446     aligned_bytes = ROUND_UP(offset + bytes, align) - aligned_offset;
2447 
2448     if (bs->drv->bdrv_co_block_status) {
2449         ret = bs->drv->bdrv_co_block_status(bs, want_zero, aligned_offset,
2450                                             aligned_bytes, pnum, &local_map,
2451                                             &local_file);
2452     } else {
2453         /* Default code for filters */
2454 
2455         local_file = bdrv_filter_bs(bs);
2456         assert(local_file);
2457 
2458         *pnum = aligned_bytes;
2459         local_map = aligned_offset;
2460         ret = BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID;
2461     }
2462     if (ret < 0) {
2463         *pnum = 0;
2464         goto out;
2465     }
2466 
2467     /*
2468      * The driver's result must be a non-zero multiple of request_alignment.
2469      * Clamp pnum and adjust map to original request.
2470      */
2471     assert(*pnum && QEMU_IS_ALIGNED(*pnum, align) &&
2472            align > offset - aligned_offset);
2473     if (ret & BDRV_BLOCK_RECURSE) {
2474         assert(ret & BDRV_BLOCK_DATA);
2475         assert(ret & BDRV_BLOCK_OFFSET_VALID);
2476         assert(!(ret & BDRV_BLOCK_ZERO));
2477     }
2478 
2479     *pnum -= offset - aligned_offset;
2480     if (*pnum > bytes) {
2481         *pnum = bytes;
2482     }
2483     if (ret & BDRV_BLOCK_OFFSET_VALID) {
2484         local_map += offset - aligned_offset;
2485     }
2486 
2487     if (ret & BDRV_BLOCK_RAW) {
2488         assert(ret & BDRV_BLOCK_OFFSET_VALID && local_file);
2489         ret = bdrv_co_block_status(local_file, want_zero, local_map,
2490                                    *pnum, pnum, &local_map, &local_file);
2491         goto out;
2492     }
2493 
2494     if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
2495         ret |= BDRV_BLOCK_ALLOCATED;
2496     } else if (bs->drv->supports_backing) {
2497         BlockDriverState *cow_bs = bdrv_cow_bs(bs);
2498 
2499         if (!cow_bs) {
2500             ret |= BDRV_BLOCK_ZERO;
2501         } else if (want_zero) {
2502             int64_t size2 = bdrv_getlength(cow_bs);
2503 
2504             if (size2 >= 0 && offset >= size2) {
2505                 ret |= BDRV_BLOCK_ZERO;
2506             }
2507         }
2508     }
2509 
2510     if (want_zero && ret & BDRV_BLOCK_RECURSE &&
2511         local_file && local_file != bs &&
2512         (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
2513         (ret & BDRV_BLOCK_OFFSET_VALID)) {
2514         int64_t file_pnum;
2515         int ret2;
2516 
2517         ret2 = bdrv_co_block_status(local_file, want_zero, local_map,
2518                                     *pnum, &file_pnum, NULL, NULL);
2519         if (ret2 >= 0) {
2520             /* Ignore errors.  This is just providing extra information, it
2521              * is useful but not necessary.
2522              */
2523             if (ret2 & BDRV_BLOCK_EOF &&
2524                 (!file_pnum || ret2 & BDRV_BLOCK_ZERO)) {
2525                 /*
2526                  * It is valid for the format block driver to read
2527                  * beyond the end of the underlying file's current
2528                  * size; such areas read as zero.
2529                  */
2530                 ret |= BDRV_BLOCK_ZERO;
2531             } else {
2532                 /* Limit request to the range reported by the protocol driver */
2533                 *pnum = file_pnum;
2534                 ret |= (ret2 & BDRV_BLOCK_ZERO);
2535             }
2536         }
2537     }
2538 
2539 out:
2540     bdrv_dec_in_flight(bs);
2541     if (ret >= 0 && offset + *pnum == total_size) {
2542         ret |= BDRV_BLOCK_EOF;
2543     }
2544 early_out:
2545     if (file) {
2546         *file = local_file;
2547     }
2548     if (map) {
2549         *map = local_map;
2550     }
2551     return ret;
2552 }
2553 
2554 int coroutine_fn
2555 bdrv_co_common_block_status_above(BlockDriverState *bs,
2556                                   BlockDriverState *base,
2557                                   bool include_base,
2558                                   bool want_zero,
2559                                   int64_t offset,
2560                                   int64_t bytes,
2561                                   int64_t *pnum,
2562                                   int64_t *map,
2563                                   BlockDriverState **file,
2564                                   int *depth)
2565 {
2566     int ret;
2567     BlockDriverState *p;
2568     int64_t eof = 0;
2569     int dummy;
2570 
2571     assert(!include_base || base); /* Can't include NULL base */
2572 
2573     if (!depth) {
2574         depth = &dummy;
2575     }
2576     *depth = 0;
2577 
2578     if (!include_base && bs == base) {
2579         *pnum = bytes;
2580         return 0;
2581     }
2582 
2583     ret = bdrv_co_block_status(bs, want_zero, offset, bytes, pnum, map, file);
2584     ++*depth;
2585     if (ret < 0 || *pnum == 0 || ret & BDRV_BLOCK_ALLOCATED || bs == base) {
2586         return ret;
2587     }
2588 
2589     if (ret & BDRV_BLOCK_EOF) {
2590         eof = offset + *pnum;
2591     }
2592 
2593     assert(*pnum <= bytes);
2594     bytes = *pnum;
2595 
2596     for (p = bdrv_filter_or_cow_bs(bs); include_base || p != base;
2597          p = bdrv_filter_or_cow_bs(p))
2598     {
2599         ret = bdrv_co_block_status(p, want_zero, offset, bytes, pnum, map,
2600                                    file);
2601         ++*depth;
2602         if (ret < 0) {
2603             return ret;
2604         }
2605         if (*pnum == 0) {
2606             /*
2607              * The top layer deferred to this layer, and because this layer is
2608              * short, any zeroes that we synthesize beyond EOF behave as if they
2609              * were allocated at this layer.
2610              *
2611              * We don't include BDRV_BLOCK_EOF in ret, as the upper layer
2612              * may be larger. We'll add BDRV_BLOCK_EOF if needed at the end
2613              * of the function, see below.
2614              */
2615             assert(ret & BDRV_BLOCK_EOF);
2616             *pnum = bytes;
2617             if (file) {
2618                 *file = p;
2619             }
2620             ret = BDRV_BLOCK_ZERO | BDRV_BLOCK_ALLOCATED;
2621             break;
2622         }
2623         if (ret & BDRV_BLOCK_ALLOCATED) {
2624             /*
2625              * We've found the node and the status, we must break.
2626              *
2627              * Drop BDRV_BLOCK_EOF, as it's not for the upper layer, which
2628              * may be larger. We'll add BDRV_BLOCK_EOF if needed at the end
2629              * of the function, see below.
2630              */
2631             ret &= ~BDRV_BLOCK_EOF;
2632             break;
2633         }
2634 
2635         if (p == base) {
2636             assert(include_base);
2637             break;
2638         }
2639 
2640         /*
2641          * OK, the [offset, offset + *pnum) region is unallocated on this
2642          * layer; let's continue diving.
2643          */
2644         assert(*pnum <= bytes);
2645         bytes = *pnum;
2646     }
2647 
2648     if (offset + *pnum == eof) {
2649         ret |= BDRV_BLOCK_EOF;
2650     }
2651 
2652     return ret;
2653 }
2654 
2655 int bdrv_block_status_above(BlockDriverState *bs, BlockDriverState *base,
2656                             int64_t offset, int64_t bytes, int64_t *pnum,
2657                             int64_t *map, BlockDriverState **file)
2658 {
2659     return bdrv_common_block_status_above(bs, base, false, true, offset, bytes,
2660                                           pnum, map, file, NULL);
2661 }
2662 
2663 int bdrv_block_status(BlockDriverState *bs, int64_t offset, int64_t bytes,
2664                       int64_t *pnum, int64_t *map, BlockDriverState **file)
2665 {
2666     return bdrv_block_status_above(bs, bdrv_filter_or_cow_bs(bs),
2667                                    offset, bytes, pnum, map, file);
2668 }
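/*
 * A minimal sketch of mapping a whole image with this interface
 * (illustrative only, error handling trimmed):
 *
 *     int64_t offset = 0, len = bdrv_getlength(bs);
 *     while (offset < len) {
 *         int64_t pnum, map;
 *         BlockDriverState *file;
 *         int ret = bdrv_block_status(bs, offset, len - offset,
 *                                     &pnum, &map, &file);
 *         if (ret < 0) {
 *             break;
 *         }
 *         if (ret & BDRV_BLOCK_ZERO) {
 *             ... [offset, offset + pnum) reads as zeroes ...
 *         } else if (ret & BDRV_BLOCK_OFFSET_VALID) {
 *             ... the data lives at byte map within file ...
 *         }
 *         offset += pnum;
 *     }
 *
 * On success pnum is non-zero except at end-of-file, so the loop is
 * guaranteed to make progress.
 */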
2669 
2670 /*
2671  * Check @bs (and its backing chain) to see if the range defined
2672  * by @offset and @bytes is known to read as zeroes.
2673  * Return 1 if that is the case, 0 otherwise, and -errno on error.
2674  * This test is meant to be fast rather than accurate, so returning 0
2675  * does not guarantee non-zero data.
2676  */
2677 int coroutine_fn bdrv_co_is_zero_fast(BlockDriverState *bs, int64_t offset,
2678                                       int64_t bytes)
2679 {
2680     int ret;
2681     int64_t pnum = bytes;
2682 
2683     if (!bytes) {
2684         return 1;
2685     }
2686 
2687     ret = bdrv_common_block_status_above(bs, NULL, false, false, offset,
2688                                          bytes, &pnum, NULL, NULL, NULL);
2689 
2690     if (ret < 0) {
2691         return ret;
2692     }
2693 
2694     return (pnum == bytes) && (ret & BDRV_BLOCK_ZERO);
2695 }
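/*
 * Typical use (an illustrative pattern, not mandated by this file): a
 * copy job can probe a chunk with bdrv_co_is_zero_fast() and skip
 * writing chunks that already read as zeroes, treating a 0 result as
 * "copy it anyway" rather than as an error, since the test favours
 * speed over accuracy.
 */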
2696 
2697 int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t offset,
2698                                    int64_t bytes, int64_t *pnum)
2699 {
2700     int ret;
2701     int64_t dummy;
2702 
2703     ret = bdrv_common_block_status_above(bs, bs, true, false, offset,
2704                                          bytes, pnum ? pnum : &dummy, NULL,
2705                                          NULL, NULL);
2706     if (ret < 0) {
2707         return ret;
2708     }
2709     return !!(ret & BDRV_BLOCK_ALLOCATED);
2710 }
2711 
2712 /*
2713  * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
2714  *
2715  * Return a positive depth if (a prefix of) the given range is allocated
2716  * in any image between BASE and TOP (BASE is only included if include_base
2717  * is set).  Depth 1 is TOP, 2 is the first backing layer, and so forth.
2718  * BASE can be NULL to check if the given offset is allocated in any
2719  * image of the chain.  Return 0 otherwise, or negative errno on
2720  * failure.
2721  *
2722  * 'pnum' is set to the number of bytes (including and immediately
2723  * following the specified offset) that are known to be in the same
2724  * allocated/unallocated state.  Note that a subsequent call starting
2725  * at 'offset + *pnum' may return the same allocation status (in other
2726  * words, the result is not necessarily the maximum possible range);
2727  * but 'pnum' will only be 0 when end of file is reached.
2728  */
2729 int bdrv_is_allocated_above(BlockDriverState *top,
2730                             BlockDriverState *base,
2731                             bool include_base, int64_t offset,
2732                             int64_t bytes, int64_t *pnum)
2733 {
2734     int depth;
2735     int ret = bdrv_common_block_status_above(top, base, include_base, false,
2736                                              offset, bytes, pnum, NULL, NULL,
2737                                              &depth);
2738     if (ret < 0) {
2739         return ret;
2740     }
2741 
2742     if (ret & BDRV_BLOCK_ALLOCATED) {
2743         return depth;
2744     }
2745     return 0;
2746 }
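/*
 * Worked example: for a hypothetical chain [BASE] -> [MID] -> [TOP], a
 * range allocated only in MID yields depth 2 from
 *
 *     bdrv_is_allocated_above(top, base, false, offset, bytes, &pnum)
 *
 * (depth 1 would mean TOP itself). The same call returns 0 if the range
 * is allocated nowhere above BASE; BASE itself is only consulted when
 * include_base is true.
 */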
2747 
2748 int coroutine_fn
2749 bdrv_co_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
2750 {
2751     BlockDriver *drv = bs->drv;
2752     BlockDriverState *child_bs = bdrv_primary_bs(bs);
2753     int ret = -ENOTSUP;
2754 
2755     if (!drv) {
2756         return -ENOMEDIUM;
2757     }
2758 
2759     bdrv_inc_in_flight(bs);
2760 
2761     if (drv->bdrv_load_vmstate) {
2762         ret = drv->bdrv_load_vmstate(bs, qiov, pos);
2763     } else if (child_bs) {
2764         ret = bdrv_co_readv_vmstate(child_bs, qiov, pos);
2765     }
2766 
2767     bdrv_dec_in_flight(bs);
2768 
2769     return ret;
2770 }
2771 
2772 int coroutine_fn
2773 bdrv_co_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
2774 {
2775     BlockDriver *drv = bs->drv;
2776     BlockDriverState *child_bs = bdrv_primary_bs(bs);
2777     int ret = -ENOTSUP;
2778 
2779     if (!drv) {
2780         return -ENOMEDIUM;
2781     }
2782 
2783     bdrv_inc_in_flight(bs);
2784 
2785     if (drv->bdrv_save_vmstate) {
2786         ret = drv->bdrv_save_vmstate(bs, qiov, pos);
2787     } else if (child_bs) {
2788         ret = bdrv_co_writev_vmstate(child_bs, qiov, pos);
2789     }
2790 
2791     bdrv_dec_in_flight(bs);
2792 
2793     return ret;
2794 }
2795 
2796 int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
2797                       int64_t pos, int size)
2798 {
2799     QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
2800     int ret = bdrv_writev_vmstate(bs, &qiov, pos);
2801 
2802     return ret < 0 ? ret : size;
2803 }
2804 
2805 int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
2806                       int64_t pos, int size)
2807 {
2808     QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
2809     int ret = bdrv_readv_vmstate(bs, &qiov, pos);
2810 
2811     return ret < 0 ? ret : size;
2812 }
2813 
2814 /**************************************************************/
2815 /* async I/Os */
2816 
2817 void bdrv_aio_cancel(BlockAIOCB *acb)
2818 {
2819     qemu_aio_ref(acb);
2820     bdrv_aio_cancel_async(acb);
2821     while (acb->refcnt > 1) {
2822         if (acb->aiocb_info->get_aio_context) {
2823             aio_poll(acb->aiocb_info->get_aio_context(acb), true);
2824         } else if (acb->bs) {
2825             /* qemu_aio_ref and qemu_aio_unref are not thread-safe, so
2826              * assert that we're not using an I/O thread.  Thread-safe
2827              * code should use bdrv_aio_cancel_async exclusively.
2828              */
2829             assert(bdrv_get_aio_context(acb->bs) == qemu_get_aio_context());
2830             aio_poll(bdrv_get_aio_context(acb->bs), true);
2831         } else {
2832             abort();
2833         }
2834     }
2835     qemu_aio_unref(acb);
2836 }
2837 
2838 /* Async version of aio cancel. The caller is not blocked if the acb implements
2839  * cancel_async; otherwise we do nothing and let the request complete normally.
2840  * In either case the completion callback must be called. */
2841 void bdrv_aio_cancel_async(BlockAIOCB *acb)
2842 {
2843     if (acb->aiocb_info->cancel_async) {
2844         acb->aiocb_info->cancel_async(acb);
2845     }
2846 }
2847 
2848 /**************************************************************/
2849 /* Coroutine block device emulation */
2850 
2851 int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
2852 {
2853     BdrvChild *primary_child = bdrv_primary_child(bs);
2854     BdrvChild *child;
2855     int current_gen;
2856     int ret = 0;
2857 
2858     bdrv_inc_in_flight(bs);
2859 
2860     if (!bdrv_is_inserted(bs) || bdrv_is_read_only(bs) ||
2861         bdrv_is_sg(bs)) {
2862         goto early_exit;
2863     }
2864 
2865     qemu_co_mutex_lock(&bs->reqs_lock);
2866     current_gen = qatomic_read(&bs->write_gen);
2867 
2868     /* Wait until any previous flushes are completed */
2869     while (bs->active_flush_req) {
2870         qemu_co_queue_wait(&bs->flush_queue, &bs->reqs_lock);
2871     }
2872 
2873     /* Flushes reach this point in nondecreasing current_gen order.  */
2874     bs->active_flush_req = true;
2875     qemu_co_mutex_unlock(&bs->reqs_lock);
2876 
2877     /* Write back all layers by calling one driver function */
2878     if (bs->drv->bdrv_co_flush) {
2879         ret = bs->drv->bdrv_co_flush(bs);
2880         goto out;
2881     }
2882 
2883     /* Write back cached data to the OS even with cache=unsafe */
2884     BLKDBG_EVENT(primary_child, BLKDBG_FLUSH_TO_OS);
2885     if (bs->drv->bdrv_co_flush_to_os) {
2886         ret = bs->drv->bdrv_co_flush_to_os(bs);
2887         if (ret < 0) {
2888             goto out;
2889         }
2890     }
2891 
2892     /* But don't actually force it to the disk with cache=unsafe */
2893     if (bs->open_flags & BDRV_O_NO_FLUSH) {
2894         goto flush_children;
2895     }
2896 
2897     /* Check if we really need to flush anything */
2898     if (bs->flushed_gen == current_gen) {
2899         goto flush_children;
2900     }
2901 
2902     BLKDBG_EVENT(primary_child, BLKDBG_FLUSH_TO_DISK);
2903     if (!bs->drv) {
2904         /* bs->drv->bdrv_co_flush() might have ejected the BDS
2905          * (even in case of apparent success) */
2906         ret = -ENOMEDIUM;
2907         goto out;
2908     }
2909     if (bs->drv->bdrv_co_flush_to_disk) {
2910         ret = bs->drv->bdrv_co_flush_to_disk(bs);
2911     } else if (bs->drv->bdrv_aio_flush) {
2912         BlockAIOCB *acb;
2913         CoroutineIOCompletion co = {
2914             .coroutine = qemu_coroutine_self(),
2915         };
2916 
2917         acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
2918         if (acb == NULL) {
2919             ret = -EIO;
2920         } else {
2921             qemu_coroutine_yield();
2922             ret = co.ret;
2923         }
2924     } else {
2925         /*
2926          * Some block drivers always operate in either writethrough or unsafe
2927          * mode and therefore don't support bdrv_flush. Usually qemu doesn't
2928          * know how the server works (because the behaviour is hardcoded or
2929          * depends on server-side configuration), so we can't ensure that
2930          * everything is safe on disk. Returning an error doesn't work because
2931          * that would break guests even if the server operates in writethrough
2932          * mode.
2933          *
2934          * Let's hope the user knows what they're doing.
2935          */
2936         ret = 0;
2937     }
2938 
2939     if (ret < 0) {
2940         goto out;
2941     }
2942 
2943     /* Now flush the underlying protocol.  It will also have BDRV_O_NO_FLUSH
2944      * in the case of cache=unsafe, so there are no useless flushes.
2945      */
2946 flush_children:
2947     ret = 0;
2948     QLIST_FOREACH(child, &bs->children, next) {
2949         if (child->perm & (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED)) {
2950             int this_child_ret = bdrv_co_flush(child->bs);
2951             if (!ret) {
2952                 ret = this_child_ret;
2953             }
2954         }
2955     }
2956 
2957 out:
2958     /* Notify any pending flushes that we have completed */
2959     if (ret == 0) {
2960         bs->flushed_gen = current_gen;
2961     }
2962 
2963     qemu_co_mutex_lock(&bs->reqs_lock);
2964     bs->active_flush_req = false;
2965     /* Return value is ignored - it's ok if wait queue is empty */
2966     qemu_co_queue_next(&bs->flush_queue);
2967     qemu_co_mutex_unlock(&bs->reqs_lock);
2968 
2969 early_exit:
2970     bdrv_dec_in_flight(bs);
2971     return ret;
2972 }
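/*
 * Example of the generation bookkeeping above: if two writes bump
 * write_gen from 5 to 7 and a flush then completes successfully,
 * flushed_gen becomes 7. A second flush issued with no intervening
 * write sees flushed_gen == current_gen and skips the flush-to-disk
 * step entirely (it still recurses into the children).
 */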
2973 
2974 int coroutine_fn bdrv_co_pdiscard(BdrvChild *child, int64_t offset,
2975                                   int64_t bytes)
2976 {
2977     BdrvTrackedRequest req;
2978     int max_pdiscard, ret;
2979     int head, tail, align;
2980     BlockDriverState *bs = child->bs;
2981 
2982     if (!bs || !bs->drv || !bdrv_is_inserted(bs)) {
2983         return -ENOMEDIUM;
2984     }
2985 
2986     if (bdrv_has_readonly_bitmaps(bs)) {
2987         return -EPERM;
2988     }
2989 
2990     ret = bdrv_check_request(offset, bytes, NULL);
2991     if (ret < 0) {
2992         return ret;
2993     }
2994 
2995     /* Do nothing if disabled.  */
2996     if (!(bs->open_flags & BDRV_O_UNMAP)) {
2997         return 0;
2998     }
2999 
3000     if (!bs->drv->bdrv_co_pdiscard && !bs->drv->bdrv_aio_pdiscard) {
3001         return 0;
3002     }
3003 
3004     /* Discard is advisory, but some devices track and coalesce
3005      * unaligned requests, so we must pass everything down rather than
3006      * round here.  Still, most devices will just silently ignore
3007      * unaligned requests (by returning -ENOTSUP), so we must fragment
3008      * the request accordingly.  */
3009     align = MAX(bs->bl.pdiscard_alignment, bs->bl.request_alignment);
3010     assert(align % bs->bl.request_alignment == 0);
3011     head = offset % align;
3012     tail = (offset + bytes) % align;
3013 
3014     bdrv_inc_in_flight(bs);
3015     tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_DISCARD);
3016 
3017     ret = bdrv_co_write_req_prepare(child, offset, bytes, &req, 0);
3018     if (ret < 0) {
3019         goto out;
3020     }
3021 
3022     max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT_MAX),
3023                                    align);
3024     assert(max_pdiscard >= bs->bl.request_alignment);
3025 
3026     while (bytes > 0) {
3027         int64_t num = bytes;
3028 
3029         if (head) {
3030             /* Make small requests to get to alignment boundaries. */
3031             num = MIN(bytes, align - head);
3032             if (!QEMU_IS_ALIGNED(num, bs->bl.request_alignment)) {
3033                 num %= bs->bl.request_alignment;
3034             }
3035             head = (head + num) % align;
3036             assert(num < max_pdiscard);
3037         } else if (tail) {
3038             if (num > align) {
3039                 /* Shorten the request to the last aligned cluster.  */
3040                 num -= tail;
3041             } else if (!QEMU_IS_ALIGNED(tail, bs->bl.request_alignment) &&
3042                        tail > bs->bl.request_alignment) {
3043                 tail %= bs->bl.request_alignment;
3044                 num -= tail;
3045             }
3046         }
3047         /* limit request size */
3048         if (num > max_pdiscard) {
3049             num = max_pdiscard;
3050         }
3051 
3052         if (!bs->drv) {
3053             ret = -ENOMEDIUM;
3054             goto out;
3055         }
3056         if (bs->drv->bdrv_co_pdiscard) {
3057             ret = bs->drv->bdrv_co_pdiscard(bs, offset, num);
3058         } else {
3059             BlockAIOCB *acb;
3060             CoroutineIOCompletion co = {
3061                 .coroutine = qemu_coroutine_self(),
3062             };
3063 
3064             acb = bs->drv->bdrv_aio_pdiscard(bs, offset, num,
3065                                              bdrv_co_io_em_complete, &co);
3066             if (acb == NULL) {
3067                 ret = -EIO;
3068                 goto out;
3069             } else {
3070                 qemu_coroutine_yield();
3071                 ret = co.ret;
3072             }
3073         }
3074         if (ret && ret != -ENOTSUP) {
3075             goto out;
3076         }
3077 
3078         offset += num;
3079         bytes -= num;
3080     }
3081     ret = 0;
3082 out:
3083     bdrv_co_write_req_finish(child, req.offset, req.bytes, &req, ret);
3084     tracked_request_end(&req);
3085     bdrv_dec_in_flight(bs);
3086     return ret;
3087 }
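/*
 * Worked example with hypothetical limits (align = 65536,
 * request_alignment = 512, large max_pdiscard): a discard with
 * offset = 4096 and bytes = 262144 is fragmented by the loop above into
 * a 61440-byte head piece up to the 64 KiB boundary, a 196608-byte
 * aligned piece shortened by the tail, and a final 4096-byte tail
 * piece; any piece the driver cannot handle is silently skipped
 * (-ENOTSUP is tolerated).
 */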
3088 
3089 int bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf)
3090 {
3091     BlockDriver *drv = bs->drv;
3092     CoroutineIOCompletion co = {
3093         .coroutine = qemu_coroutine_self(),
3094     };
3095     BlockAIOCB *acb;
3096 
3097     bdrv_inc_in_flight(bs);
3098     if (!drv || (!drv->bdrv_aio_ioctl && !drv->bdrv_co_ioctl)) {
3099         co.ret = -ENOTSUP;
3100         goto out;
3101     }
3102 
3103     if (drv->bdrv_co_ioctl) {
3104         co.ret = drv->bdrv_co_ioctl(bs, req, buf);
3105     } else {
3106         acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co);
3107         if (!acb) {
3108             co.ret = -ENOTSUP;
3109             goto out;
3110         }
3111         qemu_coroutine_yield();
3112     }
3113 out:
3114     bdrv_dec_in_flight(bs);
3115     return co.ret;
3116 }
3117 
3118 void *qemu_blockalign(BlockDriverState *bs, size_t size)
3119 {
3120     return qemu_memalign(bdrv_opt_mem_align(bs), size);
3121 }
3122 
3123 void *qemu_blockalign0(BlockDriverState *bs, size_t size)
3124 {
3125     return memset(qemu_blockalign(bs, size), 0, size);
3126 }
3127 
3128 void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
3129 {
3130     size_t align = bdrv_opt_mem_align(bs);
3131 
3132     /* Ensure that NULL is never returned on success */
3133     assert(align > 0);
3134     if (size == 0) {
3135         size = align;
3136     }
3137 
3138     return qemu_try_memalign(align, size);
3139 }
3140 
3141 void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
3142 {
3143     void *mem = qemu_try_blockalign(bs, size);
3144 
3145     if (mem) {
3146         memset(mem, 0, size);
3147     }
3148 
3149     return mem;
3150 }
3151 
3152 /*
3153  * Check if all memory in this vector meets the minimum memory alignment of @bs.
3154  */
3155 bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
3156 {
3157     int i;
3158     size_t alignment = bdrv_min_mem_align(bs);
3159 
3160     for (i = 0; i < qiov->niov; i++) {
3161         if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
3162             return false;
3163         }
3164         if (qiov->iov[i].iov_len % alignment) {
3165             return false;
3166         }
3167     }
3168 
3169     return true;
3170 }
3171 
3172 void bdrv_io_plug(BlockDriverState *bs)
3173 {
3174     BdrvChild *child;
3175 
3176     QLIST_FOREACH(child, &bs->children, next) {
3177         bdrv_io_plug(child->bs);
3178     }
3179 
3180     if (qatomic_fetch_inc(&bs->io_plugged) == 0) {
3181         BlockDriver *drv = bs->drv;
3182         if (drv && drv->bdrv_io_plug) {
3183             drv->bdrv_io_plug(bs);
3184         }
3185     }
3186 }
3187 
3188 void bdrv_io_unplug(BlockDriverState *bs)
3189 {
3190     BdrvChild *child;
3191 
3192     assert(bs->io_plugged);
3193     if (qatomic_fetch_dec(&bs->io_plugged) == 1) {
3194         BlockDriver *drv = bs->drv;
3195         if (drv && drv->bdrv_io_unplug) {
3196             drv->bdrv_io_unplug(bs);
3197         }
3198     }
3199 
3200     QLIST_FOREACH(child, &bs->children, next) {
3201         bdrv_io_unplug(child->bs);
3202     }
3203 }
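/*
 * A minimal usage sketch (illustrative): batch submission brackets a
 * burst of requests so drivers that support it can coalesce doorbells:
 *
 *     bdrv_io_plug(bs);
 *     ... queue several bdrv_co_preadv()/bdrv_co_pwritev() requests ...
 *     bdrv_io_unplug(bs);
 *
 * Plug/unplug calls nest via the io_plugged counter, so only the
 * outermost pair reaches the driver's bdrv_io_plug/bdrv_io_unplug
 * callbacks.
 */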
3204 
3205 void bdrv_register_buf(BlockDriverState *bs, void *host, size_t size)
3206 {
3207     BdrvChild *child;
3208 
3209     if (bs->drv && bs->drv->bdrv_register_buf) {
3210         bs->drv->bdrv_register_buf(bs, host, size);
3211     }
3212     QLIST_FOREACH(child, &bs->children, next) {
3213         bdrv_register_buf(child->bs, host, size);
3214     }
3215 }
3216 
3217 void bdrv_unregister_buf(BlockDriverState *bs, void *host)
3218 {
3219     BdrvChild *child;
3220 
3221     if (bs->drv && bs->drv->bdrv_unregister_buf) {
3222         bs->drv->bdrv_unregister_buf(bs, host);
3223     }
3224     QLIST_FOREACH(child, &bs->children, next) {
3225         bdrv_unregister_buf(child->bs, host);
3226     }
3227 }
3228 
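/*
 * Common implementation of bdrv_co_copy_range_from() and _to().  With
 * @recurse_src set, the request is tracked as a read on the source node
 * and forwarded to its bdrv_co_copy_range_from callback; otherwise it is
 * tracked as a write on the destination node and forwarded to
 * bdrv_co_copy_range_to.
 */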
3229 static int coroutine_fn bdrv_co_copy_range_internal(
3230         BdrvChild *src, int64_t src_offset, BdrvChild *dst,
3231         int64_t dst_offset, int64_t bytes,
3232         BdrvRequestFlags read_flags, BdrvRequestFlags write_flags,
3233         bool recurse_src)
3234 {
3235     BdrvTrackedRequest req;
3236     int ret;
3237 
3238     /* TODO We can support BDRV_REQ_NO_FALLBACK here */
3239     assert(!(read_flags & BDRV_REQ_NO_FALLBACK));
3240     assert(!(write_flags & BDRV_REQ_NO_FALLBACK));
3241 
3242     if (!dst || !dst->bs || !bdrv_is_inserted(dst->bs)) {
3243         return -ENOMEDIUM;
3244     }
3245     ret = bdrv_check_request32(dst_offset, bytes, NULL, 0);
3246     if (ret) {
3247         return ret;
3248     }
3249     if (write_flags & BDRV_REQ_ZERO_WRITE) {
3250         return bdrv_co_pwrite_zeroes(dst, dst_offset, bytes, write_flags);
3251     }
3252 
3253     if (!src || !src->bs || !bdrv_is_inserted(src->bs)) {
3254         return -ENOMEDIUM;
3255     }
3256     ret = bdrv_check_request32(src_offset, bytes, NULL, 0);
3257     if (ret) {
3258         return ret;
3259     }
3260 
3261     if (!src->bs->drv->bdrv_co_copy_range_from
3262         || !dst->bs->drv->bdrv_co_copy_range_to
3263         || src->bs->encrypted || dst->bs->encrypted) {
3264         return -ENOTSUP;
3265     }
3266 
3267     if (recurse_src) {
3268         bdrv_inc_in_flight(src->bs);
3269         tracked_request_begin(&req, src->bs, src_offset, bytes,
3270                               BDRV_TRACKED_READ);
3271 
3272         /* BDRV_REQ_SERIALISING is only for write operations */
3273         assert(!(read_flags & BDRV_REQ_SERIALISING));
3274         bdrv_wait_serialising_requests(&req);
3275 
3276         ret = src->bs->drv->bdrv_co_copy_range_from(src->bs,
3277                                                     src, src_offset,
3278                                                     dst, dst_offset,
3279                                                     bytes,
3280                                                     read_flags, write_flags);
3281 
3282         tracked_request_end(&req);
3283         bdrv_dec_in_flight(src->bs);
3284     } else {
3285         bdrv_inc_in_flight(dst->bs);
3286         tracked_request_begin(&req, dst->bs, dst_offset, bytes,
3287                               BDRV_TRACKED_WRITE);
3288         ret = bdrv_co_write_req_prepare(dst, dst_offset, bytes, &req,
3289                                         write_flags);
3290         if (!ret) {
3291             ret = dst->bs->drv->bdrv_co_copy_range_to(dst->bs,
3292                                                       src, src_offset,
3293                                                       dst, dst_offset,
3294                                                       bytes,
3295                                                       read_flags, write_flags);
3296         }
3297         bdrv_co_write_req_finish(dst, dst_offset, bytes, &req, ret);
3298         tracked_request_end(&req);
3299         bdrv_dec_in_flight(dst->bs);
3300     }
3301 
3302     return ret;
3303 }
3304 
3305 /* Copy a range of bytes from @src to @dst.
3306  *
3307  * See the comment on bdrv_co_copy_range() for the parameter and return
3308  * value semantics. */
3309 int coroutine_fn bdrv_co_copy_range_from(BdrvChild *src, int64_t src_offset,
3310                                          BdrvChild *dst, int64_t dst_offset,
3311                                          int64_t bytes,
3312                                          BdrvRequestFlags read_flags,
3313                                          BdrvRequestFlags write_flags)
3314 {
3315     trace_bdrv_co_copy_range_from(src, src_offset, dst, dst_offset, bytes,
3316                                   read_flags, write_flags);
3317     return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
3318                                        bytes, read_flags, write_flags, true);
3319 }
3320 
3321 /* Copy a range of bytes from @src to @dst.
3322  *
3323  * See the comment on bdrv_co_copy_range() for the parameter and return
3324  * value semantics. */
3325 int coroutine_fn bdrv_co_copy_range_to(BdrvChild *src, int64_t src_offset,
3326                                        BdrvChild *dst, int64_t dst_offset,
3327                                        int64_t bytes,
3328                                        BdrvRequestFlags read_flags,
3329                                        BdrvRequestFlags write_flags)
3330 {
3331     trace_bdrv_co_copy_range_to(src, src_offset, dst, dst_offset, bytes,
3332                                 read_flags, write_flags);
3333     return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
3334                                        bytes, read_flags, write_flags, false);
3335 }
3336 
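/*
 * Copy @bytes from @src at @src_offset to @dst at @dst_offset using the
 * drivers' copy-offloading callbacks.  Returns 0 on success and a
 * negative errno on failure; -ENOTSUP means the caller should fall back
 * to a read/write copy through a bounce buffer.
 */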
3337 int coroutine_fn bdrv_co_copy_range(BdrvChild *src, int64_t src_offset,
3338                                     BdrvChild *dst, int64_t dst_offset,
3339                                     int64_t bytes, BdrvRequestFlags read_flags,
3340                                     BdrvRequestFlags write_flags)
3341 {
3342     return bdrv_co_copy_range_from(src, src_offset,
3343                                    dst, dst_offset,
3344                                    bytes, read_flags, write_flags);
3345 }
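
/*
 * Usage sketch (coroutine context; offsets and byte count illustrative):
 *
 *     ret = bdrv_co_copy_range(src_child, src_off, dst_child, dst_off,
 *                              bytes, 0, 0);
 *     if (ret == -ENOTSUP) {
 *         ... fall back to bdrv_co_pread() + bdrv_co_pwrite() ...
 *     }
 */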
3346 
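/* Notify all parents of @bs that the node has been resized */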
3347 static void bdrv_parent_cb_resize(BlockDriverState *bs)
3348 {
3349     BdrvChild *c;
3350     QLIST_FOREACH(c, &bs->parents, next_parent) {
3351         if (c->klass->resize) {
3352             c->klass->resize(c);
3353         }
3354     }
3355 }
3356 
3357 /**
3358  * Truncate file to 'offset' bytes (needed only for file protocols)
3359  *
3360  * If 'exact' is true, the file must be resized to exactly the given
3361  * 'offset'.  Otherwise, it is sufficient for the node to be at least
3362  * 'offset' bytes in length.
3363  */
3364 int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset, bool exact,
3365                                   PreallocMode prealloc, BdrvRequestFlags flags,
3366                                   Error **errp)
3367 {
3368     BlockDriverState *bs = child->bs;
3369     BdrvChild *filtered, *backing;
3370     BlockDriver *drv = bs->drv;
3371     BdrvTrackedRequest req;
3372     int64_t old_size, new_bytes;
3373     int ret;
3374
3376     /* if bs->drv == NULL, bs is closed, so there's nothing to do here */
3377     if (!drv) {
3378         error_setg(errp, "No medium inserted");
3379         return -ENOMEDIUM;
3380     }
3381     if (offset < 0) {
3382         error_setg(errp, "Image size cannot be negative");
3383         return -EINVAL;
3384     }
3385 
3386     ret = bdrv_check_request(offset, 0, errp);
3387     if (ret < 0) {
3388         return ret;
3389     }
3390 
3391     old_size = bdrv_getlength(bs);
3392     if (old_size < 0) {
3393         error_setg_errno(errp, -old_size, "Failed to get old image size");
3394         return old_size;
3395     }
3396 
3397     if (bdrv_is_read_only(bs)) {
3398         error_setg(errp, "Image is read-only");
3399         return -EACCES;
3400     }
3401 
3402     if (offset > old_size) {
3403         new_bytes = offset - old_size;
3404     } else {
3405         new_bytes = 0;
3406     }
3407 
3408     bdrv_inc_in_flight(bs);
3409     tracked_request_begin(&req, bs, offset - new_bytes, new_bytes,
3410                           BDRV_TRACKED_TRUNCATE);
3411 
3412     /* If we are growing the image and potentially using preallocation for the
3413      * new area, we need to make sure that no write requests are made to it
3414      * concurrently or they might be overwritten by preallocation. */
3415     if (new_bytes) {
3416         bdrv_make_request_serialising(&req, 1);
3417     }
3418     ret = bdrv_co_write_req_prepare(child, offset - new_bytes, new_bytes, &req,
3419                                     0);
3420     if (ret < 0) {
3421         error_setg_errno(errp, -ret,
3422                          "Failed to prepare request for truncation");
3423         goto out;
3424     }
3425 
3426     filtered = bdrv_filter_child(bs);
3427     backing = bdrv_cow_child(bs);
3428 
3429     /*
3430      * If the image has a backing file that is large enough that it would
3431      * provide data for the new area, we cannot leave it unallocated because
3432      * then the backing file content would become visible. Instead, zero-fill
3433      * the new area.
3434      *
3435  * Note that if the image has a backing file but was opened without it,
3436  * keeping the content consistent with that backing file is the user's
3437  * responsibility.
3438      */
3439     if (new_bytes && backing) {
3440         int64_t backing_len;
3441 
3442         backing_len = bdrv_getlength(backing->bs);
3443         if (backing_len < 0) {
3444             ret = backing_len;
3445             error_setg_errno(errp, -ret, "Could not get backing file size");
3446             goto out;
3447         }
3448 
3449         if (backing_len > old_size) {
3450             flags |= BDRV_REQ_ZERO_WRITE;
3451         }
3452     }
3453 
3454     if (drv->bdrv_co_truncate) {
3455         if (flags & ~bs->supported_truncate_flags) {
3456             error_setg(errp, "Block driver does not support requested flags");
3457             ret = -ENOTSUP;
3458             goto out;
3459         }
3460         ret = drv->bdrv_co_truncate(bs, offset, exact, prealloc, flags, errp);
3461     } else if (filtered) {
3462         ret = bdrv_co_truncate(filtered, offset, exact, prealloc, flags, errp);
3463     } else {
3464         error_setg(errp, "Image format driver does not support resize");
3465         ret = -ENOTSUP;
3466         goto out;
3467     }
3468     if (ret < 0) {
3469         goto out;
3470     }
3471 
3472     ret = refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
3473     if (ret < 0) {
3474         error_setg_errno(errp, -ret, "Could not refresh total sector count");
3475     } else {
3476         offset = bs->total_sectors * BDRV_SECTOR_SIZE;
3477     }
3478     /* It's possible that truncation succeeded while refresh_total_sectors
3479      * failed; the latter doesn't affect how we should finish the request.
3480      * Pass 0 as the last parameter so that dirty bitmaps etc. are handled. */
3481     bdrv_co_write_req_finish(child, offset - new_bytes, new_bytes, &req, 0);
3482 
3483 out:
3484     tracked_request_end(&req);
3485     bdrv_dec_in_flight(bs);
3486 
3487     return ret;
3488 }
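
/*
 * Usage sketch: grow a node by 1 MiB without preallocation (coroutine
 * context; @child is assumed to hold BLK_PERM_RESIZE on its node):
 *
 *     Error *local_err = NULL;
 *     int64_t len = bdrv_getlength(child->bs);
 *     if (len >= 0) {
 *         ret = bdrv_co_truncate(child, len + (1 * MiB), false,
 *                                PREALLOC_MODE_OFF, 0, &local_err);
 *     }
 */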
3489 
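/*
 * Best-effort cancellation of the in-flight requests on @bs.  This only
 * has an effect for drivers that implement the bdrv_cancel_in_flight
 * callback; for all others it is a no-op.
 */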
3490 void bdrv_cancel_in_flight(BlockDriverState *bs)
3491 {
3492     if (!bs || !bs->drv) {
3493         return;
3494     }
3495 
3496     if (bs->drv->bdrv_cancel_in_flight) {
3497         bs->drv->bdrv_cancel_in_flight(bs);
3498     }
3499 }
3500