xref: /openbmc/qemu/block/io.c (revision c0bea68f9ea48f0dea7a06a259a613bfd3a7e35e)
1 /*
2  * Block layer I/O functions
3  *
4  * Copyright (c) 2003 Fabrice Bellard
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to deal
8  * in the Software without restriction, including without limitation the rights
9  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10  * copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22  * THE SOFTWARE.
23  */
24 
25 #include "qemu/osdep.h"
26 #include "trace.h"
27 #include "sysemu/block-backend.h"
28 #include "block/blockjob.h"
29 #include "block/block_int.h"
30 #include "qemu/cutils.h"
31 #include "qapi/error.h"
32 #include "qemu/error-report.h"
33 
34 #define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */
35 
36 static BlockAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
37                                          int64_t sector_num,
38                                          QEMUIOVector *qiov,
39                                          int nb_sectors,
40                                          BdrvRequestFlags flags,
41                                          BlockCompletionFunc *cb,
42                                          void *opaque,
43                                          bool is_write);
44 static void coroutine_fn bdrv_co_do_rw(void *opaque);
45 static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
46     int64_t sector_num, int nb_sectors, BdrvRequestFlags flags);
47 
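/*
 * Notify all parents of @bs, through their BdrvChild role callbacks, that a
 * drained section begins.  bdrv_parent_drained_end() below sends the matching
 * notification once draining is over.
 */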
48 static void bdrv_parent_drained_begin(BlockDriverState *bs)
49 {
50     BdrvChild *c;
51 
52     QLIST_FOREACH(c, &bs->parents, next_parent) {
53         if (c->role->drained_begin) {
54             c->role->drained_begin(c);
55         }
56     }
57 }
58 
59 static void bdrv_parent_drained_end(BlockDriverState *bs)
60 {
61     BdrvChild *c;
62 
63     QLIST_FOREACH(c, &bs->parents, next_parent) {
64         if (c->role->drained_end) {
65             c->role->drained_end(c);
66         }
67     }
68 }
69 
70 void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
71 {
72     BlockDriver *drv = bs->drv;
73     Error *local_err = NULL;
74 
75     memset(&bs->bl, 0, sizeof(bs->bl));
76 
77     if (!drv) {
78         return;
79     }
80 
81     /* Take some limits from the children as a default */
82     if (bs->file) {
83         bdrv_refresh_limits(bs->file->bs, &local_err);
84         if (local_err) {
85             error_propagate(errp, local_err);
86             return;
87         }
88         bs->bl.opt_transfer_length = bs->file->bs->bl.opt_transfer_length;
89         bs->bl.max_transfer_length = bs->file->bs->bl.max_transfer_length;
90         bs->bl.min_mem_alignment = bs->file->bs->bl.min_mem_alignment;
91         bs->bl.opt_mem_alignment = bs->file->bs->bl.opt_mem_alignment;
92         bs->bl.max_iov = bs->file->bs->bl.max_iov;
93     } else {
94         bs->bl.min_mem_alignment = 512;
95         bs->bl.opt_mem_alignment = getpagesize();
96 
97         /* Safe default since most protocols use readv()/writev()/etc */
98         bs->bl.max_iov = IOV_MAX;
99     }
100 
101     if (bs->backing) {
102         bdrv_refresh_limits(bs->backing->bs, &local_err);
103         if (local_err) {
104             error_propagate(errp, local_err);
105             return;
106         }
107         bs->bl.opt_transfer_length =
108             MAX(bs->bl.opt_transfer_length,
109                 bs->backing->bs->bl.opt_transfer_length);
110         bs->bl.max_transfer_length =
111             MIN_NON_ZERO(bs->bl.max_transfer_length,
112                          bs->backing->bs->bl.max_transfer_length);
113         bs->bl.opt_mem_alignment =
114             MAX(bs->bl.opt_mem_alignment,
115                 bs->backing->bs->bl.opt_mem_alignment);
116         bs->bl.min_mem_alignment =
117             MAX(bs->bl.min_mem_alignment,
118                 bs->backing->bs->bl.min_mem_alignment);
119         bs->bl.max_iov =
120             MIN(bs->bl.max_iov,
121                 bs->backing->bs->bl.max_iov);
122     }
123 
124     /* Then let the driver override it */
125     if (drv->bdrv_refresh_limits) {
126         drv->bdrv_refresh_limits(bs, errp);
127     }
128 }
129 
130 /**
131  * The copy-on-read flag is actually a reference count so multiple users may
132  * use the feature without worrying about clobbering its previous state.
133  * Copy-on-read stays enabled until all users have called to disable it.
134  */
135 void bdrv_enable_copy_on_read(BlockDriverState *bs)
136 {
137     bs->copy_on_read++;
138 }
139 
140 void bdrv_disable_copy_on_read(BlockDriverState *bs)
141 {
142     assert(bs->copy_on_read > 0);
143     bs->copy_on_read--;
144 }
145 
146 /* Check if any requests are in flight (on this node or any of its children) */
147 bool bdrv_requests_pending(BlockDriverState *bs)
148 {
149     BdrvChild *child;
150 
151     if (!QLIST_EMPTY(&bs->tracked_requests)) {
152         return true;
153     }
154 
155     QLIST_FOREACH(child, &bs->children, next) {
156         if (bdrv_requests_pending(child->bs)) {
157             return true;
158         }
159     }
160 
161     return false;
162 }
163 
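/*
 * Recursively ask the drivers in the subtree rooted at @bs to stop issuing
 * internal I/O: call the optional .bdrv_drain callback on @bs itself and then
 * on every child node.
 */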
164 static void bdrv_drain_recurse(BlockDriverState *bs)
165 {
166     BdrvChild *child;
167 
168     if (bs->drv && bs->drv->bdrv_drain) {
169         bs->drv->bdrv_drain(bs);
170     }
171     QLIST_FOREACH(child, &bs->children, next) {
172         bdrv_drain_recurse(child->bs);
173     }
174 }
175 
176 typedef struct {
177     Coroutine *co;
178     BlockDriverState *bs;
179     QEMUBH *bh;
180     bool done;
181 } BdrvCoDrainData;
182 
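/*
 * Keep polling the AioContext of @bs until no request is pending on @bs or
 * any of its children.  aio_poll() is called in blocking mode only while
 * requests are still in flight.
 */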
183 static void bdrv_drain_poll(BlockDriverState *bs)
184 {
185     bool busy = true;
186 
187     while (busy) {
188         /* Keep iterating */
189         busy = bdrv_requests_pending(bs);
190         busy |= aio_poll(bdrv_get_aio_context(bs), busy);
191     }
192 }
193 
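/*
 * Bottom half used by bdrv_co_yield_to_drain(): run the synchronous drain
 * poll outside of coroutine context and then re-enter the coroutine that
 * scheduled it.
 */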
194 static void bdrv_co_drain_bh_cb(void *opaque)
195 {
196     BdrvCoDrainData *data = opaque;
197     Coroutine *co = data->co;
198 
199     qemu_bh_delete(data->bh);
200     bdrv_drain_poll(data->bs);
201     data->done = true;
202     qemu_coroutine_enter(co, NULL);
203 }
204 
205 static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs)
206 {
207     BdrvCoDrainData data;
208 
209     /* Calling bdrv_drain() from a BH ensures the current coroutine yields and
210      * other coroutines run if they were queued from
211      * qemu_co_queue_run_restart(). */
212 
213     assert(qemu_in_coroutine());
214     data = (BdrvCoDrainData) {
215         .co = qemu_coroutine_self(),
216         .bs = bs,
217         .done = false,
218         .bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_drain_bh_cb, &data),
219     };
220     qemu_bh_schedule(data.bh);
221 
222     qemu_coroutine_yield();
223     /* If we are resumed from some other event (such as an aio completion or a
224      * timer callback), it is a bug in the caller that should be fixed. */
225     assert(data.done);
226 }
227 
228 /*
229  * Wait for pending requests to complete on a single BlockDriverState subtree,
230  * and suspend the block driver's internal I/O until the next request arrives.
231  *
232  * Note that unlike bdrv_drain_all(), the caller must hold the BlockDriverState
233  * AioContext.
234  *
235  * Only this BlockDriverState's AioContext is run, so in-flight requests must
236  * not depend on events in other AioContexts.  If they do, use
237  * bdrv_drain_all() instead.
238  */
239 void coroutine_fn bdrv_co_drain(BlockDriverState *bs)
240 {
241     bdrv_parent_drained_begin(bs);
242     bdrv_io_unplugged_begin(bs);
243     bdrv_drain_recurse(bs);
244     bdrv_co_yield_to_drain(bs);
245     bdrv_io_unplugged_end(bs);
246     bdrv_parent_drained_end(bs);
247 }
248 
249 void bdrv_drain(BlockDriverState *bs)
250 {
251     bdrv_parent_drained_begin(bs);
252     bdrv_io_unplugged_begin(bs);
253     bdrv_drain_recurse(bs);
254     if (qemu_in_coroutine()) {
255         bdrv_co_yield_to_drain(bs);
256     } else {
257         bdrv_drain_poll(bs);
258     }
259     bdrv_io_unplugged_end(bs);
260     bdrv_parent_drained_end(bs);
261 }
262 
263 /*
264  * Wait for pending requests to complete across all BlockDriverStates
265  *
266  * This function does not flush data to disk, use bdrv_flush_all() for that
267  * after calling this function.
268  */
269 void bdrv_drain_all(void)
270 {
271     /* Always run first iteration so any pending completion BHs run */
272     bool busy = true;
273     BlockDriverState *bs;
274     BdrvNextIterator *it = NULL;
275     GSList *aio_ctxs = NULL, *ctx;
276 
277     while ((it = bdrv_next(it, &bs))) {
278         AioContext *aio_context = bdrv_get_aio_context(bs);
279 
280         aio_context_acquire(aio_context);
281         if (bs->job) {
282             block_job_pause(bs->job);
283         }
284         bdrv_parent_drained_begin(bs);
285         bdrv_io_unplugged_begin(bs);
286         bdrv_drain_recurse(bs);
287         aio_context_release(aio_context);
288 
289         if (!g_slist_find(aio_ctxs, aio_context)) {
290             aio_ctxs = g_slist_prepend(aio_ctxs, aio_context);
291         }
292     }
293 
294     /* Note that completion of an asynchronous I/O operation can trigger any
295      * number of other I/O operations on other devices---for example a
296      * coroutine can submit an I/O request to another device in response to
297      * request completion.  Therefore we must keep looping until there is no
298      * more activity rather than simply draining each device independently.
299      */
300     while (busy) {
301         busy = false;
302 
303         for (ctx = aio_ctxs; ctx != NULL; ctx = ctx->next) {
304             AioContext *aio_context = ctx->data;
305             it = NULL;
306 
307             aio_context_acquire(aio_context);
308             while ((it = bdrv_next(it, &bs))) {
309                 if (aio_context == bdrv_get_aio_context(bs)) {
310                     if (bdrv_requests_pending(bs)) {
311                         busy = true;
312                         aio_poll(aio_context, busy);
313                     }
314                 }
315             }
316             busy |= aio_poll(aio_context, false);
317             aio_context_release(aio_context);
318         }
319     }
320 
321     it = NULL;
322     while ((it = bdrv_next(it, &bs))) {
323         AioContext *aio_context = bdrv_get_aio_context(bs);
324 
325         aio_context_acquire(aio_context);
326         bdrv_io_unplugged_end(bs);
327         bdrv_parent_drained_end(bs);
328         if (bs->job) {
329             block_job_resume(bs->job);
330         }
331         aio_context_release(aio_context);
332     }
333     g_slist_free(aio_ctxs);
334 }
335 
336 /**
337  * Remove an active request from the tracked requests list
338  *
339  * This function should be called when a tracked request is completing.
340  */
341 static void tracked_request_end(BdrvTrackedRequest *req)
342 {
343     if (req->serialising) {
344         req->bs->serialising_in_flight--;
345     }
346 
347     QLIST_REMOVE(req, list);
348     qemu_co_queue_restart_all(&req->wait_queue);
349 }
350 
351 /**
352  * Add an active request to the tracked requests list
353  */
354 static void tracked_request_begin(BdrvTrackedRequest *req,
355                                   BlockDriverState *bs,
356                                   int64_t offset,
357                                   unsigned int bytes,
358                                   enum BdrvTrackedRequestType type)
359 {
360     *req = (BdrvTrackedRequest){
361         .bs = bs,
362         .offset         = offset,
363         .bytes          = bytes,
364         .type           = type,
365         .co             = qemu_coroutine_self(),
366         .serialising    = false,
367         .overlap_offset = offset,
368         .overlap_bytes  = bytes,
369     };
370 
371     qemu_co_queue_init(&req->wait_queue);
372 
373     QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
374 }
375 
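/*
 * Mark @req as serialising and widen its overlap range to @align boundaries
 * (@align must be a power of two), so that overlapping requests are forced to
 * wait for it in wait_serialising_requests().
 */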
376 static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
377 {
378     int64_t overlap_offset = req->offset & ~(align - 1);
379     unsigned int overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
380                                - overlap_offset;
381 
382     if (!req->serialising) {
383         req->bs->serialising_in_flight++;
384         req->serialising = true;
385     }
386 
387     req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
388     req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
389 }
390 
391 /**
392  * Round a region to cluster boundaries
393  */
394 void bdrv_round_to_clusters(BlockDriverState *bs,
395                             int64_t sector_num, int nb_sectors,
396                             int64_t *cluster_sector_num,
397                             int *cluster_nb_sectors)
398 {
399     BlockDriverInfo bdi;
400 
401     if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
402         *cluster_sector_num = sector_num;
403         *cluster_nb_sectors = nb_sectors;
404     } else {
405         int64_t c = bdi.cluster_size / BDRV_SECTOR_SIZE;
406         *cluster_sector_num = QEMU_ALIGN_DOWN(sector_num, c);
407         *cluster_nb_sectors = QEMU_ALIGN_UP(sector_num - *cluster_sector_num +
408                                             nb_sectors, c);
409     }
410 }
411 
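/*
 * Return the cluster size of the image behind @bs in bytes, falling back to
 * the request alignment when the driver does not report a cluster size.
 */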
412 static int bdrv_get_cluster_size(BlockDriverState *bs)
413 {
414     BlockDriverInfo bdi;
415     int ret;
416 
417     ret = bdrv_get_info(bs, &bdi);
418     if (ret < 0 || bdi.cluster_size == 0) {
419         return bs->request_alignment;
420     } else {
421         return bdi.cluster_size;
422     }
423 }
424 
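/*
 * Return true if the byte range [offset, offset + bytes) intersects the
 * overlap range recorded in @req (see the diagrams in the function body).
 */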
425 static bool tracked_request_overlaps(BdrvTrackedRequest *req,
426                                      int64_t offset, unsigned int bytes)
427 {
428     /*        aaaa   bbbb */
429     if (offset >= req->overlap_offset + req->overlap_bytes) {
430         return false;
431     }
432     /* bbbb   aaaa        */
433     if (req->overlap_offset >= offset + bytes) {
434         return false;
435     }
436     return true;
437 }
438 
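/*
 * Make the calling request wait until no conflicting serialising request is
 * in flight on the same BlockDriverState.  Returns true if the coroutine had
 * to yield at least once, false if it could continue immediately.
 */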
439 static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
440 {
441     BlockDriverState *bs = self->bs;
442     BdrvTrackedRequest *req;
443     bool retry;
444     bool waited = false;
445 
446     if (!bs->serialising_in_flight) {
447         return false;
448     }
449 
450     do {
451         retry = false;
452         QLIST_FOREACH(req, &bs->tracked_requests, list) {
453             if (req == self || (!req->serialising && !self->serialising)) {
454                 continue;
455             }
456             if (tracked_request_overlaps(req, self->overlap_offset,
457                                          self->overlap_bytes))
458             {
459                 /* Hitting this means there was a reentrant request, for
460                  * example, a block driver issuing nested requests.  This must
461                  * never happen since it means deadlock.
462                  */
463                 assert(qemu_coroutine_self() != req->co);
464 
465                 /* If the request is already (indirectly) waiting for us, or
466                  * will wait for us as soon as it wakes up, then just go on
467                  * (instead of producing a deadlock in the former case). */
468                 if (!req->waiting_for) {
469                     self->waiting_for = req;
470                     qemu_co_queue_wait(&req->wait_queue);
471                     self->waiting_for = NULL;
472                     retry = true;
473                     waited = true;
474                     break;
475                 }
476             }
477         }
478     } while (retry);
479 
480     return waited;
481 }
482 
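/*
 * Basic sanity checks applied to every byte-granularity request: reject
 * oversized requests, requests against a node with no medium inserted, and
 * negative offsets.
 */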
483 static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
484                                    size_t size)
485 {
486     if (size > BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS) {
487         return -EIO;
488     }
489 
490     if (!bdrv_is_inserted(bs)) {
491         return -ENOMEDIUM;
492     }
493 
494     if (offset < 0) {
495         return -EIO;
496     }
497 
498     return 0;
499 }
500 
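/*
 * Sector-granularity variant of bdrv_check_byte_request(): additionally
 * rejects negative or oversized nb_sectors values.
 */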
501 static int bdrv_check_request(BlockDriverState *bs, int64_t sector_num,
502                               int nb_sectors)
503 {
504     if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
505         return -EIO;
506     }
507 
508     return bdrv_check_byte_request(bs, sector_num * BDRV_SECTOR_SIZE,
509                                    nb_sectors * BDRV_SECTOR_SIZE);
510 }
511 
512 typedef struct RwCo {
513     BlockDriverState *bs;
514     int64_t offset;
515     QEMUIOVector *qiov;
516     bool is_write;
517     int ret;
518     BdrvRequestFlags flags;
519 } RwCo;
520 
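/*
 * Coroutine entry point used by bdrv_prwv_co() below: unpack the RwCo
 * arguments and forward them to bdrv_co_preadv() or bdrv_co_pwritev().
 */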
521 static void coroutine_fn bdrv_rw_co_entry(void *opaque)
522 {
523     RwCo *rwco = opaque;
524 
525     if (!rwco->is_write) {
526         rwco->ret = bdrv_co_preadv(rwco->bs, rwco->offset,
527                                    rwco->qiov->size, rwco->qiov,
528                                    rwco->flags);
529     } else {
530         rwco->ret = bdrv_co_pwritev(rwco->bs, rwco->offset,
531                                     rwco->qiov->size, rwco->qiov,
532                                     rwco->flags);
533     }
534 }
535 
536 /*
537  * Process a vectored synchronous request using coroutines
538  */
539 static int bdrv_prwv_co(BlockDriverState *bs, int64_t offset,
540                         QEMUIOVector *qiov, bool is_write,
541                         BdrvRequestFlags flags)
542 {
543     Coroutine *co;
544     RwCo rwco = {
545         .bs = bs,
546         .offset = offset,
547         .qiov = qiov,
548         .is_write = is_write,
549         .ret = NOT_DONE,
550         .flags = flags,
551     };
552 
553     if (qemu_in_coroutine()) {
554         /* Fast-path if already in coroutine context */
555         bdrv_rw_co_entry(&rwco);
556     } else {
557         AioContext *aio_context = bdrv_get_aio_context(bs);
558 
559         co = qemu_coroutine_create(bdrv_rw_co_entry);
560         qemu_coroutine_enter(co, &rwco);
561         while (rwco.ret == NOT_DONE) {
562             aio_poll(aio_context, true);
563         }
564     }
565     return rwco.ret;
566 }
567 
568 /*
569  * Process a synchronous request using coroutines
570  */
571 static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf,
572                       int nb_sectors, bool is_write, BdrvRequestFlags flags)
573 {
574     QEMUIOVector qiov;
575     struct iovec iov = {
576         .iov_base = (void *)buf,
577         .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
578     };
579 
580     if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
581         return -EINVAL;
582     }
583 
584     qemu_iovec_init_external(&qiov, &iov, 1);
585     return bdrv_prwv_co(bs, sector_num << BDRV_SECTOR_BITS,
586                         &qiov, is_write, flags);
587 }
588 
589 /* return < 0 if error. See bdrv_write() for the return codes */
590 int bdrv_read(BlockDriverState *bs, int64_t sector_num,
591               uint8_t *buf, int nb_sectors)
592 {
593     return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false, 0);
594 }
595 
596 /* Return < 0 if error. Important errors are:
597   -EIO         generic I/O error (may happen for all errors)
598   -ENOMEDIUM   No media inserted.
599   -EINVAL      Invalid sector number or nb_sectors
600   -EACCES      Trying to write a read-only device
601 */
602 int bdrv_write(BlockDriverState *bs, int64_t sector_num,
603                const uint8_t *buf, int nb_sectors)
604 {
605     return bdrv_rw_co(bs, sector_num, (uint8_t *)buf, nb_sectors, true, 0);
606 }
607 
608 int bdrv_write_zeroes(BlockDriverState *bs, int64_t sector_num,
609                       int nb_sectors, BdrvRequestFlags flags)
610 {
611     return bdrv_rw_co(bs, sector_num, NULL, nb_sectors, true,
612                       BDRV_REQ_ZERO_WRITE | flags);
613 }
614 
615 /*
616  * Completely zero out a block device with the help of bdrv_write_zeroes.
617  * The operation is sped up by checking the block status and only writing
618  * zeroes to regions that do not already read back as zeroes. Optional
619  * flags are passed through to bdrv_write_zeroes (e.g. BDRV_REQ_MAY_UNMAP,
620  * BDRV_REQ_FUA).
621  *
622  * Returns < 0 on error, 0 on success. For error codes see bdrv_write().
623  */
624 int bdrv_make_zero(BlockDriverState *bs, BdrvRequestFlags flags)
625 {
626     int64_t target_sectors, ret, nb_sectors, sector_num = 0;
627     BlockDriverState *file;
628     int n;
629 
630     target_sectors = bdrv_nb_sectors(bs);
631     if (target_sectors < 0) {
632         return target_sectors;
633     }
634 
635     for (;;) {
636         nb_sectors = MIN(target_sectors - sector_num, BDRV_REQUEST_MAX_SECTORS);
637         if (nb_sectors <= 0) {
638             return 0;
639         }
640         ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &n, &file);
641         if (ret < 0) {
642             error_report("error getting block status at sector %" PRId64 ": %s",
643                          sector_num, strerror(-ret));
644             return ret;
645         }
646         if (ret & BDRV_BLOCK_ZERO) {
647             sector_num += n;
648             continue;
649         }
650         ret = bdrv_write_zeroes(bs, sector_num, n, flags);
651         if (ret < 0) {
652             error_report("error writing zeroes at sector %" PRId64 ": %s",
653                          sector_num, strerror(-ret));
654             return ret;
655         }
656         sector_num += n;
657     }
658 }
659 
660 int bdrv_pread(BlockDriverState *bs, int64_t offset, void *buf, int bytes)
661 {
662     QEMUIOVector qiov;
663     struct iovec iov = {
664         .iov_base = (void *)buf,
665         .iov_len = bytes,
666     };
667     int ret;
668 
669     if (bytes < 0) {
670         return -EINVAL;
671     }
672 
673     qemu_iovec_init_external(&qiov, &iov, 1);
674     ret = bdrv_prwv_co(bs, offset, &qiov, false, 0);
675     if (ret < 0) {
676         return ret;
677     }
678 
679     return bytes;
680 }
681 
682 int bdrv_pwritev(BlockDriverState *bs, int64_t offset, QEMUIOVector *qiov)
683 {
684     int ret;
685 
686     ret = bdrv_prwv_co(bs, offset, qiov, true, 0);
687     if (ret < 0) {
688         return ret;
689     }
690 
691     return qiov->size;
692 }
693 
694 int bdrv_pwrite(BlockDriverState *bs, int64_t offset,
695                 const void *buf, int bytes)
696 {
697     QEMUIOVector qiov;
698     struct iovec iov = {
699         .iov_base   = (void *) buf,
700         .iov_len    = bytes,
701     };
702 
703     if (bytes < 0) {
704         return -EINVAL;
705     }
706 
707     qemu_iovec_init_external(&qiov, &iov, 1);
708     return bdrv_pwritev(bs, offset, &qiov);
709 }
710 
711 /*
712  * Writes to the file and ensures that no writes are reordered across this
713  * request (acts as a barrier)
714  *
715  * Returns 0 on success, -errno in error cases.
716  */
717 int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
718     const void *buf, int count)
719 {
720     int ret;
721 
722     ret = bdrv_pwrite(bs, offset, buf, count);
723     if (ret < 0) {
724         return ret;
725     }
726 
727     ret = bdrv_flush(bs);
728     if (ret < 0) {
729         return ret;
730     }
731 
732     return 0;
733 }
734 
735 typedef struct CoroutineIOCompletion {
736     Coroutine *coroutine;
737     int ret;
738 } CoroutineIOCompletion;
739 
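/*
 * Completion callback for the emulated AIO paths in bdrv_driver_preadv() and
 * bdrv_driver_pwritev(): store the return value and wake up the coroutine
 * waiting on the CoroutineIOCompletion.
 */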
740 static void bdrv_co_io_em_complete(void *opaque, int ret)
741 {
742     CoroutineIOCompletion *co = opaque;
743 
744     co->ret = ret;
745     qemu_coroutine_enter(co->coroutine, NULL);
746 }
747 
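/*
 * Dispatch an aligned read to the driver, preferring .bdrv_co_preadv, then
 * .bdrv_co_readv, and finally emulating the request on top of
 * .bdrv_aio_readv with a CoroutineIOCompletion.
 */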
748 static int coroutine_fn bdrv_driver_preadv(BlockDriverState *bs,
749                                            uint64_t offset, uint64_t bytes,
750                                            QEMUIOVector *qiov, int flags)
751 {
752     BlockDriver *drv = bs->drv;
753     int64_t sector_num;
754     unsigned int nb_sectors;
755 
756     if (drv->bdrv_co_preadv) {
757         return drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags);
758     }
759 
760     sector_num = offset >> BDRV_SECTOR_BITS;
761     nb_sectors = bytes >> BDRV_SECTOR_BITS;
762 
763     assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
764     assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
765     assert((bytes >> BDRV_SECTOR_BITS) <= BDRV_REQUEST_MAX_SECTORS);
766 
767     if (drv->bdrv_co_readv) {
768         return drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
769     } else {
770         BlockAIOCB *acb;
771         CoroutineIOCompletion co = {
772             .coroutine = qemu_coroutine_self(),
773         };
774 
775         acb = bs->drv->bdrv_aio_readv(bs, sector_num, qiov, nb_sectors,
776                                       bdrv_co_io_em_complete, &co);
777         if (acb == NULL) {
778             return -EIO;
779         } else {
780             qemu_coroutine_yield();
781             return co.ret;
782         }
783     }
784 }
785 
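/*
 * Dispatch an aligned write to the driver, trying .bdrv_co_pwritev,
 * .bdrv_co_writev_flags, .bdrv_co_writev and the .bdrv_aio_writev emulation
 * in that order.  A BDRV_REQ_FUA flag the driver cannot handle itself is
 * emulated with a bdrv_co_flush() once the write has succeeded.
 */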
786 static int coroutine_fn bdrv_driver_pwritev(BlockDriverState *bs,
787                                             uint64_t offset, uint64_t bytes,
788                                             QEMUIOVector *qiov, int flags)
789 {
790     BlockDriver *drv = bs->drv;
791     int64_t sector_num;
792     unsigned int nb_sectors;
793     int ret;
794 
795     if (drv->bdrv_co_pwritev) {
796         ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov, flags);
797         goto emulate_flags;
798     }
799 
800     sector_num = offset >> BDRV_SECTOR_BITS;
801     nb_sectors = bytes >> BDRV_SECTOR_BITS;
802 
803     assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
804     assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
805     assert((bytes >> BDRV_SECTOR_BITS) <= BDRV_REQUEST_MAX_SECTORS);
806 
807     if (drv->bdrv_co_writev_flags) {
808         ret = drv->bdrv_co_writev_flags(bs, sector_num, nb_sectors, qiov,
809                                         flags & bs->supported_write_flags);
810         flags &= ~bs->supported_write_flags;
811     } else if (drv->bdrv_co_writev) {
812         assert(!bs->supported_write_flags);
813         ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
814     } else {
815         BlockAIOCB *acb;
816         CoroutineIOCompletion co = {
817             .coroutine = qemu_coroutine_self(),
818         };
819 
820         acb = bs->drv->bdrv_aio_writev(bs, sector_num, qiov, nb_sectors,
821                                        bdrv_co_io_em_complete, &co);
822         if (acb == NULL) {
823             ret = -EIO;
824         } else {
825             qemu_coroutine_yield();
826             ret = co.ret;
827         }
828     }
829 
830 emulate_flags:
831     if (ret == 0 && (flags & BDRV_REQ_FUA)) {
832         ret = bdrv_co_flush(bs);
833     }
834 
835     return ret;
836 }
837 
838 static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
839         int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
840 {
841     /* Perform I/O through a temporary buffer so that users who scribble over
842      * their read buffer while the operation is in progress do not end up
843      * modifying the image file.  This is critical for zero-copy guest I/O
844      * where anything might happen inside guest memory.
845      */
846     void *bounce_buffer;
847 
848     BlockDriver *drv = bs->drv;
849     struct iovec iov;
850     QEMUIOVector bounce_qiov;
851     int64_t cluster_sector_num;
852     int cluster_nb_sectors;
853     size_t skip_bytes;
854     int ret;
855 
856     /* Cover the entire cluster so that no additional backing file I/O is
857      * required when allocating a cluster in the image file.
858      */
859     bdrv_round_to_clusters(bs, sector_num, nb_sectors,
860                            &cluster_sector_num, &cluster_nb_sectors);
861 
862     trace_bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors,
863                                    cluster_sector_num, cluster_nb_sectors);
864 
865     iov.iov_len = cluster_nb_sectors * BDRV_SECTOR_SIZE;
866     iov.iov_base = bounce_buffer = qemu_try_blockalign(bs, iov.iov_len);
867     if (bounce_buffer == NULL) {
868         ret = -ENOMEM;
869         goto err;
870     }
871 
872     qemu_iovec_init_external(&bounce_qiov, &iov, 1);
873 
874     ret = bdrv_driver_preadv(bs, cluster_sector_num * BDRV_SECTOR_SIZE,
875                              cluster_nb_sectors * BDRV_SECTOR_SIZE,
876                              &bounce_qiov, 0);
877     if (ret < 0) {
878         goto err;
879     }
880 
881     if (drv->bdrv_co_write_zeroes &&
882         buffer_is_zero(bounce_buffer, iov.iov_len)) {
883         ret = bdrv_co_do_write_zeroes(bs, cluster_sector_num,
884                                       cluster_nb_sectors, 0);
885     } else {
886         /* This does not change the data on the disk, so it is not necessary
887          * to flush even in cache=writethrough mode.
888          */
889         ret = bdrv_driver_pwritev(bs, cluster_sector_num * BDRV_SECTOR_SIZE,
890                                   cluster_nb_sectors * BDRV_SECTOR_SIZE,
891                                   &bounce_qiov, 0);
892     }
893 
894     if (ret < 0) {
895         /* It might be okay to ignore write errors for guest requests.  If this
896          * is a deliberate copy-on-read then we don't want to ignore the error.
897          * Simply report it in all cases.
898          */
899         goto err;
900     }
901 
902     skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE;
903     qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes,
904                         nb_sectors * BDRV_SECTOR_SIZE);
905 
906 err:
907     qemu_vfree(bounce_buffer);
908     return ret;
909 }
910 
911 /*
912  * Forwards an already correctly aligned request to the BlockDriver. This
913  * handles copy on read and zeroing after EOF; any other features must be
914  * implemented by the caller.
915  */
916 static int coroutine_fn bdrv_aligned_preadv(BlockDriverState *bs,
917     BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
918     int64_t align, QEMUIOVector *qiov, int flags)
919 {
920     int ret;
921 
922     int64_t sector_num = offset >> BDRV_SECTOR_BITS;
923     unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS;
924 
925     assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
926     assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
927     assert(!qiov || bytes == qiov->size);
928     assert((bs->open_flags & BDRV_O_NO_IO) == 0);
929 
930     /* Handle Copy on Read and associated serialisation */
931     if (flags & BDRV_REQ_COPY_ON_READ) {
932         /* If we touch the same cluster it counts as an overlap.  This
933          * guarantees that allocating writes will be serialized and not race
934          * with each other for the same cluster.  For example, in copy-on-read
935          * it ensures that the CoR read and write operations are atomic and
936          * guest writes cannot interleave between them. */
937         mark_request_serialising(req, bdrv_get_cluster_size(bs));
938     }
939 
940     if (!(flags & BDRV_REQ_NO_SERIALISING)) {
941         wait_serialising_requests(req);
942     }
943 
944     if (flags & BDRV_REQ_COPY_ON_READ) {
945         int pnum;
946 
947         ret = bdrv_is_allocated(bs, sector_num, nb_sectors, &pnum);
948         if (ret < 0) {
949             goto out;
950         }
951 
952         if (!ret || pnum != nb_sectors) {
953             ret = bdrv_co_do_copy_on_readv(bs, sector_num, nb_sectors, qiov);
954             goto out;
955         }
956     }
957 
958     /* Forward the request to the BlockDriver */
959     if (!bs->zero_beyond_eof) {
960         ret = bdrv_driver_preadv(bs, offset, bytes, qiov, 0);
961     } else {
962         /* Read zeros after EOF */
963         int64_t total_sectors, max_nb_sectors;
964 
965         total_sectors = bdrv_nb_sectors(bs);
966         if (total_sectors < 0) {
967             ret = total_sectors;
968             goto out;
969         }
970 
971         max_nb_sectors = ROUND_UP(MAX(0, total_sectors - sector_num),
972                                   align >> BDRV_SECTOR_BITS);
973         if (nb_sectors < max_nb_sectors) {
974             ret = bdrv_driver_preadv(bs, offset, bytes, qiov, 0);
975         } else if (max_nb_sectors > 0) {
976             QEMUIOVector local_qiov;
977 
978             qemu_iovec_init(&local_qiov, qiov->niov);
979             qemu_iovec_concat(&local_qiov, qiov, 0,
980                               max_nb_sectors * BDRV_SECTOR_SIZE);
981 
982             ret = bdrv_driver_preadv(bs, offset,
983                                      max_nb_sectors * BDRV_SECTOR_SIZE,
984                                      &local_qiov, 0);
985 
986             qemu_iovec_destroy(&local_qiov);
987         } else {
988             ret = 0;
989         }
990 
991         /* Reading beyond end of file is supposed to produce zeroes */
992         if (ret == 0 && total_sectors < sector_num + nb_sectors) {
993             uint64_t offset = MAX(0, total_sectors - sector_num);
994             uint64_t bytes = (sector_num + nb_sectors - offset) *
995                               BDRV_SECTOR_SIZE;
996             qemu_iovec_memset(qiov, offset * BDRV_SECTOR_SIZE, 0, bytes);
997         }
998     }
999 
1000 out:
1001     return ret;
1002 }
1003 
1004 /*
1005  * Handle a read request in coroutine context
1006  */
1007 int coroutine_fn bdrv_co_preadv(BlockDriverState *bs,
1008     int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
1009     BdrvRequestFlags flags)
1010 {
1011     BlockDriver *drv = bs->drv;
1012     BdrvTrackedRequest req;
1013 
1014     /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
1015     uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
1016     uint8_t *head_buf = NULL;
1017     uint8_t *tail_buf = NULL;
1018     QEMUIOVector local_qiov;
1019     bool use_local_qiov = false;
1020     int ret;
1021 
1022     if (!drv) {
1023         return -ENOMEDIUM;
1024     }
1025 
1026     ret = bdrv_check_byte_request(bs, offset, bytes);
1027     if (ret < 0) {
1028         return ret;
1029     }
1030 
1031     /* Don't do copy-on-read if we read data before a write operation */
1032     if (bs->copy_on_read && !(flags & BDRV_REQ_NO_SERIALISING)) {
1033         flags |= BDRV_REQ_COPY_ON_READ;
1034     }
1035 
1036     /* Align read if necessary by padding qiov */
1037     if (offset & (align - 1)) {
1038         head_buf = qemu_blockalign(bs, align);
1039         qemu_iovec_init(&local_qiov, qiov->niov + 2);
1040         qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
1041         qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
1042         use_local_qiov = true;
1043 
1044         bytes += offset & (align - 1);
1045         offset = offset & ~(align - 1);
1046     }
1047 
1048     if ((offset + bytes) & (align - 1)) {
1049         if (!use_local_qiov) {
1050             qemu_iovec_init(&local_qiov, qiov->niov + 1);
1051             qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
1052             use_local_qiov = true;
1053         }
1054         tail_buf = qemu_blockalign(bs, align);
1055         qemu_iovec_add(&local_qiov, tail_buf,
1056                        align - ((offset + bytes) & (align - 1)));
1057 
1058         bytes = ROUND_UP(bytes, align);
1059     }
1060 
1061     tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
1062     ret = bdrv_aligned_preadv(bs, &req, offset, bytes, align,
1063                               use_local_qiov ? &local_qiov : qiov,
1064                               flags);
1065     tracked_request_end(&req);
1066 
1067     if (use_local_qiov) {
1068         qemu_iovec_destroy(&local_qiov);
1069         qemu_vfree(head_buf);
1070         qemu_vfree(tail_buf);
1071     }
1072 
1073     return ret;
1074 }
1075 
1076 static int coroutine_fn bdrv_co_do_readv(BlockDriverState *bs,
1077     int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
1078     BdrvRequestFlags flags)
1079 {
1080     if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
1081         return -EINVAL;
1082     }
1083 
1084     return bdrv_co_preadv(bs, sector_num << BDRV_SECTOR_BITS,
1085                           nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
1086 }
1087 
1088 int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
1089     int nb_sectors, QEMUIOVector *qiov)
1090 {
1091     trace_bdrv_co_readv(bs, sector_num, nb_sectors);
1092 
1093     return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov, 0);
1094 }
1095 
1096 int coroutine_fn bdrv_co_readv_no_serialising(BlockDriverState *bs,
1097     int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
1098 {
1099     trace_bdrv_co_readv_no_serialising(bs, sector_num, nb_sectors);
1100 
1101     return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov,
1102                             BDRV_REQ_NO_SERIALISING);
1103 }
1104 
1105 int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
1106     int64_t sector_num, int nb_sectors, QEMUIOVector *qiov)
1107 {
1108     trace_bdrv_co_copy_on_readv(bs, sector_num, nb_sectors);
1109 
1110     return bdrv_co_do_readv(bs, sector_num, nb_sectors, qiov,
1111                             BDRV_REQ_COPY_ON_READ);
1112 }
1113 
1114 #define MAX_WRITE_ZEROES_BOUNCE_BUFFER 32768
1115 
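/*
 * Zero a sector range, honouring the driver's write_zeroes alignment and
 * maximum request size.  The efficient .bdrv_co_write_zeroes callback is
 * tried first; if it is missing or returns -ENOTSUP, the range is written
 * from a zeroed bounce buffer instead.  BDRV_REQ_FUA is emulated with a
 * single flush at the end when the driver cannot handle it itself.
 */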
1116 static int coroutine_fn bdrv_co_do_write_zeroes(BlockDriverState *bs,
1117     int64_t sector_num, int nb_sectors, BdrvRequestFlags flags)
1118 {
1119     BlockDriver *drv = bs->drv;
1120     QEMUIOVector qiov;
1121     struct iovec iov = {0};
1122     int ret = 0;
1123     bool need_flush = false;
1124 
1125     int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_write_zeroes,
1126                                         BDRV_REQUEST_MAX_SECTORS);
1127 
1128     while (nb_sectors > 0 && !ret) {
1129         int num = nb_sectors;
1130 
1131         /* Align request.  Block drivers can expect the "bulk" of the request
1132          * to be aligned.
1133          */
1134         if (bs->bl.write_zeroes_alignment
1135             && num > bs->bl.write_zeroes_alignment) {
1136             if (sector_num % bs->bl.write_zeroes_alignment != 0) {
1137                 /* Make a small request up to the first aligned sector.  */
1138                 num = bs->bl.write_zeroes_alignment;
1139                 num -= sector_num % bs->bl.write_zeroes_alignment;
1140             } else if ((sector_num + num) % bs->bl.write_zeroes_alignment != 0) {
1141                 /* Shorten the request to the last aligned sector.  num cannot
1142                  * underflow because num > bs->bl.write_zeroes_alignment.
1143                  */
1144                 num -= (sector_num + num) % bs->bl.write_zeroes_alignment;
1145             }
1146         }
1147 
1148         /* limit request size */
1149         if (num > max_write_zeroes) {
1150             num = max_write_zeroes;
1151         }
1152 
1153         ret = -ENOTSUP;
1154         /* First try the efficient write zeroes operation */
1155         if (drv->bdrv_co_write_zeroes) {
1156             ret = drv->bdrv_co_write_zeroes(bs, sector_num, num,
1157                                             flags & bs->supported_zero_flags);
1158             if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) &&
1159                 !(bs->supported_zero_flags & BDRV_REQ_FUA)) {
1160                 need_flush = true;
1161             }
1162         } else {
1163             assert(!bs->supported_zero_flags);
1164         }
1165 
1166         if (ret == -ENOTSUP) {
1167             /* Fall back to bounce buffer if write zeroes is unsupported */
1168             int max_xfer_len = MIN_NON_ZERO(bs->bl.max_transfer_length,
1169                                             MAX_WRITE_ZEROES_BOUNCE_BUFFER);
1170             BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE;
1171 
1172             if ((flags & BDRV_REQ_FUA) &&
1173                 !(bs->supported_write_flags & BDRV_REQ_FUA)) {
1174                 /* No need for bdrv_driver_pwritev() to do a fallback
1175                  * flush on each chunk; use just one at the end */
1176                 write_flags &= ~BDRV_REQ_FUA;
1177                 need_flush = true;
1178             }
1179             num = MIN(num, max_xfer_len);
1180             iov.iov_len = num * BDRV_SECTOR_SIZE;
1181             if (iov.iov_base == NULL) {
1182                 iov.iov_base = qemu_try_blockalign(bs, num * BDRV_SECTOR_SIZE);
1183                 if (iov.iov_base == NULL) {
1184                     ret = -ENOMEM;
1185                     goto fail;
1186                 }
1187                 memset(iov.iov_base, 0, num * BDRV_SECTOR_SIZE);
1188             }
1189             qemu_iovec_init_external(&qiov, &iov, 1);
1190 
1191             ret = bdrv_driver_pwritev(bs, sector_num * BDRV_SECTOR_SIZE,
1192                                       num * BDRV_SECTOR_SIZE, &qiov,
1193                                       write_flags);
1194 
1195             /* Keep the bounce buffer around if it is big enough for all
1196              * future requests.
1197              */
1198             if (num < max_xfer_len) {
1199                 qemu_vfree(iov.iov_base);
1200                 iov.iov_base = NULL;
1201             }
1202         }
1203 
1204         sector_num += num;
1205         nb_sectors -= num;
1206     }
1207 
1208 fail:
1209     if (ret == 0 && need_flush) {
1210         ret = bdrv_co_flush(bs);
1211     }
1212     qemu_vfree(iov.iov_base);
1213     return ret;
1214 }
1215 
1216 /*
1217  * Forwards an already correctly aligned write request to the BlockDriver.
1218  */
1219 static int coroutine_fn bdrv_aligned_pwritev(BlockDriverState *bs,
1220     BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
1221     QEMUIOVector *qiov, int flags)
1222 {
1223     BlockDriver *drv = bs->drv;
1224     bool waited;
1225     int ret;
1226 
1227     int64_t sector_num = offset >> BDRV_SECTOR_BITS;
1228     unsigned int nb_sectors = bytes >> BDRV_SECTOR_BITS;
1229 
1230     assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
1231     assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
1232     assert(!qiov || bytes == qiov->size);
1233     assert((bs->open_flags & BDRV_O_NO_IO) == 0);
1234 
1235     waited = wait_serialising_requests(req);
1236     assert(!waited || !req->serialising);
1237     assert(req->overlap_offset <= offset);
1238     assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);
1239 
1240     ret = notifier_with_return_list_notify(&bs->before_write_notifiers, req);
1241 
1242     if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
1243         !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_write_zeroes &&
1244         qemu_iovec_is_zero(qiov)) {
1245         flags |= BDRV_REQ_ZERO_WRITE;
1246         if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
1247             flags |= BDRV_REQ_MAY_UNMAP;
1248         }
1249     }
1250 
1251     if (ret < 0) {
1252         /* Do nothing, write notifier decided to fail this request */
1253     } else if (flags & BDRV_REQ_ZERO_WRITE) {
1254         bdrv_debug_event(bs, BLKDBG_PWRITEV_ZERO);
1255         ret = bdrv_co_do_write_zeroes(bs, sector_num, nb_sectors, flags);
1256     } else {
1257         bdrv_debug_event(bs, BLKDBG_PWRITEV);
1258         ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, flags);
1259     }
1260     bdrv_debug_event(bs, BLKDBG_PWRITEV_DONE);
1261 
1262     bdrv_set_dirty(bs, sector_num, nb_sectors);
1263 
1264     if (bs->wr_highest_offset < offset + bytes) {
1265         bs->wr_highest_offset = offset + bytes;
1266     }
1267 
1268     if (ret >= 0) {
1269         bs->total_sectors = MAX(bs->total_sectors, sector_num + nb_sectors);
1270     }
1271 
1272     return ret;
1273 }
1274 
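/*
 * Zero-write path for requests that are not aligned to the node's request
 * alignment: read-modify-write the unaligned head and tail with a bounce
 * buffer and issue a plain aligned zero write for the middle part.
 */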
1275 static int coroutine_fn bdrv_co_do_zero_pwritev(BlockDriverState *bs,
1276                                                 int64_t offset,
1277                                                 unsigned int bytes,
1278                                                 BdrvRequestFlags flags,
1279                                                 BdrvTrackedRequest *req)
1280 {
1281     uint8_t *buf = NULL;
1282     QEMUIOVector local_qiov;
1283     struct iovec iov;
1284     uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
1285     unsigned int head_padding_bytes, tail_padding_bytes;
1286     int ret = 0;
1287 
1288     head_padding_bytes = offset & (align - 1);
1289     tail_padding_bytes = align - ((offset + bytes) & (align - 1));
1290 
1291 
1292     assert(flags & BDRV_REQ_ZERO_WRITE);
1293     if (head_padding_bytes || tail_padding_bytes) {
1294         buf = qemu_blockalign(bs, align);
1295         iov = (struct iovec) {
1296             .iov_base   = buf,
1297             .iov_len    = align,
1298         };
1299         qemu_iovec_init_external(&local_qiov, &iov, 1);
1300     }
1301     if (head_padding_bytes) {
1302         uint64_t zero_bytes = MIN(bytes, align - head_padding_bytes);
1303 
1304         /* RMW the unaligned part before head. */
1305         mark_request_serialising(req, align);
1306         wait_serialising_requests(req);
1307         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
1308         ret = bdrv_aligned_preadv(bs, req, offset & ~(align - 1), align,
1309                                   align, &local_qiov, 0);
1310         if (ret < 0) {
1311             goto fail;
1312         }
1313         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
1314 
1315         memset(buf + head_padding_bytes, 0, zero_bytes);
1316         ret = bdrv_aligned_pwritev(bs, req, offset & ~(align - 1), align,
1317                                    &local_qiov,
1318                                    flags & ~BDRV_REQ_ZERO_WRITE);
1319         if (ret < 0) {
1320             goto fail;
1321         }
1322         offset += zero_bytes;
1323         bytes -= zero_bytes;
1324     }
1325 
1326     assert(!bytes || (offset & (align - 1)) == 0);
1327     if (bytes >= align) {
1328         /* Write the aligned part in the middle. */
1329         uint64_t aligned_bytes = bytes & ~(align - 1);
1330         ret = bdrv_aligned_pwritev(bs, req, offset, aligned_bytes,
1331                                    NULL, flags);
1332         if (ret < 0) {
1333             goto fail;
1334         }
1335         bytes -= aligned_bytes;
1336         offset += aligned_bytes;
1337     }
1338 
1339     assert(!bytes || (offset & (align - 1)) == 0);
1340     if (bytes) {
1341         assert(align == tail_padding_bytes + bytes);
1342         /* RMW the unaligned part after tail. */
1343         mark_request_serialising(req, align);
1344         wait_serialising_requests(req);
1345         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
1346         ret = bdrv_aligned_preadv(bs, req, offset, align,
1347                                   align, &local_qiov, 0);
1348         if (ret < 0) {
1349             goto fail;
1350         }
1351         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
1352 
1353         memset(buf, 0, bytes);
1354         ret = bdrv_aligned_pwritev(bs, req, offset, align,
1355                                    &local_qiov, flags & ~BDRV_REQ_ZERO_WRITE);
1356     }
1357 fail:
1358     qemu_vfree(buf);
1359     return ret;
1360 
1361 }
1362 
1363 /*
1364  * Handle a write request in coroutine context
1365  */
1366 int coroutine_fn bdrv_co_pwritev(BlockDriverState *bs,
1367     int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
1368     BdrvRequestFlags flags)
1369 {
1370     BdrvTrackedRequest req;
1371     /* TODO Lift BDRV_SECTOR_SIZE restriction in BlockDriver interface */
1372     uint64_t align = MAX(BDRV_SECTOR_SIZE, bs->request_alignment);
1373     uint8_t *head_buf = NULL;
1374     uint8_t *tail_buf = NULL;
1375     QEMUIOVector local_qiov;
1376     bool use_local_qiov = false;
1377     int ret;
1378 
1379     if (!bs->drv) {
1380         return -ENOMEDIUM;
1381     }
1382     if (bs->read_only) {
1383         return -EPERM;
1384     }
1385     assert(!(bs->open_flags & BDRV_O_INACTIVE));
1386 
1387     ret = bdrv_check_byte_request(bs, offset, bytes);
1388     if (ret < 0) {
1389         return ret;
1390     }
1391 
1392     /*
1393      * Align write if necessary by performing a read-modify-write cycle.
1394      * Pad qiov with the read parts and be sure to have a tracked request not
1395      * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle.
1396      */
1397     tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE);
1398 
1399     if (!qiov) {
1400         ret = bdrv_co_do_zero_pwritev(bs, offset, bytes, flags, &req);
1401         goto out;
1402     }
1403 
1404     if (offset & (align - 1)) {
1405         QEMUIOVector head_qiov;
1406         struct iovec head_iov;
1407 
1408         mark_request_serialising(&req, align);
1409         wait_serialising_requests(&req);
1410 
1411         head_buf = qemu_blockalign(bs, align);
1412         head_iov = (struct iovec) {
1413             .iov_base   = head_buf,
1414             .iov_len    = align,
1415         };
1416         qemu_iovec_init_external(&head_qiov, &head_iov, 1);
1417 
1418         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
1419         ret = bdrv_aligned_preadv(bs, &req, offset & ~(align - 1), align,
1420                                   align, &head_qiov, 0);
1421         if (ret < 0) {
1422             goto fail;
1423         }
1424         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
1425 
1426         qemu_iovec_init(&local_qiov, qiov->niov + 2);
1427         qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
1428         qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
1429         use_local_qiov = true;
1430 
1431         bytes += offset & (align - 1);
1432         offset = offset & ~(align - 1);
1433     }
1434 
1435     if ((offset + bytes) & (align - 1)) {
1436         QEMUIOVector tail_qiov;
1437         struct iovec tail_iov;
1438         size_t tail_bytes;
1439         bool waited;
1440 
1441         mark_request_serialising(&req, align);
1442         waited = wait_serialising_requests(&req);
1443         assert(!waited || !use_local_qiov);
1444 
1445         tail_buf = qemu_blockalign(bs, align);
1446         tail_iov = (struct iovec) {
1447             .iov_base   = tail_buf,
1448             .iov_len    = align,
1449         };
1450         qemu_iovec_init_external(&tail_qiov, &tail_iov, 1);
1451 
1452         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
1453         ret = bdrv_aligned_preadv(bs, &req, (offset + bytes) & ~(align - 1), align,
1454                                   align, &tail_qiov, 0);
1455         if (ret < 0) {
1456             goto fail;
1457         }
1458         bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
1459 
1460         if (!use_local_qiov) {
1461             qemu_iovec_init(&local_qiov, qiov->niov + 1);
1462             qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
1463             use_local_qiov = true;
1464         }
1465 
1466         tail_bytes = (offset + bytes) & (align - 1);
1467         qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes);
1468 
1469         bytes = ROUND_UP(bytes, align);
1470     }
1471 
1472     ret = bdrv_aligned_pwritev(bs, &req, offset, bytes,
1473                                use_local_qiov ? &local_qiov : qiov,
1474                                flags);
1475 
1476 fail:
1477 
1478     if (use_local_qiov) {
1479         qemu_iovec_destroy(&local_qiov);
1480     }
1481     qemu_vfree(head_buf);
1482     qemu_vfree(tail_buf);
1483 out:
1484     tracked_request_end(&req);
1485     return ret;
1486 }
1487 
1488 static int coroutine_fn bdrv_co_do_writev(BlockDriverState *bs,
1489     int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
1490     BdrvRequestFlags flags)
1491 {
1492     if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
1493         return -EINVAL;
1494     }
1495 
1496     return bdrv_co_pwritev(bs, sector_num << BDRV_SECTOR_BITS,
1497                            nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
1498 }
1499 
1500 int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
1501     int nb_sectors, QEMUIOVector *qiov)
1502 {
1503     trace_bdrv_co_writev(bs, sector_num, nb_sectors);
1504 
1505     return bdrv_co_do_writev(bs, sector_num, nb_sectors, qiov, 0);
1506 }
1507 
1508 int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs,
1509                                       int64_t sector_num, int nb_sectors,
1510                                       BdrvRequestFlags flags)
1511 {
1512     trace_bdrv_co_write_zeroes(bs, sector_num, nb_sectors, flags);
1513 
1514     if (!(bs->open_flags & BDRV_O_UNMAP)) {
1515         flags &= ~BDRV_REQ_MAY_UNMAP;
1516     }
1517 
1518     return bdrv_co_do_writev(bs, sector_num, nb_sectors, NULL,
1519                              BDRV_REQ_ZERO_WRITE | flags);
1520 }
1521 
1522 typedef struct BdrvCoGetBlockStatusData {
1523     BlockDriverState *bs;
1524     BlockDriverState *base;
1525     BlockDriverState **file;
1526     int64_t sector_num;
1527     int nb_sectors;
1528     int *pnum;
1529     int64_t ret;
1530     bool done;
1531 } BdrvCoGetBlockStatusData;
1532 
1533 /*
1534  * Returns the allocation status of the specified sectors.
1535  * Drivers not implementing the functionality are assumed to not support
1536  * backing files, hence all their sectors are reported as allocated.
1537  *
1538  * If 'sector_num' is beyond the end of the disk image the return value is 0
1539  * and 'pnum' is set to 0.
1540  *
1541  * 'pnum' is set to the number of sectors (including and immediately following
1542  * the specified sector) that are known to be in the same
1543  * allocated/unallocated state.
1544  *
1545  * 'nb_sectors' is the max value 'pnum' should be set to.  If nb_sectors goes
1546  * beyond the end of the disk image it will be clamped.
1547  *
1548  * If returned value is positive and BDRV_BLOCK_OFFSET_VALID bit is set, 'file'
1549  * points to the BDS which the sector range is allocated in.
1550  */
1551 static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs,
1552                                                      int64_t sector_num,
1553                                                      int nb_sectors, int *pnum,
1554                                                      BlockDriverState **file)
1555 {
1556     int64_t total_sectors;
1557     int64_t n;
1558     int64_t ret, ret2;
1559 
1560     total_sectors = bdrv_nb_sectors(bs);
1561     if (total_sectors < 0) {
1562         return total_sectors;
1563     }
1564 
1565     if (sector_num >= total_sectors) {
1566         *pnum = 0;
1567         return 0;
1568     }
1569 
1570     n = total_sectors - sector_num;
1571     if (n < nb_sectors) {
1572         nb_sectors = n;
1573     }
1574 
1575     if (!bs->drv->bdrv_co_get_block_status) {
1576         *pnum = nb_sectors;
1577         ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
1578         if (bs->drv->protocol_name) {
1579             ret |= BDRV_BLOCK_OFFSET_VALID | (sector_num * BDRV_SECTOR_SIZE);
1580         }
1581         return ret;
1582     }
1583 
1584     *file = NULL;
1585     ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum,
1586                                             file);
1587     if (ret < 0) {
1588         *pnum = 0;
1589         return ret;
1590     }
1591 
1592     if (ret & BDRV_BLOCK_RAW) {
1593         assert(ret & BDRV_BLOCK_OFFSET_VALID);
1594         return bdrv_get_block_status(bs->file->bs, ret >> BDRV_SECTOR_BITS,
1595                                      *pnum, pnum, file);
1596     }
1597 
1598     if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
1599         ret |= BDRV_BLOCK_ALLOCATED;
1600     } else {
1601         if (bdrv_unallocated_blocks_are_zero(bs)) {
1602             ret |= BDRV_BLOCK_ZERO;
1603         } else if (bs->backing) {
1604             BlockDriverState *bs2 = bs->backing->bs;
1605             int64_t nb_sectors2 = bdrv_nb_sectors(bs2);
1606             if (nb_sectors2 >= 0 && sector_num >= nb_sectors2) {
1607                 ret |= BDRV_BLOCK_ZERO;
1608             }
1609         }
1610     }
1611 
1612     if (*file && *file != bs &&
1613         (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
1614         (ret & BDRV_BLOCK_OFFSET_VALID)) {
1615         BlockDriverState *file2;
1616         int file_pnum;
1617 
1618         ret2 = bdrv_co_get_block_status(*file, ret >> BDRV_SECTOR_BITS,
1619                                         *pnum, &file_pnum, &file2);
1620         if (ret2 >= 0) {
1621             /* Ignore errors.  This is just providing extra information;
1622              * it is useful but not necessary.
1623              */
1624             if (!file_pnum) {
1625                 /* !file_pnum indicates an offset at or beyond the EOF; it is
1626                  * perfectly valid for the format block driver to point to such
1627                  * offsets, so catch it and mark everything as zero */
1628                 ret |= BDRV_BLOCK_ZERO;
1629             } else {
1630                 /* Limit request to the range reported by the protocol driver */
1631                 *pnum = file_pnum;
1632                 ret |= (ret2 & BDRV_BLOCK_ZERO);
1633             }
1634         }
1635     }
1636 
1637     return ret;
1638 }
1639 
1640 static int64_t coroutine_fn bdrv_co_get_block_status_above(BlockDriverState *bs,
1641         BlockDriverState *base,
1642         int64_t sector_num,
1643         int nb_sectors,
1644         int *pnum,
1645         BlockDriverState **file)
1646 {
1647     BlockDriverState *p;
1648     int64_t ret = 0;
1649 
1650     assert(bs != base);
1651     for (p = bs; p != base; p = backing_bs(p)) {
1652         ret = bdrv_co_get_block_status(p, sector_num, nb_sectors, pnum, file);
1653         if (ret < 0 || ret & BDRV_BLOCK_ALLOCATED) {
1654             break;
1655         }
1656         /* [sector_num, pnum] unallocated on this layer, which could be only
1657          * the first part of [sector_num, nb_sectors].  */
1658         nb_sectors = MIN(nb_sectors, *pnum);
1659     }
1660     return ret;
1661 }
1662 
1663 /* Coroutine wrapper for bdrv_get_block_status_above() */
1664 static void coroutine_fn bdrv_get_block_status_above_co_entry(void *opaque)
1665 {
1666     BdrvCoGetBlockStatusData *data = opaque;
1667 
1668     data->ret = bdrv_co_get_block_status_above(data->bs, data->base,
1669                                                data->sector_num,
1670                                                data->nb_sectors,
1671                                                data->pnum,
1672                                                data->file);
1673     data->done = true;
1674 }
1675 
1676 /*
1677  * Synchronous wrapper around bdrv_co_get_block_status_above().
1678  *
1679  * See bdrv_co_get_block_status_above() for details.
1680  */
1681 int64_t bdrv_get_block_status_above(BlockDriverState *bs,
1682                                     BlockDriverState *base,
1683                                     int64_t sector_num,
1684                                     int nb_sectors, int *pnum,
1685                                     BlockDriverState **file)
1686 {
1687     Coroutine *co;
1688     BdrvCoGetBlockStatusData data = {
1689         .bs = bs,
1690         .base = base,
1691         .file = file,
1692         .sector_num = sector_num,
1693         .nb_sectors = nb_sectors,
1694         .pnum = pnum,
1695         .done = false,
1696     };
1697 
1698     if (qemu_in_coroutine()) {
1699         /* Fast-path if already in coroutine context */
1700         bdrv_get_block_status_above_co_entry(&data);
1701     } else {
1702         AioContext *aio_context = bdrv_get_aio_context(bs);
1703 
1704         co = qemu_coroutine_create(bdrv_get_block_status_above_co_entry);
1705         qemu_coroutine_enter(co, &data);
1706         while (!data.done) {
1707             aio_poll(aio_context, true);
1708         }
1709     }
1710     return data.ret;
1711 }
1712 
1713 int64_t bdrv_get_block_status(BlockDriverState *bs,
1714                               int64_t sector_num,
1715                               int nb_sectors, int *pnum,
1716                               BlockDriverState **file)
1717 {
1718     return bdrv_get_block_status_above(bs, backing_bs(bs),
1719                                        sector_num, nb_sectors, pnum, file);
1720 }
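
/*
 * Usage sketch (illustrative only, hypothetical variables): a caller walking
 * an image one extent at a time and classifying what it finds.  Error
 * handling is reduced to the bare minimum.
 *
 *     BlockDriverState *file;
 *     int pnum;
 *     int64_t status = bdrv_get_block_status(bs, sector_num, nb_sectors,
 *                                            &pnum, &file);
 *     if (status < 0) {
 *         return status;                          // -errno
 *     }
 *     if (status & BDRV_BLOCK_ZERO) {
 *         // the next 'pnum' sectors read as zeroes
 *     } else if (status & BDRV_BLOCK_DATA) {
 *         // data is stored; with BDRV_BLOCK_OFFSET_VALID it lives in 'file'
 *         // at host sector (status >> BDRV_SECTOR_BITS)
 *     }
 *     sector_num += pnum;
 */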
1721 
1722 int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num,
1723                                    int nb_sectors, int *pnum)
1724 {
1725     BlockDriverState *file;
1726     int64_t ret = bdrv_get_block_status(bs, sector_num, nb_sectors, pnum,
1727                                         &file);
1728     if (ret < 0) {
1729         return ret;
1730     }
1731     return !!(ret & BDRV_BLOCK_ALLOCATED);
1732 }
1733 
1734 /*
1735  * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
1736  *
1737  * Return true if the given sector is allocated in any image between
1738  * BASE and TOP (inclusive).  BASE can be NULL to check if the given
1739  * sector is allocated in any image of the chain.  Return false otherwise.
1740  *
1741  * 'pnum' is set to the number of sectors (including and immediately following
1742  * the specified sector) that are known to be in the same
1743  * allocated/unallocated state.
1745  */
1746 int bdrv_is_allocated_above(BlockDriverState *top,
1747                             BlockDriverState *base,
1748                             int64_t sector_num,
1749                             int nb_sectors, int *pnum)
1750 {
1751     BlockDriverState *intermediate;
1752     int ret, n = nb_sectors;
1753 
1754     intermediate = top;
1755     while (intermediate && intermediate != base) {
1756         int pnum_inter;
1757         ret = bdrv_is_allocated(intermediate, sector_num, nb_sectors,
1758                                 &pnum_inter);
1759         if (ret < 0) {
1760             return ret;
1761         } else if (ret) {
1762             *pnum = pnum_inter;
1763             return 1;
1764         }
1765 
1766         /*
1767          * [sector_num, nb_sectors] is unallocated on top but an
1768          * intermediate image might have
1769          * [sector_num+x, nb_sectors] allocated somewhere in between, so
1770          * restrict the reported unallocated range accordingly.
1771          */
1772         if (n > pnum_inter &&
1773             (intermediate == top ||
1774              sector_num + pnum_inter < intermediate->total_sectors)) {
1775             n = pnum_inter;
1776         }
1777 
1778         intermediate = backing_bs(intermediate);
1779     }
1780 
1781     *pnum = n;
1782     return 0;
1783 }
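
/*
 * Usage sketch (illustrative only, hypothetical variables): copy loops such
 * as those used by block jobs only need to read ranges that are allocated
 * somewhere between 'top' and 'base':
 *
 *     int n;
 *     int ret = bdrv_is_allocated_above(top, base, sector_num, nb_sectors, &n);
 *     if (ret < 0) {
 *         return ret;
 *     }
 *     if (ret) {
 *         // copy sectors [sector_num, sector_num + n) from 'top'
 *     }
 *     sector_num += n;
 */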
1784 
1785 int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,
1786                           const uint8_t *buf, int nb_sectors)
1787 {
1788     BlockDriver *drv = bs->drv;
1789     int ret;
1790 
1791     if (!drv) {
1792         return -ENOMEDIUM;
1793     }
1794     if (!drv->bdrv_write_compressed) {
1795         return -ENOTSUP;
1796     }
1797     ret = bdrv_check_request(bs, sector_num, nb_sectors);
1798     if (ret < 0) {
1799         return ret;
1800     }
1801 
1802     assert(QLIST_EMPTY(&bs->dirty_bitmaps));
1803 
1804     return drv->bdrv_write_compressed(bs, sector_num, buf, nb_sectors);
1805 }
1806 
1807 int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
1808                       int64_t pos, int size)
1809 {
1810     QEMUIOVector qiov;
1811     struct iovec iov = {
1812         .iov_base   = (void *) buf,
1813         .iov_len    = size,
1814     };
1815 
1816     qemu_iovec_init_external(&qiov, &iov, 1);
1817     return bdrv_writev_vmstate(bs, &qiov, pos);
1818 }
1819 
1820 int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
1821 {
1822     BlockDriver *drv = bs->drv;
1823 
1824     if (!drv) {
1825         return -ENOMEDIUM;
1826     } else if (drv->bdrv_save_vmstate) {
1827         return drv->bdrv_save_vmstate(bs, qiov, pos);
1828     } else if (bs->file) {
1829         return bdrv_writev_vmstate(bs->file->bs, qiov, pos);
1830     }
1831 
1832     return -ENOTSUP;
1833 }
1834 
1835 int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
1836                       int64_t pos, int size)
1837 {
1838     BlockDriver *drv = bs->drv;
1839     if (!drv)
1840         return -ENOMEDIUM;
1841     if (drv->bdrv_load_vmstate)
1842         return drv->bdrv_load_vmstate(bs, buf, pos, size);
1843     if (bs->file)
1844         return bdrv_load_vmstate(bs->file->bs, buf, pos, size);
1845     return -ENOTSUP;
1846 }
1847 
1848 /**************************************************************/
1849 /* async I/Os */
1850 
1851 BlockAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
1852                            QEMUIOVector *qiov, int nb_sectors,
1853                            BlockCompletionFunc *cb, void *opaque)
1854 {
1855     trace_bdrv_aio_readv(bs, sector_num, nb_sectors, opaque);
1856 
1857     return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
1858                                  cb, opaque, false);
1859 }
1860 
1861 BlockAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
1862                             QEMUIOVector *qiov, int nb_sectors,
1863                             BlockCompletionFunc *cb, void *opaque)
1864 {
1865     trace_bdrv_aio_writev(bs, sector_num, nb_sectors, opaque);
1866 
1867     return bdrv_co_aio_rw_vector(bs, sector_num, qiov, nb_sectors, 0,
1868                                  cb, opaque, true);
1869 }
1870 
1871 BlockAIOCB *bdrv_aio_write_zeroes(BlockDriverState *bs,
1872         int64_t sector_num, int nb_sectors, BdrvRequestFlags flags,
1873         BlockCompletionFunc *cb, void *opaque)
1874 {
1875     trace_bdrv_aio_write_zeroes(bs, sector_num, nb_sectors, flags, opaque);
1876 
1877     return bdrv_co_aio_rw_vector(bs, sector_num, NULL, nb_sectors,
1878                                  BDRV_REQ_ZERO_WRITE | flags,
1879                                  cb, opaque, true);
1880 }
1881 
1882 void bdrv_aio_cancel(BlockAIOCB *acb)
1883 {
1884     qemu_aio_ref(acb);
1885     bdrv_aio_cancel_async(acb);
1886     while (acb->refcnt > 1) {
1887         if (acb->aiocb_info->get_aio_context) {
1888             aio_poll(acb->aiocb_info->get_aio_context(acb), true);
1889         } else if (acb->bs) {
1890             aio_poll(bdrv_get_aio_context(acb->bs), true);
1891         } else {
1892             abort();
1893         }
1894     }
1895     qemu_aio_unref(acb);
1896 }
1897 
1898 /* Async version of aio cancel. The caller is not blocked if the acb implements
1899  * cancel_async, otherwise we do nothing and let the request complete normally.
1900  * In either case the completion callback must be called. */
1901 void bdrv_aio_cancel_async(BlockAIOCB *acb)
1902 {
1903     if (acb->aiocb_info->cancel_async) {
1904         acb->aiocb_info->cancel_async(acb);
1905     }
1906 }
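
/*
 * Usage sketch (illustrative only; my_read_cb and my_opaque are hypothetical):
 * cancellation is only requested, so the completion callback still runs with
 * whatever result the driver produces:
 *
 *     BlockAIOCB *acb = bdrv_aio_readv(bs, sector_num, qiov, nb_sectors,
 *                                      my_read_cb, my_opaque);
 *     ...
 *     bdrv_aio_cancel_async(acb);   // non-blocking; my_read_cb still runs
 */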
1907 
1908 /**************************************************************/
1909 /* async block device emulation */
1910 
1911 typedef struct BlockAIOCBCoroutine {
1912     BlockAIOCB common;
1913     BlockRequest req;
1914     bool is_write;
1915     bool need_bh;
1916     bool *done;
1917     QEMUBH *bh;
1918 } BlockAIOCBCoroutine;
1919 
1920 static const AIOCBInfo bdrv_em_co_aiocb_info = {
1921     .aiocb_size         = sizeof(BlockAIOCBCoroutine),
1922 };
1923 
1924 static void bdrv_co_complete(BlockAIOCBCoroutine *acb)
1925 {
1926     if (!acb->need_bh) {
1927         acb->common.cb(acb->common.opaque, acb->req.error);
1928         qemu_aio_unref(acb);
1929     }
1930 }
1931 
1932 static void bdrv_co_em_bh(void *opaque)
1933 {
1934     BlockAIOCBCoroutine *acb = opaque;
1935 
1936     assert(!acb->need_bh);
1937     qemu_bh_delete(acb->bh);
1938     bdrv_co_complete(acb);
1939 }
1940 
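/* The completion callback of an emulated AIO request must not run before the
 * bdrv_aio_* function has returned its BlockAIOCB to the caller.  'need_bh'
 * is set while the request is being submitted; if the coroutine finishes
 * synchronously, bdrv_co_complete() skips the callback and this function
 * defers it to a bottom half in the BDS's AioContext instead. */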
1941 static void bdrv_co_maybe_schedule_bh(BlockAIOCBCoroutine *acb)
1942 {
1943     acb->need_bh = false;
1944     if (acb->req.error != -EINPROGRESS) {
1945         BlockDriverState *bs = acb->common.bs;
1946 
1947         acb->bh = aio_bh_new(bdrv_get_aio_context(bs), bdrv_co_em_bh, acb);
1948         qemu_bh_schedule(acb->bh);
1949     }
1950 }
1951 
1952 /* Invoke bdrv_co_do_readv/bdrv_co_do_writev */
1953 static void coroutine_fn bdrv_co_do_rw(void *opaque)
1954 {
1955     BlockAIOCBCoroutine *acb = opaque;
1956     BlockDriverState *bs = acb->common.bs;
1957 
1958     if (!acb->is_write) {
1959         acb->req.error = bdrv_co_do_readv(bs, acb->req.sector,
1960             acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
1961     } else {
1962         acb->req.error = bdrv_co_do_writev(bs, acb->req.sector,
1963             acb->req.nb_sectors, acb->req.qiov, acb->req.flags);
1964     }
1965 
1966     bdrv_co_complete(acb);
1967 }
1968 
1969 static BlockAIOCB *bdrv_co_aio_rw_vector(BlockDriverState *bs,
1970                                          int64_t sector_num,
1971                                          QEMUIOVector *qiov,
1972                                          int nb_sectors,
1973                                          BdrvRequestFlags flags,
1974                                          BlockCompletionFunc *cb,
1975                                          void *opaque,
1976                                          bool is_write)
1977 {
1978     Coroutine *co;
1979     BlockAIOCBCoroutine *acb;
1980 
1981     acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
1982     acb->need_bh = true;
1983     acb->req.error = -EINPROGRESS;
1984     acb->req.sector = sector_num;
1985     acb->req.nb_sectors = nb_sectors;
1986     acb->req.qiov = qiov;
1987     acb->req.flags = flags;
1988     acb->is_write = is_write;
1989 
1990     co = qemu_coroutine_create(bdrv_co_do_rw);
1991     qemu_coroutine_enter(co, acb);
1992 
1993     bdrv_co_maybe_schedule_bh(acb);
1994     return &acb->common;
1995 }
1996 
1997 static void coroutine_fn bdrv_aio_flush_co_entry(void *opaque)
1998 {
1999     BlockAIOCBCoroutine *acb = opaque;
2000     BlockDriverState *bs = acb->common.bs;
2001 
2002     acb->req.error = bdrv_co_flush(bs);
2003     bdrv_co_complete(acb);
2004 }
2005 
2006 BlockAIOCB *bdrv_aio_flush(BlockDriverState *bs,
2007         BlockCompletionFunc *cb, void *opaque)
2008 {
2009     trace_bdrv_aio_flush(bs, opaque);
2010 
2011     Coroutine *co;
2012     BlockAIOCBCoroutine *acb;
2013 
2014     acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
2015     acb->need_bh = true;
2016     acb->req.error = -EINPROGRESS;
2017 
2018     co = qemu_coroutine_create(bdrv_aio_flush_co_entry);
2019     qemu_coroutine_enter(co, acb);
2020 
2021     bdrv_co_maybe_schedule_bh(acb);
2022     return &acb->common;
2023 }
2024 
2025 static void coroutine_fn bdrv_aio_discard_co_entry(void *opaque)
2026 {
2027     BlockAIOCBCoroutine *acb = opaque;
2028     BlockDriverState *bs = acb->common.bs;
2029 
2030     acb->req.error = bdrv_co_discard(bs, acb->req.sector, acb->req.nb_sectors);
2031     bdrv_co_complete(acb);
2032 }
2033 
2034 BlockAIOCB *bdrv_aio_discard(BlockDriverState *bs,
2035         int64_t sector_num, int nb_sectors,
2036         BlockCompletionFunc *cb, void *opaque)
2037 {
2038     Coroutine *co;
2039     BlockAIOCBCoroutine *acb;
2040 
2041     trace_bdrv_aio_discard(bs, sector_num, nb_sectors, opaque);
2042 
2043     acb = qemu_aio_get(&bdrv_em_co_aiocb_info, bs, cb, opaque);
2044     acb->need_bh = true;
2045     acb->req.error = -EINPROGRESS;
2046     acb->req.sector = sector_num;
2047     acb->req.nb_sectors = nb_sectors;
2048     co = qemu_coroutine_create(bdrv_aio_discard_co_entry);
2049     qemu_coroutine_enter(co, acb);
2050 
2051     bdrv_co_maybe_schedule_bh(acb);
2052     return &acb->common;
2053 }
2054 
2055 void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
2056                    BlockCompletionFunc *cb, void *opaque)
2057 {
2058     BlockAIOCB *acb;
2059 
2060     acb = g_malloc(aiocb_info->aiocb_size);
2061     acb->aiocb_info = aiocb_info;
2062     acb->bs = bs;
2063     acb->cb = cb;
2064     acb->opaque = opaque;
2065     acb->refcnt = 1;
2066     return acb;
2067 }
2068 
2069 void qemu_aio_ref(void *p)
2070 {
2071     BlockAIOCB *acb = p;
2072     acb->refcnt++;
2073 }
2074 
2075 void qemu_aio_unref(void *p)
2076 {
2077     BlockAIOCB *acb = p;
2078     assert(acb->refcnt > 0);
2079     if (--acb->refcnt == 0) {
2080         g_free(acb);
2081     }
2082 }
2083 
2084 /**************************************************************/
2085 /* Coroutine block device emulation */
2086 
2087 static void coroutine_fn bdrv_flush_co_entry(void *opaque)
2088 {
2089     RwCo *rwco = opaque;
2090 
2091     rwco->ret = bdrv_co_flush(rwco->bs);
2092 }
2093 
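/* Flush 'bs' and its underlying layers.  A driver that implements
 * bdrv_co_flush handles all stages in one call; otherwise cached data is
 * first written back to the OS (bdrv_co_flush_to_os), then forced to stable
 * storage (bdrv_co_flush_to_disk or bdrv_aio_flush) unless BDRV_O_NO_FLUSH
 * is set, and finally the flush is propagated to bs->file. */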
2094 int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
2095 {
2096     int ret;
2097     BdrvTrackedRequest req;
2098 
2099     if (!bs || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs) ||
2100         bdrv_is_sg(bs)) {
2101         return 0;
2102     }
2103 
2104     tracked_request_begin(&req, bs, 0, 0, BDRV_TRACKED_FLUSH);
2105 
2106     /* Write back all layers by calling one driver function */
2107     if (bs->drv->bdrv_co_flush) {
2108         ret = bs->drv->bdrv_co_flush(bs);
2109         goto out;
2110     }
2111 
2112     /* Write back cached data to the OS even with cache=unsafe */
2113     BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS);
2114     if (bs->drv->bdrv_co_flush_to_os) {
2115         ret = bs->drv->bdrv_co_flush_to_os(bs);
2116         if (ret < 0) {
2117             goto out;
2118         }
2119     }
2120 
2121     /* But don't actually force it to the disk with cache=unsafe */
2122     if (bs->open_flags & BDRV_O_NO_FLUSH) {
2123         goto flush_parent;
2124     }
2125 
2126     BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK);
2127     if (bs->drv->bdrv_co_flush_to_disk) {
2128         ret = bs->drv->bdrv_co_flush_to_disk(bs);
2129     } else if (bs->drv->bdrv_aio_flush) {
2130         BlockAIOCB *acb;
2131         CoroutineIOCompletion co = {
2132             .coroutine = qemu_coroutine_self(),
2133         };
2134 
2135         acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
2136         if (acb == NULL) {
2137             ret = -EIO;
2138         } else {
2139             qemu_coroutine_yield();
2140             ret = co.ret;
2141         }
2142     } else {
2143         /*
2144          * Some block drivers always operate in either writethrough or unsafe
2145          * mode and therefore don't support bdrv_flush. Usually qemu doesn't
2146          * know how the server works (because the behaviour is hardcoded or
2147          * depends on server-side configuration), so we can't ensure that
2148          * everything is safe on disk. Returning an error doesn't work because
2149          * that would break guests even if the server operates in writethrough
2150          * mode.
2151          *
2152          * Let's hope the user knows what they're doing.
2153          */
2154         ret = 0;
2155     }
2156     if (ret < 0) {
2157         goto out;
2158     }
2159 
2160     /* Now flush the underlying protocol.  It will also have BDRV_O_NO_FLUSH
2161      * set in the case of cache=unsafe, so there are no useless flushes.
2162      */
2163 flush_parent:
2164     ret = bs->file ? bdrv_co_flush(bs->file->bs) : 0;
2165 out:
2166     tracked_request_end(&req);
2167     return ret;
2168 }
2169 
2170 int bdrv_flush(BlockDriverState *bs)
2171 {
2172     Coroutine *co;
2173     RwCo rwco = {
2174         .bs = bs,
2175         .ret = NOT_DONE,
2176     };
2177 
2178     if (qemu_in_coroutine()) {
2179         /* Fast-path if already in coroutine context */
2180         bdrv_flush_co_entry(&rwco);
2181     } else {
2182         AioContext *aio_context = bdrv_get_aio_context(bs);
2183 
2184         co = qemu_coroutine_create(bdrv_flush_co_entry);
2185         qemu_coroutine_enter(co, &rwco);
2186         while (rwco.ret == NOT_DONE) {
2187             aio_poll(aio_context, true);
2188         }
2189     }
2190 
2191     return rwco.ret;
2192 }
2193 
2194 typedef struct DiscardCo {
2195     BlockDriverState *bs;
2196     int64_t sector_num;
2197     int nb_sectors;
2198     int ret;
2199 } DiscardCo;
2200 static void coroutine_fn bdrv_discard_co_entry(void *opaque)
2201 {
2202     DiscardCo *rwco = opaque;
2203 
2204     rwco->ret = bdrv_co_discard(rwco->bs, rwco->sector_num, rwco->nb_sectors);
2205 }
2206 
2207 int coroutine_fn bdrv_co_discard(BlockDriverState *bs, int64_t sector_num,
2208                                  int nb_sectors)
2209 {
2210     BdrvTrackedRequest req;
2211     int max_discard, ret;
2212 
2213     if (!bs->drv) {
2214         return -ENOMEDIUM;
2215     }
2216 
2217     ret = bdrv_check_request(bs, sector_num, nb_sectors);
2218     if (ret < 0) {
2219         return ret;
2220     } else if (bs->read_only) {
2221         return -EPERM;
2222     }
2223     assert(!(bs->open_flags & BDRV_O_INACTIVE));
2224 
2225     /* Do nothing if disabled.  */
2226     if (!(bs->open_flags & BDRV_O_UNMAP)) {
2227         return 0;
2228     }
2229 
2230     if (!bs->drv->bdrv_co_discard && !bs->drv->bdrv_aio_discard) {
2231         return 0;
2232     }
2233 
2234     tracked_request_begin(&req, bs, sector_num, nb_sectors,
2235                           BDRV_TRACKED_DISCARD);
2236     bdrv_set_dirty(bs, sector_num, nb_sectors);
2237 
2238     max_discard = MIN_NON_ZERO(bs->bl.max_discard, BDRV_REQUEST_MAX_SECTORS);
2239     while (nb_sectors > 0) {
2240         int ret;
2241         int num = nb_sectors;
2242 
2243         /* align request */
2244         if (bs->bl.discard_alignment &&
2245             num >= bs->bl.discard_alignment &&
2246             sector_num % bs->bl.discard_alignment) {
2247             if (num > bs->bl.discard_alignment) {
2248                 num = bs->bl.discard_alignment;
2249             }
2250             num -= sector_num % bs->bl.discard_alignment;
2251         }
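
        /* Example (hypothetical numbers): with discard_alignment == 8 and a
         * large max_discard, a request for 20 sectors starting at sector 5 is
         * split into a 3-sector head (sectors 5..7) that reaches the next
         * alignment boundary, followed by the remaining 17 sectors starting
         * at sector 8. */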
2252 
2253         /* limit request size */
2254         if (num > max_discard) {
2255             num = max_discard;
2256         }
2257 
2258         if (bs->drv->bdrv_co_discard) {
2259             ret = bs->drv->bdrv_co_discard(bs, sector_num, num);
2260         } else {
2261             BlockAIOCB *acb;
2262             CoroutineIOCompletion co = {
2263                 .coroutine = qemu_coroutine_self(),
2264             };
2265 
2266             acb = bs->drv->bdrv_aio_discard(bs, sector_num, num,
2267                                             bdrv_co_io_em_complete, &co);
2268             if (acb == NULL) {
2269                 ret = -EIO;
2270                 goto out;
2271             } else {
2272                 qemu_coroutine_yield();
2273                 ret = co.ret;
2274             }
2275         }
2276         if (ret && ret != -ENOTSUP) {
2277             goto out;
2278         }
2279 
2280         sector_num += num;
2281         nb_sectors -= num;
2282     }
2283     ret = 0;
2284 out:
2285     tracked_request_end(&req);
2286     return ret;
2287 }
2288 
2289 int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors)
2290 {
2291     Coroutine *co;
2292     DiscardCo rwco = {
2293         .bs = bs,
2294         .sector_num = sector_num,
2295         .nb_sectors = nb_sectors,
2296         .ret = NOT_DONE,
2297     };
2298 
2299     if (qemu_in_coroutine()) {
2300         /* Fast-path if already in coroutine context */
2301         bdrv_discard_co_entry(&rwco);
2302     } else {
2303         AioContext *aio_context = bdrv_get_aio_context(bs);
2304 
2305         co = qemu_coroutine_create(bdrv_discard_co_entry);
2306         qemu_coroutine_enter(co, &rwco);
2307         while (rwco.ret == NOT_DONE) {
2308             aio_poll(aio_context, true);
2309         }
2310     }
2311 
2312     return rwco.ret;
2313 }
2314 
2315 typedef struct {
2316     CoroutineIOCompletion *co;
2317     QEMUBH *bh;
2318 } BdrvIoctlCompletionData;
2319 
2320 static void bdrv_ioctl_bh_cb(void *opaque)
2321 {
2322     BdrvIoctlCompletionData *data = opaque;
2323     bdrv_co_io_em_complete(data->co, -ENOTSUP);
2324     qemu_bh_delete(data->bh);
2325     g_free(data);
2326 }
2327 
2328 static int bdrv_co_do_ioctl(BlockDriverState *bs, int req, void *buf)
2329 {
2330     BlockDriver *drv = bs->drv;
2331     BdrvTrackedRequest tracked_req;
2332     CoroutineIOCompletion co = {
2333         .coroutine = qemu_coroutine_self(),
2334     };
2335     BlockAIOCB *acb;
2336 
2337     tracked_request_begin(&tracked_req, bs, 0, 0, BDRV_TRACKED_IOCTL);
2338     if (!drv || !drv->bdrv_aio_ioctl) {
2339         co.ret = -ENOTSUP;
2340         goto out;
2341     }
2342 
2343     acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co);
2344     if (!acb) {
2345         BdrvIoctlCompletionData *data = g_new(BdrvIoctlCompletionData, 1);
2346         data->bh = aio_bh_new(bdrv_get_aio_context(bs),
2347                                 bdrv_ioctl_bh_cb, data);
2348         data->co = &co;
2349         qemu_bh_schedule(data->bh);
2350     }
2351     qemu_coroutine_yield();
2352 out:
2353     tracked_request_end(&tracked_req);
2354     return co.ret;
2355 }
2356 
2357 typedef struct {
2358     BlockDriverState *bs;
2359     int req;
2360     void *buf;
2361     int ret;
2362 } BdrvIoctlCoData;
2363 
2364 static void coroutine_fn bdrv_co_ioctl_entry(void *opaque)
2365 {
2366     BdrvIoctlCoData *data = opaque;
2367     data->ret = bdrv_co_do_ioctl(data->bs, data->req, data->buf);
2368 }
2369 
2370 /* needed for generic scsi interface */
2371 int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
2372 {
2373     BdrvIoctlCoData data = {
2374         .bs = bs,
2375         .req = req,
2376         .buf = buf,
2377         .ret = -EINPROGRESS,
2378     };
2379 
2380     if (qemu_in_coroutine()) {
2381         /* Fast-path if already in coroutine context */
2382         bdrv_co_ioctl_entry(&data);
2383     } else {
2384         Coroutine *co = qemu_coroutine_create(bdrv_co_ioctl_entry);
2385 
2386         qemu_coroutine_enter(co, &data);
2387         while (data.ret == -EINPROGRESS) {
2388             aio_poll(bdrv_get_aio_context(bs), true);
2389         }
2390     }
2391     return data.ret;
2392 }
2393 
2394 static void coroutine_fn bdrv_co_aio_ioctl_entry(void *opaque)
2395 {
2396     BlockAIOCBCoroutine *acb = opaque;
2397     acb->req.error = bdrv_co_do_ioctl(acb->common.bs,
2398                                       acb->req.req, acb->req.buf);
2399     bdrv_co_complete(acb);
2400 }
2401 
2402 BlockAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
2403         unsigned long int req, void *buf,
2404         BlockCompletionFunc *cb, void *opaque)
2405 {
2406     BlockAIOCBCoroutine *acb = qemu_aio_get(&bdrv_em_co_aiocb_info,
2407                                             bs, cb, opaque);
2408     Coroutine *co;
2409 
2410     acb->need_bh = true;
2411     acb->req.error = -EINPROGRESS;
2412     acb->req.req = req;
2413     acb->req.buf = buf;
2414     co = qemu_coroutine_create(bdrv_co_aio_ioctl_entry);
2415     qemu_coroutine_enter(co, acb);
2416 
2417     bdrv_co_maybe_schedule_bh(acb);
2418     return &acb->common;
2419 }
2420 
2421 void *qemu_blockalign(BlockDriverState *bs, size_t size)
2422 {
2423     return qemu_memalign(bdrv_opt_mem_align(bs), size);
2424 }
2425 
2426 void *qemu_blockalign0(BlockDriverState *bs, size_t size)
2427 {
2428     return memset(qemu_blockalign(bs, size), 0, size);
2429 }
2430 
2431 void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
2432 {
2433     size_t align = bdrv_opt_mem_align(bs);
2434 
2435     /* Ensure that NULL is never returned on success */
2436     assert(align > 0);
2437     if (size == 0) {
2438         size = align;
2439     }
2440 
2441     return qemu_try_memalign(align, size);
2442 }
2443 
2444 void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
2445 {
2446     void *mem = qemu_try_blockalign(bs, size);
2447 
2448     if (mem) {
2449         memset(mem, 0, size);
2450     }
2451 
2452     return mem;
2453 }
2454 
2455 /*
2456  * Check if all memory in this vector is aligned to bdrv_min_mem_align(bs).
2457  */
2458 bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
2459 {
2460     int i;
2461     size_t alignment = bdrv_min_mem_align(bs);
2462 
2463     for (i = 0; i < qiov->niov; i++) {
2464         if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
2465             return false;
2466         }
2467         if (qiov->iov[i].iov_len % alignment) {
2468             return false;
2469         }
2470     }
2471 
2472     return true;
2473 }
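
/*
 * Usage sketch (illustrative only, hypothetical variables): buffers allocated
 * with qemu_blockalign() and whose length is a multiple of the memory
 * alignment are expected to satisfy this check; protocol drivers typically
 * use it to decide whether a bounce buffer is needed for O_DIRECT-style I/O.
 *
 *     size_t len = nb_sectors * BDRV_SECTOR_SIZE;
 *     void *buf = qemu_blockalign(bs, len);
 *     struct iovec iov = { .iov_base = buf, .iov_len = len };
 *     QEMUIOVector qiov;
 *
 *     qemu_iovec_init_external(&qiov, &iov, 1);
 *     if (bdrv_qiov_is_aligned(bs, &qiov)) {
 *         // the vector can be handed to the driver as-is
 *     }
 *     qemu_vfree(buf);
 */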
2474 
2475 void bdrv_add_before_write_notifier(BlockDriverState *bs,
2476                                     NotifierWithReturn *notifier)
2477 {
2478     notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
2479 }
2480 
2481 void bdrv_io_plug(BlockDriverState *bs)
2482 {
2483     BdrvChild *child;
2484 
2485     QLIST_FOREACH(child, &bs->children, next) {
2486         bdrv_io_plug(child->bs);
2487     }
2488 
2489     if (bs->io_plugged++ == 0 && bs->io_plug_disabled == 0) {
2490         BlockDriver *drv = bs->drv;
2491         if (drv && drv->bdrv_io_plug) {
2492             drv->bdrv_io_plug(bs);
2493         }
2494     }
2495 }
2496 
2497 void bdrv_io_unplug(BlockDriverState *bs)
2498 {
2499     BdrvChild *child;
2500 
2501     assert(bs->io_plugged);
2502     if (--bs->io_plugged == 0 && bs->io_plug_disabled == 0) {
2503         BlockDriver *drv = bs->drv;
2504         if (drv && drv->bdrv_io_unplug) {
2505             drv->bdrv_io_unplug(bs);
2506         }
2507     }
2508 
2509     QLIST_FOREACH(child, &bs->children, next) {
2510         bdrv_io_unplug(child->bs);
2511     }
2512 }
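
/*
 * Usage sketch (illustrative only; 'reqs', 'n' and 'completion_cb' are
 * hypothetical): callers that submit a batch of requests can bracket them
 * with plug/unplug so that drivers supporting it may coalesce the
 * submissions:
 *
 *     bdrv_io_plug(bs);
 *     for (i = 0; i < n; i++) {
 *         bdrv_aio_writev(bs, reqs[i].sector, reqs[i].qiov,
 *                         reqs[i].nb_sectors, completion_cb, reqs[i].opaque);
 *     }
 *     bdrv_io_unplug(bs);
 */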
2513 
2514 void bdrv_io_unplugged_begin(BlockDriverState *bs)
2515 {
2516     BdrvChild *child;
2517 
2518     if (bs->io_plug_disabled++ == 0 && bs->io_plugged > 0) {
2519         BlockDriver *drv = bs->drv;
2520         if (drv && drv->bdrv_io_unplug) {
2521             drv->bdrv_io_unplug(bs);
2522         }
2523     }
2524 
2525     QLIST_FOREACH(child, &bs->children, next) {
2526         bdrv_io_unplugged_begin(child->bs);
2527     }
2528 }
2529 
2530 void bdrv_io_unplugged_end(BlockDriverState *bs)
2531 {
2532     BdrvChild *child;
2533 
2534     assert(bs->io_plug_disabled);
2535     QLIST_FOREACH(child, &bs->children, next) {
2536         bdrv_io_unplugged_end(child->bs);
2537     }
2538 
2539     if (--bs->io_plug_disabled == 0 && bs->io_plugged > 0) {
2540         BlockDriver *drv = bs->drv;
2541         if (drv && drv->bdrv_io_plug) {
2542             drv->bdrv_io_plug(bs);
2543         }
2544     }
2545 }
2546 
2547 void bdrv_drained_begin(BlockDriverState *bs)
2548 {
2549     if (!bs->quiesce_counter++) {
2550         aio_disable_external(bdrv_get_aio_context(bs));
2551     }
2552     bdrv_parent_drained_begin(bs);
2553     bdrv_drain(bs);
2554 }
2555 
2556 void bdrv_drained_end(BlockDriverState *bs)
2557 {
2558     bdrv_parent_drained_end(bs);
2559 
2560     assert(bs->quiesce_counter > 0);
2561     if (--bs->quiesce_counter > 0) {
2562         return;
2563     }
2564     aio_enable_external(bdrv_get_aio_context(bs));
2565 }
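
/*
 * Usage sketch (illustrative only): operations that must not race with
 * in-flight requests, such as changing the BDS graph, are wrapped in a
 * drained section, which quiesces external request sources and waits for
 * pending I/O to finish:
 *
 *     bdrv_drained_begin(bs);
 *     ... modify bs or its children ...
 *     bdrv_drained_end(bs);
 */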
2566