/*
 * Block layer I/O functions
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "trace.h"
#include "sysemu/block-backend.h"
#include "block/aio-wait.h"
#include "block/blockjob.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "block/coroutines.h"
#include "block/dirty-bitmap.h"
#include "block/write-threshold.h"
#include "qemu/cutils.h"
#include "qemu/memalign.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "sysemu/replay.h"

/* Maximum bounce buffer for copy-on-read and write zeroes, in bytes */
#define MAX_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS)

static void coroutine_fn GRAPH_RDLOCK
bdrv_parent_cb_resize(BlockDriverState *bs);

static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int64_t bytes, BdrvRequestFlags flags);

static void GRAPH_RDLOCK
bdrv_parent_drained_begin(BlockDriverState *bs, BdrvChild *ignore)
{
    BdrvChild *c, *next;
    IO_OR_GS_CODE();
    assert_bdrv_graph_readable();

    QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
        if (c == ignore) {
            continue;
        }
        bdrv_parent_drained_begin_single(c);
    }
}

void bdrv_parent_drained_end_single(BdrvChild *c)
{
    GLOBAL_STATE_CODE();

    assert(c->quiesced_parent);
    c->quiesced_parent = false;

    if (c->klass->drained_end) {
        c->klass->drained_end(c);
    }
}

static void GRAPH_RDLOCK
bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore)
{
    BdrvChild *c;
    IO_OR_GS_CODE();
    assert_bdrv_graph_readable();

    QLIST_FOREACH(c, &bs->parents, next_parent) {
        if (c == ignore) {
            continue;
        }
        bdrv_parent_drained_end_single(c);
    }
}

bool bdrv_parent_drained_poll_single(BdrvChild *c)
{
    IO_OR_GS_CODE();

    if (c->klass->drained_poll) {
        return c->klass->drained_poll(c);
    }
    return false;
}

static bool GRAPH_RDLOCK
bdrv_parent_drained_poll(BlockDriverState *bs, BdrvChild *ignore,
                         bool ignore_bds_parents)
{
    BdrvChild *c, *next;
    bool busy = false;
    IO_OR_GS_CODE();
    assert_bdrv_graph_readable();

    QLIST_FOREACH_SAFE(c, &bs->parents, next_parent, next) {
        if (c == ignore || (ignore_bds_parents && c->klass->parent_is_bds)) {
            continue;
        }
        busy |= bdrv_parent_drained_poll_single(c);
    }

    return busy;
}

void bdrv_parent_drained_begin_single(BdrvChild *c)
{
    GLOBAL_STATE_CODE();

    assert(!c->quiesced_parent);
    c->quiesced_parent = true;

    if (c->klass->drained_begin) {
        /* called with rdlock taken, but it doesn't really need it. */
        c->klass->drained_begin(c);
    }
}

static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src)
{
    dst->pdiscard_alignment = MAX(dst->pdiscard_alignment,
                                  src->pdiscard_alignment);
    dst->opt_transfer = MAX(dst->opt_transfer, src->opt_transfer);
    dst->max_transfer = MIN_NON_ZERO(dst->max_transfer, src->max_transfer);
    dst->max_hw_transfer = MIN_NON_ZERO(dst->max_hw_transfer,
                                        src->max_hw_transfer);
    dst->opt_mem_alignment = MAX(dst->opt_mem_alignment,
                                 src->opt_mem_alignment);
    dst->min_mem_alignment = MAX(dst->min_mem_alignment,
                                 src->min_mem_alignment);
    dst->max_iov = MIN_NON_ZERO(dst->max_iov, src->max_iov);
    dst->max_hw_iov = MIN_NON_ZERO(dst->max_hw_iov, src->max_hw_iov);
}
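
/*
 * Illustrative sketch (not part of the build): merging limits keeps the
 * stricter value of each field.  MAX() tightens alignments, while
 * MIN_NON_ZERO() tightens size caps without letting an unset (zero) field
 * mask a real limit.  For example:
 *
 *     BlockLimits dst = { .max_transfer = 0,       .opt_mem_alignment = 512  };
 *     BlockLimits src = { .max_transfer = 1 << 20, .opt_mem_alignment = 4096 };
 *     bdrv_merge_limits(&dst, &src);
 *     // dst.max_transfer == 1 << 20  (MIN_NON_ZERO ignores the unset 0)
 *     // dst.opt_mem_alignment == 4096  (MAX keeps the stricter alignment)
 */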

typedef struct BdrvRefreshLimitsState {
    BlockDriverState *bs;
    BlockLimits old_bl;
} BdrvRefreshLimitsState;

static void bdrv_refresh_limits_abort(void *opaque)
{
    BdrvRefreshLimitsState *s = opaque;

    s->bs->bl = s->old_bl;
}

static TransactionActionDrv bdrv_refresh_limits_drv = {
    .abort = bdrv_refresh_limits_abort,
    .clean = g_free,
};

/* @tran is allowed to be NULL, in this case no rollback is possible. */
void bdrv_refresh_limits(BlockDriverState *bs, Transaction *tran, Error **errp)
{
    ERRP_GUARD();
    BlockDriver *drv = bs->drv;
    BdrvChild *c;
    bool have_limits;

    GLOBAL_STATE_CODE();

    if (tran) {
        BdrvRefreshLimitsState *s = g_new(BdrvRefreshLimitsState, 1);
        *s = (BdrvRefreshLimitsState) {
            .bs = bs,
            .old_bl = bs->bl,
        };
        tran_add(tran, &bdrv_refresh_limits_drv, s);
    }

    memset(&bs->bl, 0, sizeof(bs->bl));

    if (!drv) {
        return;
    }

    /* Default alignment based on whether driver has byte interface */
    bs->bl.request_alignment = (drv->bdrv_co_preadv ||
                                drv->bdrv_aio_preadv ||
                                drv->bdrv_co_preadv_part) ? 1 : 512;

    /* Take some limits from the children as a default */
    have_limits = false;
    QLIST_FOREACH(c, &bs->children, next) {
        if (c->role & (BDRV_CHILD_DATA | BDRV_CHILD_FILTERED | BDRV_CHILD_COW))
        {
            bdrv_merge_limits(&bs->bl, &c->bs->bl);
            have_limits = true;
        }

        if (c->role & BDRV_CHILD_FILTERED) {
            bs->bl.has_variable_length |= c->bs->bl.has_variable_length;
        }
    }

    if (!have_limits) {
        bs->bl.min_mem_alignment = 512;
        bs->bl.opt_mem_alignment = qemu_real_host_page_size();

        /* Safe default since most protocols use readv()/writev()/etc */
        bs->bl.max_iov = IOV_MAX;
    }

    /* Then let the driver override it */
    if (drv->bdrv_refresh_limits) {
        drv->bdrv_refresh_limits(bs, errp);
        if (*errp) {
            return;
        }
    }

    if (bs->bl.request_alignment > BDRV_MAX_ALIGNMENT) {
        error_setg(errp, "Driver requires too large request alignment");
    }
}

/**
 * The copy-on-read flag is actually a reference count so multiple users may
 * use the feature without worrying about clobbering its previous state.
 * Copy-on-read stays enabled until all users have called to disable it.
 */
void bdrv_enable_copy_on_read(BlockDriverState *bs)
{
    IO_CODE();
    qatomic_inc(&bs->copy_on_read);
}

void bdrv_disable_copy_on_read(BlockDriverState *bs)
{
    int old = qatomic_fetch_dec(&bs->copy_on_read);
    IO_CODE();
    assert(old >= 1);
}
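
/*
 * Usage sketch (illustrative only): because copy_on_read is a reference
 * count, nested users compose safely.  Each enable must be balanced by
 * exactly one disable; the feature stays active until the count drops back
 * to zero:
 *
 *     bdrv_enable_copy_on_read(bs);    // count 0 -> 1, COR active
 *     bdrv_enable_copy_on_read(bs);    // count 1 -> 2, still active
 *     bdrv_disable_copy_on_read(bs);   // count 2 -> 1, still active
 *     bdrv_disable_copy_on_read(bs);   // count 1 -> 0, COR off again
 */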

typedef struct {
    Coroutine *co;
    BlockDriverState *bs;
    bool done;
    bool begin;
    bool poll;
    BdrvChild *parent;
} BdrvCoDrainData;

/* Returns true if BDRV_POLL_WHILE() should go into a blocking aio_poll() */
bool bdrv_drain_poll(BlockDriverState *bs, BdrvChild *ignore_parent,
                     bool ignore_bds_parents)
{
    GLOBAL_STATE_CODE();

    if (bdrv_parent_drained_poll(bs, ignore_parent, ignore_bds_parents)) {
        return true;
    }

    if (qatomic_read(&bs->in_flight)) {
        return true;
    }

    return false;
}

static bool bdrv_drain_poll_top_level(BlockDriverState *bs,
                                      BdrvChild *ignore_parent)
{
    GLOBAL_STATE_CODE();
    GRAPH_RDLOCK_GUARD_MAINLOOP();

    return bdrv_drain_poll(bs, ignore_parent, false);
}

static void bdrv_do_drained_begin(BlockDriverState *bs, BdrvChild *parent,
                                  bool poll);
static void bdrv_do_drained_end(BlockDriverState *bs, BdrvChild *parent);

static void bdrv_co_drain_bh_cb(void *opaque)
{
    BdrvCoDrainData *data = opaque;
    Coroutine *co = data->co;
    BlockDriverState *bs = data->bs;

    if (bs) {
        bdrv_dec_in_flight(bs);
        if (data->begin) {
            bdrv_do_drained_begin(bs, data->parent, data->poll);
        } else {
            assert(!data->poll);
            bdrv_do_drained_end(bs, data->parent);
        }
    } else {
        assert(data->begin);
        bdrv_drain_all_begin();
    }

    data->done = true;
    aio_co_wake(co);
}

static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs,
                                                bool begin,
                                                BdrvChild *parent,
                                                bool poll)
{
    BdrvCoDrainData data;
    Coroutine *self = qemu_coroutine_self();

    /* Calling bdrv_drain() from a BH ensures the current coroutine yields and
     * other coroutines run if they were queued by aio_co_enter(). */

    assert(qemu_in_coroutine());
    data = (BdrvCoDrainData) {
        .co = self,
        .bs = bs,
        .done = false,
        .begin = begin,
        .parent = parent,
        .poll = poll,
    };

    if (bs) {
        bdrv_inc_in_flight(bs);
    }

    replay_bh_schedule_oneshot_event(qemu_get_aio_context(),
                                     bdrv_co_drain_bh_cb, &data);

    qemu_coroutine_yield();
    /* If we are resumed from some other event (such as an aio completion or a
     * timer callback), it is a bug in the caller that should be fixed. */
    assert(data.done);
}

static void bdrv_do_drained_begin(BlockDriverState *bs, BdrvChild *parent,
                                  bool poll)
{
    IO_OR_GS_CODE();

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs, true, parent, poll);
        return;
    }

    GLOBAL_STATE_CODE();

    /* Stop things in parent-to-child order */
    if (qatomic_fetch_inc(&bs->quiesce_counter) == 0) {
        GRAPH_RDLOCK_GUARD_MAINLOOP();
        bdrv_parent_drained_begin(bs, parent);
        if (bs->drv && bs->drv->bdrv_drain_begin) {
            bs->drv->bdrv_drain_begin(bs);
        }
    }

    /*
     * Wait for drained requests to finish.
     *
     * Calling BDRV_POLL_WHILE() only once for the top-level node is okay: The
     * call is needed so things in this AioContext can make progress even
     * though we don't return to the main AioContext loop - this automatically
     * includes other nodes in the same AioContext and therefore all child
     * nodes.
     */
    if (poll) {
        BDRV_POLL_WHILE(bs, bdrv_drain_poll_top_level(bs, parent));
    }
}

void bdrv_do_drained_begin_quiesce(BlockDriverState *bs, BdrvChild *parent)
{
    bdrv_do_drained_begin(bs, parent, false);
}

void coroutine_mixed_fn
bdrv_drained_begin(BlockDriverState *bs)
{
    IO_OR_GS_CODE();
    bdrv_do_drained_begin(bs, NULL, true);
}

/**
 * This function does not poll, nor must any of its recursively called
 * functions.
 */
static void bdrv_do_drained_end(BlockDriverState *bs, BdrvChild *parent)
{
    int old_quiesce_counter;

    IO_OR_GS_CODE();

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(bs, false, parent, false);
        return;
    }

    /* At this point, we should be always running in the main loop. */
    GLOBAL_STATE_CODE();
    assert(bs->quiesce_counter > 0);

    /* Re-enable things in child-to-parent order */
    old_quiesce_counter = qatomic_fetch_dec(&bs->quiesce_counter);
    if (old_quiesce_counter == 1) {
        GRAPH_RDLOCK_GUARD_MAINLOOP();
        if (bs->drv && bs->drv->bdrv_drain_end) {
            bs->drv->bdrv_drain_end(bs);
        }
        bdrv_parent_drained_end(bs, parent);
    }
}

void bdrv_drained_end(BlockDriverState *bs)
{
    IO_OR_GS_CODE();
    bdrv_do_drained_end(bs, NULL);
}

void bdrv_drain(BlockDriverState *bs)
{
    IO_OR_GS_CODE();
    bdrv_drained_begin(bs);
    bdrv_drained_end(bs);
}
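
/*
 * Usage sketch (illustrative only): a drained section quiesces a node so an
 * operation cannot race with in-flight I/O; bdrv_drain() above is just the
 * degenerate begin/end pair.  Sections nest, because quiesce_counter is a
 * counter rather than a flag:
 *
 *     bdrv_drained_begin(bs);
 *     ... operation that requires no requests in flight on bs ...
 *     bdrv_drained_end(bs);
 */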

static void bdrv_drain_assert_idle(BlockDriverState *bs)
{
    BdrvChild *child, *next;
    GLOBAL_STATE_CODE();
    GRAPH_RDLOCK_GUARD_MAINLOOP();

    assert(qatomic_read(&bs->in_flight) == 0);
    QLIST_FOREACH_SAFE(child, &bs->children, next, next) {
        bdrv_drain_assert_idle(child->bs);
    }
}

unsigned int bdrv_drain_all_count = 0;

static bool bdrv_drain_all_poll(void)
{
    BlockDriverState *bs = NULL;
    bool result = false;

    GLOBAL_STATE_CODE();
    GRAPH_RDLOCK_GUARD_MAINLOOP();

    /*
     * bdrv_drain_poll() can't make changes to the graph and we hold the BQL,
     * so iterating bdrv_next_all_states() is safe.
     */
    while ((bs = bdrv_next_all_states(bs))) {
        result |= bdrv_drain_poll(bs, NULL, true);
    }

    return result;
}

/*
 * Wait for pending requests to complete across all BlockDriverStates
 *
 * This function does not flush data to disk, use bdrv_flush_all() for that
 * after calling this function.
 *
 * This pauses all block jobs and disables external clients. It must
 * be paired with bdrv_drain_all_end().
 *
 * NOTE: no new block jobs or BlockDriverStates can be created between
 * the bdrv_drain_all_begin() and bdrv_drain_all_end() calls.
 */
void bdrv_drain_all_begin_nopoll(void)
{
    BlockDriverState *bs = NULL;
    GLOBAL_STATE_CODE();

    /*
     * bdrv queue is managed by record/replay,
     * waiting for finishing the I/O requests may
     * be infinite
     */
    if (replay_events_enabled()) {
        return;
    }

    /* AIO_WAIT_WHILE() with a NULL context can only be called from the main
     * loop AioContext, so make sure we're in the main context. */
    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    assert(bdrv_drain_all_count < INT_MAX);
    bdrv_drain_all_count++;

    /* Quiesce all nodes, without polling in-flight requests yet. The graph
     * cannot change during this loop. */
    while ((bs = bdrv_next_all_states(bs))) {
        bdrv_do_drained_begin(bs, NULL, false);
    }
}

void coroutine_mixed_fn bdrv_drain_all_begin(void)
{
    BlockDriverState *bs = NULL;

    if (qemu_in_coroutine()) {
        bdrv_co_yield_to_drain(NULL, true, NULL, true);
        return;
    }

    /*
     * bdrv queue is managed by record/replay,
     * waiting for finishing the I/O requests may
     * be infinite
     */
    if (replay_events_enabled()) {
        return;
    }

    bdrv_drain_all_begin_nopoll();

    /* Now poll the in-flight requests */
    AIO_WAIT_WHILE_UNLOCKED(NULL, bdrv_drain_all_poll());

    while ((bs = bdrv_next_all_states(bs))) {
        bdrv_drain_assert_idle(bs);
    }
}

void bdrv_drain_all_end_quiesce(BlockDriverState *bs)
{
    GLOBAL_STATE_CODE();

    g_assert(bs->quiesce_counter > 0);
    g_assert(!bs->refcnt);

    while (bs->quiesce_counter) {
        bdrv_do_drained_end(bs, NULL);
    }
}

void bdrv_drain_all_end(void)
{
    BlockDriverState *bs = NULL;
    GLOBAL_STATE_CODE();

    /*
     * bdrv queue is managed by record/replay,
     * waiting for finishing the I/O requests may
     * be endless
     */
    if (replay_events_enabled()) {
        return;
    }

    while ((bs = bdrv_next_all_states(bs))) {
        bdrv_do_drained_end(bs, NULL);
    }

    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
    assert(bdrv_drain_all_count > 0);
    bdrv_drain_all_count--;
}

void bdrv_drain_all(void)
{
    GLOBAL_STATE_CODE();
    bdrv_drain_all_begin();
    bdrv_drain_all_end();
}

/**
 * Remove an active request from the tracked requests list
 *
 * This function should be called when a tracked request is completing.
 */
static void coroutine_fn tracked_request_end(BdrvTrackedRequest *req)
{
    if (req->serialising) {
        qatomic_dec(&req->bs->serialising_in_flight);
    }

    qemu_mutex_lock(&req->bs->reqs_lock);
    QLIST_REMOVE(req, list);
    qemu_mutex_unlock(&req->bs->reqs_lock);

    /*
     * At this point qemu_co_queue_wait(&req->wait_queue, ...) won't be called
     * anymore because the request has been removed from the list, so it's safe
     * to restart the queue outside reqs_lock to minimize the critical section.
     */
    qemu_co_queue_restart_all(&req->wait_queue);
}
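
/*
 * Lifecycle sketch (illustrative only): a request is tracked for its whole
 * duration so overlapping requests can serialise against it.  The usual
 * shape inside an I/O path in this file is:
 *
 *     BdrvTrackedRequest req;
 *
 *     bdrv_inc_in_flight(bs);
 *     tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
 *     ... issue the actual I/O ...
 *     tracked_request_end(&req);
 *     bdrv_dec_in_flight(bs);
 */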

/**
 * Add an active request to the tracked requests list
 */
static void coroutine_fn tracked_request_begin(BdrvTrackedRequest *req,
                                               BlockDriverState *bs,
                                               int64_t offset,
                                               int64_t bytes,
                                               enum BdrvTrackedRequestType type)
{
    bdrv_check_request(offset, bytes, &error_abort);

    *req = (BdrvTrackedRequest){
        .bs = bs,
        .offset = offset,
        .bytes = bytes,
        .type = type,
        .co = qemu_coroutine_self(),
        .serialising = false,
        .overlap_offset = offset,
        .overlap_bytes = bytes,
    };

    qemu_co_queue_init(&req->wait_queue);

    qemu_mutex_lock(&bs->reqs_lock);
    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
    qemu_mutex_unlock(&bs->reqs_lock);
}

static bool tracked_request_overlaps(BdrvTrackedRequest *req,
                                     int64_t offset, int64_t bytes)
{
    bdrv_check_request(offset, bytes, &error_abort);

    /* aaaa   bbbb */
    if (offset >= req->overlap_offset + req->overlap_bytes) {
        return false;
    }
    /* bbbb   aaaa */
    if (req->overlap_offset >= offset + bytes) {
        return false;
    }
    return true;
}
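
/*
 * Worked example (illustrative only): a request tracked at
 * overlap_offset = 4096, overlap_bytes = 4096 covers [4096, 8192):
 *
 *     tracked_request_overlaps(req, 0,    4096)  -> false ("bbbb   aaaa")
 *     tracked_request_overlaps(req, 8192, 4096)  -> false ("aaaa   bbbb")
 *     tracked_request_overlaps(req, 6144, 4096)  -> true  (ranges intersect)
 */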

/* Called with self->bs->reqs_lock held */
static coroutine_fn BdrvTrackedRequest *
bdrv_find_conflicting_request(BdrvTrackedRequest *self)
{
    BdrvTrackedRequest *req;

    QLIST_FOREACH(req, &self->bs->tracked_requests, list) {
        if (req == self || (!req->serialising && !self->serialising)) {
            continue;
        }
        if (tracked_request_overlaps(req, self->overlap_offset,
                                     self->overlap_bytes))
        {
            /*
             * Hitting this means there was a reentrant request, for
             * example, a block driver issuing nested requests. This must
             * never happen since it means deadlock.
             */
            assert(qemu_coroutine_self() != req->co);

            /*
             * If the request is already (indirectly) waiting for us, or
             * will wait for us as soon as it wakes up, then just go on
             * (instead of producing a deadlock in the former case).
             */
            if (!req->waiting_for) {
                return req;
            }
        }
    }

    return NULL;
}

/* Called with self->bs->reqs_lock held */
static void coroutine_fn
bdrv_wait_serialising_requests_locked(BdrvTrackedRequest *self)
{
    BdrvTrackedRequest *req;

    while ((req = bdrv_find_conflicting_request(self))) {
        self->waiting_for = req;
        qemu_co_queue_wait(&req->wait_queue, &self->bs->reqs_lock);
        self->waiting_for = NULL;
    }
}

/* Called with req->bs->reqs_lock held */
static void tracked_request_set_serialising(BdrvTrackedRequest *req,
                                            uint64_t align)
{
    int64_t overlap_offset = req->offset & ~(align - 1);
    int64_t overlap_bytes =
        ROUND_UP(req->offset + req->bytes, align) - overlap_offset;

    bdrv_check_request(req->offset, req->bytes, &error_abort);

    if (!req->serialising) {
        qatomic_inc(&req->bs->serialising_in_flight);
        req->serialising = true;
    }

    req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
    req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
}
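
/*
 * Worked example (illustrative only): with align = 4096, a request at
 * offset = 5000, bytes = 2000 gets its serialising window widened to the
 * enclosing aligned region:
 *
 *     overlap_offset = 5000 & ~4095                = 4096
 *     overlap_bytes  = ROUND_UP(7000, 4096) - 4096 = 4096
 *
 * i.e. the request serialises against anything touching [4096, 8192).
 */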

/**
 * Return the tracked request on @bs for the current coroutine, or
 * NULL if there is none.
 */
BdrvTrackedRequest *coroutine_fn bdrv_co_get_self_request(BlockDriverState *bs)
{
    BdrvTrackedRequest *req;
    Coroutine *self = qemu_coroutine_self();
    IO_CODE();

    QLIST_FOREACH(req, &bs->tracked_requests, list) {
        if (req->co == self) {
            return req;
        }
    }

    return NULL;
}

/**
 * Round a region to subcluster (if supported) or cluster boundaries
 */
void coroutine_fn GRAPH_RDLOCK
bdrv_round_to_subclusters(BlockDriverState *bs, int64_t offset, int64_t bytes,
                          int64_t *align_offset, int64_t *align_bytes)
{
    BlockDriverInfo bdi;
    IO_CODE();
    if (bdrv_co_get_info(bs, &bdi) < 0 || bdi.subcluster_size == 0) {
        *align_offset = offset;
        *align_bytes = bytes;
    } else {
        int64_t c = bdi.subcluster_size;
        *align_offset = QEMU_ALIGN_DOWN(offset, c);
        *align_bytes = QEMU_ALIGN_UP(offset - *align_offset + bytes, c);
    }
}
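
/*
 * Worked example (illustrative only): assuming bdi.subcluster_size = 65536,
 * a request at offset = 70000, bytes = 1000 is widened to subcluster
 * boundaries:
 *
 *     *align_offset = QEMU_ALIGN_DOWN(70000, 65536)              = 65536
 *     *align_bytes  = QEMU_ALIGN_UP(70000 - 65536 + 1000, 65536) = 65536
 *
 * If the driver reports no subcluster size, the region passes through
 * unchanged.
 */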

static int coroutine_fn GRAPH_RDLOCK bdrv_get_cluster_size(BlockDriverState *bs)
{
    BlockDriverInfo bdi;
    int ret;

    ret = bdrv_co_get_info(bs, &bdi);
    if (ret < 0 || bdi.cluster_size == 0) {
        return bs->bl.request_alignment;
    } else {
        return bdi.cluster_size;
    }
}

void bdrv_inc_in_flight(BlockDriverState *bs)
{
    IO_CODE();
    qatomic_inc(&bs->in_flight);
}

void bdrv_wakeup(BlockDriverState *bs)
{
    IO_CODE();
    aio_wait_kick();
}

void bdrv_dec_in_flight(BlockDriverState *bs)
{
    IO_CODE();
    qatomic_dec(&bs->in_flight);
    bdrv_wakeup(bs);
}

static void coroutine_fn
bdrv_wait_serialising_requests(BdrvTrackedRequest *self)
{
    BlockDriverState *bs = self->bs;

    if (!qatomic_read(&bs->serialising_in_flight)) {
        return;
    }

    qemu_mutex_lock(&bs->reqs_lock);
    bdrv_wait_serialising_requests_locked(self);
    qemu_mutex_unlock(&bs->reqs_lock);
}

void coroutine_fn bdrv_make_request_serialising(BdrvTrackedRequest *req,
                                                uint64_t align)
{
    IO_CODE();

    qemu_mutex_lock(&req->bs->reqs_lock);

    tracked_request_set_serialising(req, align);
    bdrv_wait_serialising_requests_locked(req);

    qemu_mutex_unlock(&req->bs->reqs_lock);
}

int bdrv_check_qiov_request(int64_t offset, int64_t bytes,
                            QEMUIOVector *qiov, size_t qiov_offset,
                            Error **errp)
{
    /*
     * Check generic offset/bytes correctness
     */

    if (offset < 0) {
        error_setg(errp, "offset is negative: %" PRIi64, offset);
        return -EIO;
    }

    if (bytes < 0) {
        error_setg(errp, "bytes is negative: %" PRIi64, bytes);
        return -EIO;
    }

    if (bytes > BDRV_MAX_LENGTH) {
        error_setg(errp, "bytes(%" PRIi64 ") exceeds maximum(%" PRIi64 ")",
                   bytes, BDRV_MAX_LENGTH);
        return -EIO;
    }

    if (offset > BDRV_MAX_LENGTH) {
        error_setg(errp, "offset(%" PRIi64 ") exceeds maximum(%" PRIi64 ")",
                   offset, BDRV_MAX_LENGTH);
        return -EIO;
    }

    if (offset > BDRV_MAX_LENGTH - bytes) {
        error_setg(errp, "sum of offset(%" PRIi64 ") and bytes(%" PRIi64 ") "
                   "exceeds maximum(%" PRIi64 ")", offset, bytes,
                   BDRV_MAX_LENGTH);
        return -EIO;
    }

    if (!qiov) {
        return 0;
    }

    /*
     * Check qiov and qiov_offset
     */

    if (qiov_offset > qiov->size) {
        error_setg(errp, "qiov_offset(%zu) overflow io vector size(%zu)",
                   qiov_offset, qiov->size);
        return -EIO;
    }

    if (bytes > qiov->size - qiov_offset) {
        error_setg(errp, "bytes(%" PRIi64 ") + qiov_offset(%zu) overflow io "
                   "vector size(%zu)", bytes, qiov_offset, qiov->size);
        return -EIO;
    }

    return 0;
}

int bdrv_check_request(int64_t offset, int64_t bytes, Error **errp)
{
    return bdrv_check_qiov_request(offset, bytes, NULL, 0, errp);
}

static int bdrv_check_request32(int64_t offset, int64_t bytes,
                                QEMUIOVector *qiov, size_t qiov_offset)
{
    int ret = bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, NULL);
    if (ret < 0) {
        return ret;
    }

    if (bytes > BDRV_REQUEST_MAX_BYTES) {
        return -EIO;
    }

    return 0;
}
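
/*
 * Validation sketch (illustrative only): bdrv_check_request32() layers the
 * 32-bit-era request cap on top of the generic checks, so a request that is
 * valid against BDRV_MAX_LENGTH can still be rejected here:
 *
 *     bdrv_check_request32(0, BDRV_REQUEST_MAX_BYTES, NULL, 0)     == 0
 *     bdrv_check_request32(0, BDRV_REQUEST_MAX_BYTES + 1, NULL, 0) == -EIO
 *     bdrv_check_request32(-1, 512, NULL, 0)                       == -EIO
 */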

/*
 * Completely zero out a block device with the help of bdrv_pwrite_zeroes.
 * The operation is sped up by checking the block status and only writing
 * zeroes to the device if they currently do not return zeroes. Optional
 * flags are passed through to bdrv_pwrite_zeroes (e.g. BDRV_REQ_MAY_UNMAP,
 * BDRV_REQ_FUA).
 *
 * Returns < 0 on error, 0 on success. For error codes see bdrv_pwrite().
 */
int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags)
{
    int ret;
    int64_t target_size, bytes, offset = 0;
    BlockDriverState *bs = child->bs;
    IO_CODE();

    target_size = bdrv_getlength(bs);
    if (target_size < 0) {
        return target_size;
    }

    for (;;) {
        bytes = MIN(target_size - offset, BDRV_REQUEST_MAX_BYTES);
        if (bytes <= 0) {
            return 0;
        }
        ret = bdrv_block_status(bs, offset, bytes, &bytes, NULL, NULL);
        if (ret < 0) {
            return ret;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            offset += bytes;
            continue;
        }
        ret = bdrv_pwrite_zeroes(child, offset, bytes, flags);
        if (ret < 0) {
            return ret;
        }
        offset += bytes;
    }
}
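
/*
 * Usage sketch (illustrative only): zero an entire device, allowing the
 * driver to unmap ranges instead of writing literal zeroes where it can:
 *
 *     ret = bdrv_make_zero(child, BDRV_REQ_MAY_UNMAP);
 *     if (ret < 0) {
 *         error_report("zeroing failed: %s", strerror(-ret));
 *     }
 *
 * Regions that already read as zeroes are skipped via bdrv_block_status(),
 * so this is cheap on freshly created images.
 */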

/*
 * Writes to the file and ensures that no writes are reordered across this
 * request (acts as a barrier)
 *
 * Returns 0 on success, -errno in error cases.
 */
int coroutine_fn bdrv_co_pwrite_sync(BdrvChild *child, int64_t offset,
                                     int64_t bytes, const void *buf,
                                     BdrvRequestFlags flags)
{
    int ret;
    IO_CODE();
    assert_bdrv_graph_readable();

    ret = bdrv_co_pwrite(child, offset, bytes, buf, flags);
    if (ret < 0) {
        return ret;
    }

    ret = bdrv_co_flush(child->bs);
    if (ret < 0) {
        return ret;
    }

    return 0;
}
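
/*
 * Usage sketch (illustrative only; header_offset and header are
 * placeholders): metadata updates that must not be reordered with later
 * writes can use this flush-on-completion variant from coroutine context:
 *
 *     ret = bdrv_co_pwrite_sync(child, header_offset, sizeof(header),
 *                               &header, 0);
 *
 * The explicit bdrv_co_flush() above makes the write act as a barrier even
 * when BDRV_REQ_FUA is not passed or not supported by the driver.
 */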

typedef struct CoroutineIOCompletion {
    Coroutine *coroutine;
    int ret;
} CoroutineIOCompletion;

static void bdrv_co_io_em_complete(void *opaque, int ret)
{
    CoroutineIOCompletion *co = opaque;

    co->ret = ret;
    aio_co_wake(co->coroutine);
}
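
/*
 * Pattern sketch (illustrative only): CoroutineIOCompletion bridges
 * callback-based AIO into coroutine code.  The caller records its own
 * coroutine, submits the AIO request with bdrv_co_io_em_complete as the
 * completion callback, and yields; the callback stores the result and
 * wakes it up:
 *
 *     CoroutineIOCompletion co = {
 *         .coroutine = qemu_coroutine_self(),
 *     };
 *
 *     acb = drv->bdrv_aio_preadv(bs, offset, bytes, qiov, flags,
 *                                bdrv_co_io_em_complete, &co);
 *     if (acb == NULL) {
 *         return -EIO;
 *     }
 *     qemu_coroutine_yield();   // resumed by aio_co_wake() in the callback
 *     return co.ret;
 */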

static int coroutine_fn GRAPH_RDLOCK
bdrv_driver_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
                   QEMUIOVector *qiov, size_t qiov_offset, int flags)
{
    BlockDriver *drv = bs->drv;
    int64_t sector_num;
    unsigned int nb_sectors;
    QEMUIOVector local_qiov;
    int ret;
    assert_bdrv_graph_readable();

    bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
    assert(!(flags & ~bs->supported_read_flags));

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (drv->bdrv_co_preadv_part) {
        return drv->bdrv_co_preadv_part(bs, offset, bytes, qiov, qiov_offset,
                                        flags);
    }

    if (qiov_offset > 0 || bytes != qiov->size) {
        qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
        qiov = &local_qiov;
    }

    if (drv->bdrv_co_preadv) {
        ret = drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags);
        goto out;
    }

    if (drv->bdrv_aio_preadv) {
        BlockAIOCB *acb;
        CoroutineIOCompletion co = {
            .coroutine = qemu_coroutine_self(),
        };

        acb = drv->bdrv_aio_preadv(bs, offset, bytes, qiov, flags,
                                   bdrv_co_io_em_complete, &co);
        if (acb == NULL) {
            ret = -EIO;
            goto out;
        } else {
            qemu_coroutine_yield();
            ret = co.ret;
            goto out;
        }
    }

    sector_num = offset >> BDRV_SECTOR_BITS;
    nb_sectors = bytes >> BDRV_SECTOR_BITS;

    assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
    assert(bytes <= BDRV_REQUEST_MAX_BYTES);
    assert(drv->bdrv_co_readv);

    ret = drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);

out:
    if (qiov == &local_qiov) {
        qemu_iovec_destroy(&local_qiov);
    }

    return ret;
}
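
/*
 * Dispatch summary (illustrative only): bdrv_driver_preadv() probes driver
 * callbacks from most to least capable and emulates what is missing:
 *
 *     bdrv_co_preadv_part -- handles qiov_offset natively, no copy needed
 *     bdrv_co_preadv      -- byte-based coroutine interface
 *     bdrv_aio_preadv     -- callback AIO, bridged via CoroutineIOCompletion
 *     bdrv_co_readv       -- legacy sector-based fallback (512-byte aligned)
 *
 * Only when the driver lacks a _part callback is a temporary qiov slice
 * built with qemu_iovec_init_slice() to apply qiov_offset.
 */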
1041166fe960SKevin Wolf
10427b1fb72eSKevin Wolf static int coroutine_fn GRAPH_RDLOCK
bdrv_driver_pwritev(BlockDriverState * bs,int64_t offset,int64_t bytes,QEMUIOVector * qiov,size_t qiov_offset,BdrvRequestFlags flags)10437b1fb72eSKevin Wolf bdrv_driver_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes,
10447b1fb72eSKevin Wolf QEMUIOVector *qiov, size_t qiov_offset,
1045e75abedaSVladimir Sementsov-Ogievskiy BdrvRequestFlags flags)
104678a07294SKevin Wolf {
104778a07294SKevin Wolf BlockDriver *drv = bs->drv;
1048e8b65355SStefan Hajnoczi bool emulate_fua = false;
10493fb06697SKevin Wolf int64_t sector_num;
10503fb06697SKevin Wolf unsigned int nb_sectors;
1051ac850bf0SVladimir Sementsov-Ogievskiy QEMUIOVector local_qiov;
105278a07294SKevin Wolf int ret;
1053b9b10c35SKevin Wolf assert_bdrv_graph_readable();
105478a07294SKevin Wolf
105517abcbeeSVladimir Sementsov-Ogievskiy bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
1056fa166538SEric Blake
1057d470ad42SMax Reitz if (!drv) {
1058d470ad42SMax Reitz return -ENOMEDIUM;
1059d470ad42SMax Reitz }
1060d470ad42SMax Reitz
1061e8b65355SStefan Hajnoczi if ((flags & BDRV_REQ_FUA) &&
1062e8b65355SStefan Hajnoczi (~bs->supported_write_flags & BDRV_REQ_FUA)) {
1063e8b65355SStefan Hajnoczi flags &= ~BDRV_REQ_FUA;
1064e8b65355SStefan Hajnoczi emulate_fua = true;
1065e8b65355SStefan Hajnoczi }
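/*
 * Illustrative consequence: for a driver without native FUA support, a
 * successful FUA write of, say, 4 KiB is emulated as "pwritev(4 KiB)
 * followed by bdrv_co_flush()" (see the emulate_flags label below), which
 * preserves the durability guarantee at the cost of a full flush.
 */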
1066e8b65355SStefan Hajnoczi
1067e8b65355SStefan Hajnoczi flags &= bs->supported_write_flags;
1068e8b65355SStefan Hajnoczi
1069ac850bf0SVladimir Sementsov-Ogievskiy if (drv->bdrv_co_pwritev_part) {
1070ac850bf0SVladimir Sementsov-Ogievskiy ret = drv->bdrv_co_pwritev_part(bs, offset, bytes, qiov, qiov_offset,
1071e8b65355SStefan Hajnoczi flags);
1072ac850bf0SVladimir Sementsov-Ogievskiy goto emulate_flags;
1073ac850bf0SVladimir Sementsov-Ogievskiy }
1074ac850bf0SVladimir Sementsov-Ogievskiy
1075ac850bf0SVladimir Sementsov-Ogievskiy if (qiov_offset > 0 || bytes != qiov->size) {
1076ac850bf0SVladimir Sementsov-Ogievskiy qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
1077ac850bf0SVladimir Sementsov-Ogievskiy qiov = &local_qiov;
1078ac850bf0SVladimir Sementsov-Ogievskiy }
1079ac850bf0SVladimir Sementsov-Ogievskiy
10803fb06697SKevin Wolf if (drv->bdrv_co_pwritev) {
1081e8b65355SStefan Hajnoczi ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov, flags);
10823fb06697SKevin Wolf goto emulate_flags;
10833fb06697SKevin Wolf }
10843fb06697SKevin Wolf
1085edfab6a0SEric Blake if (drv->bdrv_aio_pwritev) {
108608844473SKevin Wolf BlockAIOCB *acb;
108708844473SKevin Wolf CoroutineIOCompletion co = {
108808844473SKevin Wolf .coroutine = qemu_coroutine_self(),
108908844473SKevin Wolf };
109008844473SKevin Wolf
1091e8b65355SStefan Hajnoczi acb = drv->bdrv_aio_pwritev(bs, offset, bytes, qiov, flags,
109208844473SKevin Wolf bdrv_co_io_em_complete, &co);
109308844473SKevin Wolf if (acb == NULL) {
10943fb06697SKevin Wolf ret = -EIO;
109508844473SKevin Wolf } else {
109608844473SKevin Wolf qemu_coroutine_yield();
10973fb06697SKevin Wolf ret = co.ret;
109808844473SKevin Wolf }
1099edfab6a0SEric Blake goto emulate_flags;
1100edfab6a0SEric Blake }
1101edfab6a0SEric Blake
1102edfab6a0SEric Blake sector_num = offset >> BDRV_SECTOR_BITS;
1103edfab6a0SEric Blake nb_sectors = bytes >> BDRV_SECTOR_BITS;
1104edfab6a0SEric Blake
11051bbbf32dSNir Soffer assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
11061bbbf32dSNir Soffer assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));
110741ae31e3SAlberto Garcia assert(bytes <= BDRV_REQUEST_MAX_BYTES);
1108edfab6a0SEric Blake
1109e18a58b4SEric Blake assert(drv->bdrv_co_writev);
1110e8b65355SStefan Hajnoczi ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov, flags);
111178a07294SKevin Wolf
11123fb06697SKevin Wolf emulate_flags:
1113e8b65355SStefan Hajnoczi if (ret == 0 && emulate_fua) {
111478a07294SKevin Wolf ret = bdrv_co_flush(bs);
111578a07294SKevin Wolf }
111678a07294SKevin Wolf
1117ac850bf0SVladimir Sementsov-Ogievskiy if (qiov == &local_qiov) {
1118ac850bf0SVladimir Sementsov-Ogievskiy qemu_iovec_destroy(&local_qiov);
1119ac850bf0SVladimir Sementsov-Ogievskiy }
1120ac850bf0SVladimir Sementsov-Ogievskiy
112178a07294SKevin Wolf return ret;
112278a07294SKevin Wolf }
112378a07294SKevin Wolf
11247b1fb72eSKevin Wolf static int coroutine_fn GRAPH_RDLOCK
112517abcbeeSVladimir Sementsov-Ogievskiy bdrv_driver_pwritev_compressed(BlockDriverState *bs, int64_t offset,
112617abcbeeSVladimir Sementsov-Ogievskiy int64_t bytes, QEMUIOVector *qiov,
1127ac850bf0SVladimir Sementsov-Ogievskiy size_t qiov_offset)
112829a298afSPavel Butsykin {
112929a298afSPavel Butsykin BlockDriver *drv = bs->drv;
1130ac850bf0SVladimir Sementsov-Ogievskiy QEMUIOVector local_qiov;
1131ac850bf0SVladimir Sementsov-Ogievskiy int ret;
1132b9b10c35SKevin Wolf assert_bdrv_graph_readable();
113329a298afSPavel Butsykin
113417abcbeeSVladimir Sementsov-Ogievskiy bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
113517abcbeeSVladimir Sementsov-Ogievskiy
1136d470ad42SMax Reitz if (!drv) {
1137d470ad42SMax Reitz return -ENOMEDIUM;
1138d470ad42SMax Reitz }
1139d470ad42SMax Reitz
1140ac850bf0SVladimir Sementsov-Ogievskiy if (!block_driver_can_compress(drv)) {
114129a298afSPavel Butsykin return -ENOTSUP;
114229a298afSPavel Butsykin }
114329a298afSPavel Butsykin
1144ac850bf0SVladimir Sementsov-Ogievskiy if (drv->bdrv_co_pwritev_compressed_part) {
1145ac850bf0SVladimir Sementsov-Ogievskiy return drv->bdrv_co_pwritev_compressed_part(bs, offset, bytes,
1146ac850bf0SVladimir Sementsov-Ogievskiy qiov, qiov_offset);
1147ac850bf0SVladimir Sementsov-Ogievskiy }
1148ac850bf0SVladimir Sementsov-Ogievskiy
1149ac850bf0SVladimir Sementsov-Ogievskiy if (qiov_offset == 0) {
115029a298afSPavel Butsykin return drv->bdrv_co_pwritev_compressed(bs, offset, bytes, qiov);
115129a298afSPavel Butsykin }
115229a298afSPavel Butsykin
1153ac850bf0SVladimir Sementsov-Ogievskiy qemu_iovec_init_slice(&local_qiov, qiov, qiov_offset, bytes);
1154ac850bf0SVladimir Sementsov-Ogievskiy ret = drv->bdrv_co_pwritev_compressed(bs, offset, bytes, &local_qiov);
1155ac850bf0SVladimir Sementsov-Ogievskiy qemu_iovec_destroy(&local_qiov);
1156ac850bf0SVladimir Sementsov-Ogievskiy
1157ac850bf0SVladimir Sementsov-Ogievskiy return ret;
1158ac850bf0SVladimir Sementsov-Ogievskiy }
1159ac850bf0SVladimir Sementsov-Ogievskiy
11607b1fb72eSKevin Wolf static int coroutine_fn GRAPH_RDLOCK
11617b1fb72eSKevin Wolf bdrv_co_do_copy_on_readv(BdrvChild *child, int64_t offset, int64_t bytes,
11627b1fb72eSKevin Wolf QEMUIOVector *qiov, size_t qiov_offset, int flags)
116361007b31SStefan Hajnoczi {
116485c97ca7SKevin Wolf BlockDriverState *bs = child->bs;
116585c97ca7SKevin Wolf
116661007b31SStefan Hajnoczi /* Perform I/O through a temporary buffer so that users who scribble over
116761007b31SStefan Hajnoczi * their read buffer while the operation is in progress do not end up
116861007b31SStefan Hajnoczi * modifying the image file. This is critical for zero-copy guest I/O
116961007b31SStefan Hajnoczi * where anything might happen inside guest memory.
117061007b31SStefan Hajnoczi */
11712275cc90SVladimir Sementsov-Ogievskiy void *bounce_buffer = NULL;
117261007b31SStefan Hajnoczi
117361007b31SStefan Hajnoczi BlockDriver *drv = bs->drv;
1174fc6b211fSAndrey Drobyshev int64_t align_offset;
1175fc6b211fSAndrey Drobyshev int64_t align_bytes;
11769df5afbdSVladimir Sementsov-Ogievskiy int64_t skip_bytes;
117761007b31SStefan Hajnoczi int ret;
1178cb2e2878SEric Blake int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer,
1179cb2e2878SEric Blake BDRV_REQUEST_MAX_BYTES);
11809df5afbdSVladimir Sementsov-Ogievskiy int64_t progress = 0;
11818644476eSMax Reitz bool skip_write;
118261007b31SStefan Hajnoczi
11839df5afbdSVladimir Sementsov-Ogievskiy bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
11849df5afbdSVladimir Sementsov-Ogievskiy
1185d470ad42SMax Reitz if (!drv) {
1186d470ad42SMax Reitz return -ENOMEDIUM;
1187d470ad42SMax Reitz }
1188d470ad42SMax Reitz
11898644476eSMax Reitz /*
11908644476eSMax Reitz * Do not write anything when the BDS is inactive. That is not
11918644476eSMax Reitz * allowed, and it would not help.
11928644476eSMax Reitz */
11938644476eSMax Reitz skip_write = (bs->open_flags & BDRV_O_INACTIVE);
11948644476eSMax Reitz
11951bf03e66SKevin Wolf /* FIXME We cannot require callers to have write permissions when all they
11961bf03e66SKevin Wolf * are doing is a read request. If we did things right, write permissions
11971bf03e66SKevin Wolf * would be obtained anyway, but internally by the copy-on-read code. As
1198765d9df9SEric Blake * long as it is implemented here rather than in a separate filter driver,
11991bf03e66SKevin Wolf * the copy-on-read code doesn't have its own BdrvChild, however, for which
12001bf03e66SKevin Wolf * it could request permissions. Therefore we have to bypass the permission
12011bf03e66SKevin Wolf * system for the moment. */
12021bf03e66SKevin Wolf // assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));
1203afa4b293SKevin Wolf
120461007b31SStefan Hajnoczi /* Cover the entire cluster so no additional backing file I/O is required when
1205cb2e2878SEric Blake * allocating a cluster in the image file. Note that this value may exceed
1206cb2e2878SEric Blake * BDRV_REQUEST_MAX_BYTES (even when the original read did not), which
1207cb2e2878SEric Blake * is one reason we loop rather than doing it all at once.
120861007b31SStefan Hajnoczi */
1209fc6b211fSAndrey Drobyshev bdrv_round_to_subclusters(bs, offset, bytes, &align_offset, &align_bytes);
1210fc6b211fSAndrey Drobyshev skip_bytes = offset - align_offset;
121161007b31SStefan Hajnoczi
1212244483e6SKevin Wolf trace_bdrv_co_do_copy_on_readv(bs, offset, bytes,
1213fc6b211fSAndrey Drobyshev align_offset, align_bytes);
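/*
 * Worked example (illustrative, assuming a 64 KiB subcluster): a 4 KiB
 * guest read at offset 70 KiB is widened to align_offset = 64 KiB,
 * align_bytes = 64 KiB, skip_bytes = 6 KiB, so a single copy-on-read
 * pass populates the whole subcluster.
 */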
121461007b31SStefan Hajnoczi
1215fc6b211fSAndrey Drobyshev while (align_bytes) {
1216cb2e2878SEric Blake int64_t pnum;
121761007b31SStefan Hajnoczi
12188644476eSMax Reitz if (skip_write) {
12198644476eSMax Reitz ret = 1; /* "already allocated", so nothing will be copied */
1220fc6b211fSAndrey Drobyshev pnum = MIN(align_bytes, max_transfer);
12218644476eSMax Reitz } else {
1222cc323997SPaolo Bonzini ret = bdrv_co_is_allocated(bs, align_offset,
1223fc6b211fSAndrey Drobyshev MIN(align_bytes, max_transfer), &pnum);
1224cb2e2878SEric Blake if (ret < 0) {
12258644476eSMax Reitz /*
12268644476eSMax Reitz * Safe to treat errors in querying allocation as if
1227cb2e2878SEric Blake * unallocated; we'll probably fail again soon on the
1228cb2e2878SEric Blake * read, but at least that will set a decent errno.
1229cb2e2878SEric Blake */
1230fc6b211fSAndrey Drobyshev pnum = MIN(align_bytes, max_transfer);
1231cb2e2878SEric Blake }
1232cb2e2878SEric Blake
1233b0ddcbbbSKevin Wolf /* Stop at EOF if the image ends in the middle of the cluster */
1234b0ddcbbbSKevin Wolf if (ret == 0 && pnum == 0) {
1235b0ddcbbbSKevin Wolf assert(progress >= bytes);
1236b0ddcbbbSKevin Wolf break;
1237b0ddcbbbSKevin Wolf }
1238b0ddcbbbSKevin Wolf
1239cb2e2878SEric Blake assert(skip_bytes < pnum);
12408644476eSMax Reitz }
1241cb2e2878SEric Blake
1242cb2e2878SEric Blake if (ret <= 0) {
12431143ec5eSVladimir Sementsov-Ogievskiy QEMUIOVector local_qiov;
12441143ec5eSVladimir Sementsov-Ogievskiy
1245cb2e2878SEric Blake /* Must copy-on-read; use the bounce buffer */
12460d93ed08SVladimir Sementsov-Ogievskiy pnum = MIN(pnum, MAX_BOUNCE_BUFFER);
12472275cc90SVladimir Sementsov-Ogievskiy if (!bounce_buffer) {
1248fc6b211fSAndrey Drobyshev int64_t max_we_need = MAX(pnum, align_bytes - pnum);
12492275cc90SVladimir Sementsov-Ogievskiy int64_t max_allowed = MIN(max_transfer, MAX_BOUNCE_BUFFER);
12502275cc90SVladimir Sementsov-Ogievskiy int64_t bounce_buffer_len = MIN(max_we_need, max_allowed);
12512275cc90SVladimir Sementsov-Ogievskiy
12522275cc90SVladimir Sementsov-Ogievskiy bounce_buffer = qemu_try_blockalign(bs, bounce_buffer_len);
12532275cc90SVladimir Sementsov-Ogievskiy if (!bounce_buffer) {
12542275cc90SVladimir Sementsov-Ogievskiy ret = -ENOMEM;
12552275cc90SVladimir Sementsov-Ogievskiy goto err;
12562275cc90SVladimir Sementsov-Ogievskiy }
12572275cc90SVladimir Sementsov-Ogievskiy }
12580d93ed08SVladimir Sementsov-Ogievskiy qemu_iovec_init_buf(&local_qiov, bounce_buffer, pnum);
1259cb2e2878SEric Blake
1260fc6b211fSAndrey Drobyshev ret = bdrv_driver_preadv(bs, align_offset, pnum,
1261ac850bf0SVladimir Sementsov-Ogievskiy &local_qiov, 0, 0);
126261007b31SStefan Hajnoczi if (ret < 0) {
126361007b31SStefan Hajnoczi goto err;
126461007b31SStefan Hajnoczi }
126561007b31SStefan Hajnoczi
1266c834dc05SEmanuele Giuseppe Esposito bdrv_co_debug_event(bs, BLKDBG_COR_WRITE);
1267c1499a5eSEric Blake if (drv->bdrv_co_pwrite_zeroes &&
1268cb2e2878SEric Blake buffer_is_zero(bounce_buffer, pnum)) {
1269a604fa2bSEric Blake /* FIXME: Should we (perhaps conditionally) be setting
1270a604fa2bSEric Blake * BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy
1271a604fa2bSEric Blake * that still correctly reads as zero? */
1272fc6b211fSAndrey Drobyshev ret = bdrv_co_do_pwrite_zeroes(bs, align_offset, pnum,
12737adcf59fSMax Reitz BDRV_REQ_WRITE_UNCHANGED);
127461007b31SStefan Hajnoczi } else {
1275cb2e2878SEric Blake /* This does not change the data on the disk, so it is not
1276cb2e2878SEric Blake * necessary to flush even in cache=writethrough mode.
127761007b31SStefan Hajnoczi */
1278fc6b211fSAndrey Drobyshev ret = bdrv_driver_pwritev(bs, align_offset, pnum,
1279ac850bf0SVladimir Sementsov-Ogievskiy &local_qiov, 0,
12807adcf59fSMax Reitz BDRV_REQ_WRITE_UNCHANGED);
128161007b31SStefan Hajnoczi }
128261007b31SStefan Hajnoczi
128361007b31SStefan Hajnoczi if (ret < 0) {
1284cb2e2878SEric Blake /* It might be okay to ignore write errors for guest
1285cb2e2878SEric Blake * requests. If this is a deliberate copy-on-read
1286cb2e2878SEric Blake * then we don't want to ignore the error. Simply
1287cb2e2878SEric Blake * report it in all cases.
128861007b31SStefan Hajnoczi */
128961007b31SStefan Hajnoczi goto err;
129061007b31SStefan Hajnoczi }
129161007b31SStefan Hajnoczi
12923299e5ecSVladimir Sementsov-Ogievskiy if (!(flags & BDRV_REQ_PREFETCH)) {
12931143ec5eSVladimir Sementsov-Ogievskiy qemu_iovec_from_buf(qiov, qiov_offset + progress,
12941143ec5eSVladimir Sementsov-Ogievskiy bounce_buffer + skip_bytes,
12954ab78b19SVladimir Sementsov-Ogievskiy MIN(pnum - skip_bytes, bytes - progress));
12963299e5ecSVladimir Sementsov-Ogievskiy }
12973299e5ecSVladimir Sementsov-Ogievskiy } else if (!(flags & BDRV_REQ_PREFETCH)) {
1298cb2e2878SEric Blake /* Read directly into the destination */
12991143ec5eSVladimir Sementsov-Ogievskiy ret = bdrv_driver_preadv(bs, offset + progress,
13001143ec5eSVladimir Sementsov-Ogievskiy MIN(pnum - skip_bytes, bytes - progress),
13011143ec5eSVladimir Sementsov-Ogievskiy qiov, qiov_offset + progress, 0);
1302cb2e2878SEric Blake if (ret < 0) {
1303cb2e2878SEric Blake goto err;
1304cb2e2878SEric Blake }
1305cb2e2878SEric Blake }
1306cb2e2878SEric Blake
1307fc6b211fSAndrey Drobyshev align_offset += pnum;
1308fc6b211fSAndrey Drobyshev align_bytes -= pnum;
1309cb2e2878SEric Blake progress += pnum - skip_bytes;
1310cb2e2878SEric Blake skip_bytes = 0;
1311cb2e2878SEric Blake }
1312cb2e2878SEric Blake ret = 0;
131361007b31SStefan Hajnoczi
131461007b31SStefan Hajnoczi err:
131561007b31SStefan Hajnoczi qemu_vfree(bounce_buffer);
131661007b31SStefan Hajnoczi return ret;
131761007b31SStefan Hajnoczi }
131861007b31SStefan Hajnoczi
131961007b31SStefan Hajnoczi /*
132061007b31SStefan Hajnoczi * Forwards an already correctly aligned request to the BlockDriver. This
13211a62d0acSEric Blake * handles copy on read, zeroing after EOF, and fragmentation of large
13221a62d0acSEric Blake * reads; any other features must be implemented by the caller.
132361007b31SStefan Hajnoczi */
13247b1fb72eSKevin Wolf static int coroutine_fn GRAPH_RDLOCK
13257b1fb72eSKevin Wolf bdrv_aligned_preadv(BdrvChild *child, BdrvTrackedRequest *req,
13267b1fb72eSKevin Wolf int64_t offset, int64_t bytes, int64_t align,
13277b1fb72eSKevin Wolf QEMUIOVector *qiov, size_t qiov_offset, int flags)
132861007b31SStefan Hajnoczi {
132985c97ca7SKevin Wolf BlockDriverState *bs = child->bs;
1330c9d20029SKevin Wolf int64_t total_bytes, max_bytes;
13311a62d0acSEric Blake int ret = 0;
13328b0c5d76SVladimir Sementsov-Ogievskiy int64_t bytes_remaining = bytes;
13331a62d0acSEric Blake int max_transfer;
133461007b31SStefan Hajnoczi
13358b0c5d76SVladimir Sementsov-Ogievskiy bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
133649c07526SKevin Wolf assert(is_power_of_2(align));
133749c07526SKevin Wolf assert((offset & (align - 1)) == 0);
133849c07526SKevin Wolf assert((bytes & (align - 1)) == 0);
1339abb06c5aSDaniel P. Berrange assert((bs->open_flags & BDRV_O_NO_IO) == 0);
13401a62d0acSEric Blake max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
13411a62d0acSEric Blake align);
1342a604fa2bSEric Blake
1343e8b65355SStefan Hajnoczi /*
1344e8b65355SStefan Hajnoczi * TODO: We would need a per-BDS .supported_read_flags and
1345a604fa2bSEric Blake * potential fallback support, if we ever implement any read flags
1346a604fa2bSEric Blake * to pass through to drivers. For now, there aren't any
1347e8b65355SStefan Hajnoczi * passthrough flags except the BDRV_REQ_REGISTERED_BUF optimization hint.
1348e8b65355SStefan Hajnoczi */
1349e8b65355SStefan Hajnoczi assert(!(flags & ~(BDRV_REQ_COPY_ON_READ | BDRV_REQ_PREFETCH |
1350e8b65355SStefan Hajnoczi BDRV_REQ_REGISTERED_BUF)));
135161007b31SStefan Hajnoczi
135261007b31SStefan Hajnoczi /* Handle Copy on Read and associated serialisation */
135361007b31SStefan Hajnoczi if (flags & BDRV_REQ_COPY_ON_READ) {
135461007b31SStefan Hajnoczi /* If we touch the same cluster it counts as an overlap. This
135561007b31SStefan Hajnoczi * guarantees that allocating writes will be serialized and not race
135661007b31SStefan Hajnoczi * with each other for the same cluster. For example, in copy-on-read
135761007b31SStefan Hajnoczi * it ensures that the CoR read and write operations are atomic and
135861007b31SStefan Hajnoczi * guest writes cannot interleave between them. */
13598ac5aab2SVladimir Sementsov-Ogievskiy bdrv_make_request_serialising(req, bdrv_get_cluster_size(bs));
136018fbd0deSPaolo Bonzini } else {
1361304d9d7fSMax Reitz bdrv_wait_serialising_requests(req);
136218fbd0deSPaolo Bonzini }
136361007b31SStefan Hajnoczi
136461007b31SStefan Hajnoczi if (flags & BDRV_REQ_COPY_ON_READ) {
1365d6a644bbSEric Blake int64_t pnum;
136661007b31SStefan Hajnoczi
1367897dd0ecSAndrey Shinkevich /* The flag BDRV_REQ_COPY_ON_READ has reached its addressee */
1368897dd0ecSAndrey Shinkevich flags &= ~BDRV_REQ_COPY_ON_READ;
1369897dd0ecSAndrey Shinkevich
1370cc323997SPaolo Bonzini ret = bdrv_co_is_allocated(bs, offset, bytes, &pnum);
137161007b31SStefan Hajnoczi if (ret < 0) {
137261007b31SStefan Hajnoczi goto out;
137361007b31SStefan Hajnoczi }
137461007b31SStefan Hajnoczi
137588e63df2SEric Blake if (!ret || pnum != bytes) {
137665cd4424SVladimir Sementsov-Ogievskiy ret = bdrv_co_do_copy_on_readv(child, offset, bytes,
137765cd4424SVladimir Sementsov-Ogievskiy qiov, qiov_offset, flags);
13783299e5ecSVladimir Sementsov-Ogievskiy goto out;
13793299e5ecSVladimir Sementsov-Ogievskiy } else if (flags & BDRV_REQ_PREFETCH) {
138061007b31SStefan Hajnoczi goto out;
138161007b31SStefan Hajnoczi }
138261007b31SStefan Hajnoczi }
138361007b31SStefan Hajnoczi
13841a62d0acSEric Blake /* Forward the request to the BlockDriver, possibly fragmenting it */
13850af02bd1SPaolo Bonzini total_bytes = bdrv_co_getlength(bs);
138649c07526SKevin Wolf if (total_bytes < 0) {
138749c07526SKevin Wolf ret = total_bytes;
138861007b31SStefan Hajnoczi goto out;
138961007b31SStefan Hajnoczi }
139061007b31SStefan Hajnoczi
1391e8b65355SStefan Hajnoczi assert(!(flags & ~(bs->supported_read_flags | BDRV_REQ_REGISTERED_BUF)));
1392897dd0ecSAndrey Shinkevich
139349c07526SKevin Wolf max_bytes = ROUND_UP(MAX(0, total_bytes - offset), align);
13941a62d0acSEric Blake if (bytes <= max_bytes && bytes <= max_transfer) {
1395897dd0ecSAndrey Shinkevich ret = bdrv_driver_preadv(bs, offset, bytes, qiov, qiov_offset, flags);
13961a62d0acSEric Blake goto out;
139761007b31SStefan Hajnoczi }
139861007b31SStefan Hajnoczi
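/*
 * Fragmented path. Example (illustrative): a 1 MiB read at offset
 * 512 KiB from a 1 MiB image yields 512 KiB of driver data followed by
 * 512 KiB of zeroes, because max_bytes caps the driver reads and the
 * remainder past EOF is zero-filled in the qiov below.
 */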
13991a62d0acSEric Blake while (bytes_remaining) {
14008b0c5d76SVladimir Sementsov-Ogievskiy int64_t num;
14011a62d0acSEric Blake
14021a62d0acSEric Blake if (max_bytes) {
14031a62d0acSEric Blake num = MIN(bytes_remaining, MIN(max_bytes, max_transfer));
14041a62d0acSEric Blake assert(num);
14051a62d0acSEric Blake
14061a62d0acSEric Blake ret = bdrv_driver_preadv(bs, offset + bytes - bytes_remaining,
1407134b7decSMax Reitz num, qiov,
1408897dd0ecSAndrey Shinkevich qiov_offset + bytes - bytes_remaining,
1409897dd0ecSAndrey Shinkevich flags);
14101a62d0acSEric Blake max_bytes -= num;
14111a62d0acSEric Blake } else {
14121a62d0acSEric Blake num = bytes_remaining;
1413134b7decSMax Reitz ret = qemu_iovec_memset(qiov, qiov_offset + bytes - bytes_remaining,
1414134b7decSMax Reitz 0, bytes_remaining);
14151a62d0acSEric Blake }
14161a62d0acSEric Blake if (ret < 0) {
14171a62d0acSEric Blake goto out;
14181a62d0acSEric Blake }
14191a62d0acSEric Blake bytes_remaining -= num;
142061007b31SStefan Hajnoczi }
142161007b31SStefan Hajnoczi
142261007b31SStefan Hajnoczi out:
14231a62d0acSEric Blake return ret < 0 ? ret : 0;
142461007b31SStefan Hajnoczi }
142561007b31SStefan Hajnoczi
142661007b31SStefan Hajnoczi /*
14277a3f542fSVladimir Sementsov-Ogievskiy * Request padding
14287a3f542fSVladimir Sementsov-Ogievskiy *
14297a3f542fSVladimir Sementsov-Ogievskiy * |<---- align ----->| |<----- align ---->|
14307a3f542fSVladimir Sementsov-Ogievskiy * |<- head ->|<------------- bytes ------------->|<-- tail -->|
14317a3f542fSVladimir Sementsov-Ogievskiy * | | | | | |
14327a3f542fSVladimir Sementsov-Ogievskiy * -*----------$-------*-------- ... --------*-----$------------*---
14337a3f542fSVladimir Sementsov-Ogievskiy * | | | | | |
14347a3f542fSVladimir Sementsov-Ogievskiy * | offset | | end |
14357a3f542fSVladimir Sementsov-Ogievskiy * ALIGN_DOWN(offset) ALIGN_UP(offset) ALIGN_DOWN(end) ALIGN_UP(end)
14367a3f542fSVladimir Sementsov-Ogievskiy * [buf ... ) [tail_buf )
14377a3f542fSVladimir Sementsov-Ogievskiy *
14387a3f542fSVladimir Sementsov-Ogievskiy * @buf is an aligned allocation needed to store @head and @tail paddings. @head
14397a3f542fSVladimir Sementsov-Ogievskiy * is placed at the beginning of @buf and @tail at the end.
14407a3f542fSVladimir Sementsov-Ogievskiy *
14417a3f542fSVladimir Sementsov-Ogievskiy * @tail_buf is a pointer to the sub-buffer corresponding to the align-sized
14427a3f542fSVladimir Sementsov-Ogievskiy * chunk around the tail, if a tail exists.
14437a3f542fSVladimir Sementsov-Ogievskiy *
14447a3f542fSVladimir Sementsov-Ogievskiy * @merge_reads is true for small requests, i.e. when
14457a3f542fSVladimir Sementsov-Ogievskiy * @buf_len == @head + bytes + @tail. In this case it is possible that both
14467a3f542fSVladimir Sementsov-Ogievskiy * head and tail exist but @buf_len == align and @tail_buf == @buf.
144718743311SHanna Czenczek *
144818743311SHanna Czenczek * @write is true for write requests, false for read requests.
144918743311SHanna Czenczek *
145018743311SHanna Czenczek * If padding makes the vector too long (exceeding IOV_MAX), then we need to
145118743311SHanna Czenczek * merge existing vector elements into a single one. @collapse_bounce_buf acts
145218743311SHanna Czenczek * as the bounce buffer in such cases. @pre_collapse_qiov has the pre-collapse
145318743311SHanna Czenczek * I/O vector elements, so for read requests the data can be copied back after
145418743311SHanna Czenczek * the read is done.
145561007b31SStefan Hajnoczi */
14567a3f542fSVladimir Sementsov-Ogievskiy typedef struct BdrvRequestPadding {
14577a3f542fSVladimir Sementsov-Ogievskiy uint8_t *buf;
14587a3f542fSVladimir Sementsov-Ogievskiy size_t buf_len;
14597a3f542fSVladimir Sementsov-Ogievskiy uint8_t *tail_buf;
14607a3f542fSVladimir Sementsov-Ogievskiy size_t head;
14617a3f542fSVladimir Sementsov-Ogievskiy size_t tail;
14627a3f542fSVladimir Sementsov-Ogievskiy bool merge_reads;
146318743311SHanna Czenczek bool write;
14647a3f542fSVladimir Sementsov-Ogievskiy QEMUIOVector local_qiov;
146518743311SHanna Czenczek
146618743311SHanna Czenczek uint8_t *collapse_bounce_buf;
146718743311SHanna Czenczek size_t collapse_len;
146818743311SHanna Czenczek QEMUIOVector pre_collapse_qiov;
14697a3f542fSVladimir Sementsov-Ogievskiy } BdrvRequestPadding;
14707a3f542fSVladimir Sementsov-Ogievskiy
14717a3f542fSVladimir Sementsov-Ogievskiy static bool bdrv_init_padding(BlockDriverState *bs,
14727a3f542fSVladimir Sementsov-Ogievskiy int64_t offset, int64_t bytes,
147318743311SHanna Czenczek bool write,
14747a3f542fSVladimir Sementsov-Ogievskiy BdrvRequestPadding *pad)
14757a3f542fSVladimir Sementsov-Ogievskiy {
1476a56ed80cSVladimir Sementsov-Ogievskiy int64_t align = bs->bl.request_alignment;
1477a56ed80cSVladimir Sementsov-Ogievskiy int64_t sum;
1478a56ed80cSVladimir Sementsov-Ogievskiy
1479a56ed80cSVladimir Sementsov-Ogievskiy bdrv_check_request(offset, bytes, &error_abort);
1480a56ed80cSVladimir Sementsov-Ogievskiy assert(align <= INT_MAX); /* documented in block/block_int.h */
1481a56ed80cSVladimir Sementsov-Ogievskiy assert(align <= SIZE_MAX / 2); /* so we can allocate the buffer */
14827a3f542fSVladimir Sementsov-Ogievskiy
14837a3f542fSVladimir Sementsov-Ogievskiy memset(pad, 0, sizeof(*pad));
14847a3f542fSVladimir Sementsov-Ogievskiy
14857a3f542fSVladimir Sementsov-Ogievskiy pad->head = offset & (align - 1);
14867a3f542fSVladimir Sementsov-Ogievskiy pad->tail = ((offset + bytes) & (align - 1));
14877a3f542fSVladimir Sementsov-Ogievskiy if (pad->tail) {
14887a3f542fSVladimir Sementsov-Ogievskiy pad->tail = align - pad->tail;
14897a3f542fSVladimir Sementsov-Ogievskiy }
14907a3f542fSVladimir Sementsov-Ogievskiy
1491ac9d00bfSVladimir Sementsov-Ogievskiy if (!pad->head && !pad->tail) {
14927a3f542fSVladimir Sementsov-Ogievskiy return false;
14937a3f542fSVladimir Sementsov-Ogievskiy }
14947a3f542fSVladimir Sementsov-Ogievskiy
1495ac9d00bfSVladimir Sementsov-Ogievskiy assert(bytes); /* Nothing good in aligning zero-length requests */
1496ac9d00bfSVladimir Sementsov-Ogievskiy
14977a3f542fSVladimir Sementsov-Ogievskiy sum = pad->head + bytes + pad->tail;
14987a3f542fSVladimir Sementsov-Ogievskiy pad->buf_len = (sum > align && pad->head && pad->tail) ? 2 * align : align;
14997a3f542fSVladimir Sementsov-Ogievskiy pad->buf = qemu_blockalign(bs, pad->buf_len);
15007a3f542fSVladimir Sementsov-Ogievskiy pad->merge_reads = sum == pad->buf_len;
15017a3f542fSVladimir Sementsov-Ogievskiy if (pad->tail) {
15027a3f542fSVladimir Sementsov-Ogievskiy pad->tail_buf = pad->buf + pad->buf_len - align;
15037a3f542fSVladimir Sementsov-Ogievskiy }
15047a3f542fSVladimir Sementsov-Ogievskiy
150518743311SHanna Czenczek pad->write = write;
150618743311SHanna Czenczek
15077a3f542fSVladimir Sementsov-Ogievskiy return true;
15087a3f542fSVladimir Sementsov-Ogievskiy }
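/*
 * Worked example (illustrative): align = 512, offset = 100, bytes = 200.
 * Then head = 100, tail = 212, sum = 512 == buf_len, so merge_reads is
 * true: head, body and tail all fit in a single aligned chunk and
 * tail_buf == buf.
 */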
15097a3f542fSVladimir Sementsov-Ogievskiy
15107b1fb72eSKevin Wolf static int coroutine_fn GRAPH_RDLOCK
15117b1fb72eSKevin Wolf bdrv_padding_rmw_read(BdrvChild *child, BdrvTrackedRequest *req,
15127b1fb72eSKevin Wolf BdrvRequestPadding *pad, bool zero_middle)
15137a3f542fSVladimir Sementsov-Ogievskiy {
15147a3f542fSVladimir Sementsov-Ogievskiy QEMUIOVector local_qiov;
15157a3f542fSVladimir Sementsov-Ogievskiy BlockDriverState *bs = child->bs;
15167a3f542fSVladimir Sementsov-Ogievskiy uint64_t align = bs->bl.request_alignment;
15177a3f542fSVladimir Sementsov-Ogievskiy int ret;
15187a3f542fSVladimir Sementsov-Ogievskiy
15197a3f542fSVladimir Sementsov-Ogievskiy assert(req->serialising && pad->buf);
15207a3f542fSVladimir Sementsov-Ogievskiy
15217a3f542fSVladimir Sementsov-Ogievskiy if (pad->head || pad->merge_reads) {
15228b0c5d76SVladimir Sementsov-Ogievskiy int64_t bytes = pad->merge_reads ? pad->buf_len : align;
15237a3f542fSVladimir Sementsov-Ogievskiy
15247a3f542fSVladimir Sementsov-Ogievskiy qemu_iovec_init_buf(&local_qiov, pad->buf, bytes);
15257a3f542fSVladimir Sementsov-Ogievskiy
15267a3f542fSVladimir Sementsov-Ogievskiy if (pad->head) {
1527c834dc05SEmanuele Giuseppe Esposito bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
15287a3f542fSVladimir Sementsov-Ogievskiy }
15297a3f542fSVladimir Sementsov-Ogievskiy if (pad->merge_reads && pad->tail) {
1530c834dc05SEmanuele Giuseppe Esposito bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
15317a3f542fSVladimir Sementsov-Ogievskiy }
15327a3f542fSVladimir Sementsov-Ogievskiy ret = bdrv_aligned_preadv(child, req, req->overlap_offset, bytes,
153365cd4424SVladimir Sementsov-Ogievskiy align, &local_qiov, 0, 0);
15347a3f542fSVladimir Sementsov-Ogievskiy if (ret < 0) {
15357a3f542fSVladimir Sementsov-Ogievskiy return ret;
15367a3f542fSVladimir Sementsov-Ogievskiy }
15377a3f542fSVladimir Sementsov-Ogievskiy if (pad->head) {
1538c834dc05SEmanuele Giuseppe Esposito bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
15397a3f542fSVladimir Sementsov-Ogievskiy }
15407a3f542fSVladimir Sementsov-Ogievskiy if (pad->merge_reads && pad->tail) {
1541c834dc05SEmanuele Giuseppe Esposito bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
15427a3f542fSVladimir Sementsov-Ogievskiy }
15437a3f542fSVladimir Sementsov-Ogievskiy
15447a3f542fSVladimir Sementsov-Ogievskiy if (pad->merge_reads) {
15457a3f542fSVladimir Sementsov-Ogievskiy goto zero_mem;
15467a3f542fSVladimir Sementsov-Ogievskiy }
15477a3f542fSVladimir Sementsov-Ogievskiy }
15487a3f542fSVladimir Sementsov-Ogievskiy
15497a3f542fSVladimir Sementsov-Ogievskiy if (pad->tail) {
15507a3f542fSVladimir Sementsov-Ogievskiy qemu_iovec_init_buf(&local_qiov, pad->tail_buf, align);
15517a3f542fSVladimir Sementsov-Ogievskiy
1552c834dc05SEmanuele Giuseppe Esposito bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
15537a3f542fSVladimir Sementsov-Ogievskiy ret = bdrv_aligned_preadv(
15547a3f542fSVladimir Sementsov-Ogievskiy child, req,
15557a3f542fSVladimir Sementsov-Ogievskiy req->overlap_offset + req->overlap_bytes - align,
155665cd4424SVladimir Sementsov-Ogievskiy align, align, &local_qiov, 0, 0);
15577a3f542fSVladimir Sementsov-Ogievskiy if (ret < 0) {
15587a3f542fSVladimir Sementsov-Ogievskiy return ret;
15597a3f542fSVladimir Sementsov-Ogievskiy }
1560c834dc05SEmanuele Giuseppe Esposito bdrv_co_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
15617a3f542fSVladimir Sementsov-Ogievskiy }
15627a3f542fSVladimir Sementsov-Ogievskiy
15637a3f542fSVladimir Sementsov-Ogievskiy zero_mem:
15647a3f542fSVladimir Sementsov-Ogievskiy if (zero_middle) {
15657a3f542fSVladimir Sementsov-Ogievskiy memset(pad->buf + pad->head, 0, pad->buf_len - pad->head - pad->tail);
15667a3f542fSVladimir Sementsov-Ogievskiy }
15677a3f542fSVladimir Sementsov-Ogievskiy
15687a3f542fSVladimir Sementsov-Ogievskiy return 0;
15697a3f542fSVladimir Sementsov-Ogievskiy }
15707a3f542fSVladimir Sementsov-Ogievskiy
157118743311SHanna Czenczek /**
157218743311SHanna Czenczek * Free *pad's associated buffers, and perform any necessary finalization steps.
157318743311SHanna Czenczek */
157418743311SHanna Czenczek static void bdrv_padding_finalize(BdrvRequestPadding *pad)
15757a3f542fSVladimir Sementsov-Ogievskiy {
157618743311SHanna Czenczek if (pad->collapse_bounce_buf) {
157718743311SHanna Czenczek if (!pad->write) {
157818743311SHanna Czenczek /*
157918743311SHanna Czenczek * If padding required elements in the vector to be collapsed into a
158018743311SHanna Czenczek * bounce buffer, copy the bounce buffer content back
158118743311SHanna Czenczek */
158218743311SHanna Czenczek qemu_iovec_from_buf(&pad->pre_collapse_qiov, 0,
158318743311SHanna Czenczek pad->collapse_bounce_buf, pad->collapse_len);
158418743311SHanna Czenczek }
158518743311SHanna Czenczek qemu_vfree(pad->collapse_bounce_buf);
158618743311SHanna Czenczek qemu_iovec_destroy(&pad->pre_collapse_qiov);
158718743311SHanna Czenczek }
15887a3f542fSVladimir Sementsov-Ogievskiy if (pad->buf) {
15897a3f542fSVladimir Sementsov-Ogievskiy qemu_vfree(pad->buf);
15907a3f542fSVladimir Sementsov-Ogievskiy qemu_iovec_destroy(&pad->local_qiov);
15917a3f542fSVladimir Sementsov-Ogievskiy }
159298ca4549SVladimir Sementsov-Ogievskiy memset(pad, 0, sizeof(*pad));
15937a3f542fSVladimir Sementsov-Ogievskiy }
15947a3f542fSVladimir Sementsov-Ogievskiy
15957a3f542fSVladimir Sementsov-Ogievskiy /*
159618743311SHanna Czenczek * Create pad->local_qiov by wrapping @iov in the padding head and tail, while
159718743311SHanna Czenczek * ensuring that the resulting vector will not exceed IOV_MAX elements.
159818743311SHanna Czenczek *
159918743311SHanna Czenczek * To ensure this, when necessary, the first two or three elements of @iov are
160018743311SHanna Czenczek * merged into pad->collapse_bounce_buf and replaced by a reference to that
160118743311SHanna Czenczek * bounce buffer in pad->local_qiov.
160218743311SHanna Czenczek *
160318743311SHanna Czenczek * After performing a read request, the data from the bounce buffer must be
160418743311SHanna Czenczek * copied back into pad->pre_collapse_qiov (e.g. by bdrv_padding_finalize()).
160518743311SHanna Czenczek */
160618743311SHanna Czenczek static int bdrv_create_padded_qiov(BlockDriverState *bs,
160718743311SHanna Czenczek BdrvRequestPadding *pad,
160818743311SHanna Czenczek struct iovec *iov, int niov,
160918743311SHanna Czenczek size_t iov_offset, size_t bytes)
161018743311SHanna Czenczek {
161118743311SHanna Czenczek int padded_niov, surplus_count, collapse_count;
161218743311SHanna Czenczek
161318743311SHanna Czenczek /* Assert this invariant */
161418743311SHanna Czenczek assert(niov <= IOV_MAX);
161518743311SHanna Czenczek
161618743311SHanna Czenczek /*
161718743311SHanna Czenczek * Cannot pad if resulting length would exceed SIZE_MAX. Returning an error
161818743311SHanna Czenczek * to the guest is not ideal, but there is little else we can do. At least
161918743311SHanna Czenczek * this will practically never happen on 64-bit systems.
162018743311SHanna Czenczek */
162118743311SHanna Czenczek if (SIZE_MAX - pad->head < bytes ||
162218743311SHanna Czenczek SIZE_MAX - pad->head - bytes < pad->tail)
162318743311SHanna Czenczek {
162418743311SHanna Czenczek return -EINVAL;
162518743311SHanna Czenczek }
162618743311SHanna Czenczek
162718743311SHanna Czenczek /* Length of the resulting IOV if we just concatenated everything */
162818743311SHanna Czenczek padded_niov = !!pad->head + niov + !!pad->tail;
162918743311SHanna Czenczek
163018743311SHanna Czenczek qemu_iovec_init(&pad->local_qiov, MIN(padded_niov, IOV_MAX));
163118743311SHanna Czenczek
163218743311SHanna Czenczek if (pad->head) {
163318743311SHanna Czenczek qemu_iovec_add(&pad->local_qiov, pad->buf, pad->head);
163418743311SHanna Czenczek }
163518743311SHanna Czenczek
163618743311SHanna Czenczek /*
163718743311SHanna Czenczek * If padded_niov > IOV_MAX, we cannot just concatenate everything.
163818743311SHanna Czenczek * Instead, merge the first two or three elements of @iov to reduce the
163918743311SHanna Czenczek * number of vector elements as necessary.
164018743311SHanna Czenczek */
164118743311SHanna Czenczek if (padded_niov > IOV_MAX) {
164218743311SHanna Czenczek /*
164318743311SHanna Czenczek * Only head and tail can have led to the number of entries exceeding
164418743311SHanna Czenczek * IOV_MAX, so we can exceed it by the head and tail at most. We need
164518743311SHanna Czenczek * to reduce the number of elements by `surplus_count`, so we merge that
164618743311SHanna Czenczek * many elements plus one into one element.
164718743311SHanna Czenczek */
164818743311SHanna Czenczek surplus_count = padded_niov - IOV_MAX;
164918743311SHanna Czenczek assert(surplus_count <= !!pad->head + !!pad->tail);
165018743311SHanna Czenczek collapse_count = surplus_count + 1;
165118743311SHanna Czenczek
165218743311SHanna Czenczek /*
165318743311SHanna Czenczek * Move the elements to collapse into `pad->pre_collapse_qiov`, then
165418743311SHanna Czenczek * advance `iov` (and associated variables) by those elements.
165518743311SHanna Czenczek */
165618743311SHanna Czenczek qemu_iovec_init(&pad->pre_collapse_qiov, collapse_count);
165718743311SHanna Czenczek qemu_iovec_concat_iov(&pad->pre_collapse_qiov, iov,
165818743311SHanna Czenczek collapse_count, iov_offset, SIZE_MAX);
165918743311SHanna Czenczek iov += collapse_count;
166018743311SHanna Czenczek iov_offset = 0;
166118743311SHanna Czenczek niov -= collapse_count;
166218743311SHanna Czenczek bytes -= pad->pre_collapse_qiov.size;
166318743311SHanna Czenczek
166418743311SHanna Czenczek /*
166518743311SHanna Czenczek * Construct the bounce buffer to match the length of the to-collapse
166618743311SHanna Czenczek * vector elements, and for write requests, initialize it with the data
166718743311SHanna Czenczek * from those elements. Then add it to `pad->local_qiov`.
166818743311SHanna Czenczek */
166918743311SHanna Czenczek pad->collapse_len = pad->pre_collapse_qiov.size;
167018743311SHanna Czenczek pad->collapse_bounce_buf = qemu_blockalign(bs, pad->collapse_len);
167118743311SHanna Czenczek if (pad->write) {
167218743311SHanna Czenczek qemu_iovec_to_buf(&pad->pre_collapse_qiov, 0,
167318743311SHanna Czenczek pad->collapse_bounce_buf, pad->collapse_len);
167418743311SHanna Czenczek }
167518743311SHanna Czenczek qemu_iovec_add(&pad->local_qiov,
167618743311SHanna Czenczek pad->collapse_bounce_buf, pad->collapse_len);
167718743311SHanna Czenczek }
167818743311SHanna Czenczek
167918743311SHanna Czenczek qemu_iovec_concat_iov(&pad->local_qiov, iov, niov, iov_offset, bytes);
168018743311SHanna Czenczek
168118743311SHanna Czenczek if (pad->tail) {
168218743311SHanna Czenczek qemu_iovec_add(&pad->local_qiov,
168318743311SHanna Czenczek pad->buf + pad->buf_len - pad->tail, pad->tail);
168418743311SHanna Czenczek }
168518743311SHanna Czenczek
168618743311SHanna Czenczek assert(pad->local_qiov.niov == MIN(padded_niov, IOV_MAX));
168718743311SHanna Czenczek return 0;
168818743311SHanna Czenczek }
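/*
 * Worked example (illustrative): IOV_MAX = 1024, niov = 1023, with both
 * head and tail padding. padded_niov = 1025, so surplus_count = 1 and
 * collapse_count = 2: the first two guest elements are merged into one
 * bounce-buffer entry and the resulting vector has exactly 1024 elements.
 */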
168918743311SHanna Czenczek
169018743311SHanna Czenczek /*
16917a3f542fSVladimir Sementsov-Ogievskiy * bdrv_pad_request
16927a3f542fSVladimir Sementsov-Ogievskiy *
16937a3f542fSVladimir Sementsov-Ogievskiy * Exchange request parameters with padded request if needed. Don't include RMW
16947a3f542fSVladimir Sementsov-Ogievskiy * read of padding, bdrv_padding_rmw_read() should be called separately if
16957a3f542fSVladimir Sementsov-Ogievskiy * needed.
16967a3f542fSVladimir Sementsov-Ogievskiy *
169718743311SHanna Czenczek * @write is true for write requests, false for read requests.
169818743311SHanna Czenczek *
169998ca4549SVladimir Sementsov-Ogievskiy * Request parameters (@qiov, &qiov_offset, &offset, &bytes) are in-out:
170098ca4549SVladimir Sementsov-Ogievskiy * - on function start they represent original request
170198ca4549SVladimir Sementsov-Ogievskiy * - on failure or when padding is not needed they are unchanged
170298ca4549SVladimir Sementsov-Ogievskiy * - on success when padding is needed they represent padded request
17037a3f542fSVladimir Sementsov-Ogievskiy */
170498ca4549SVladimir Sementsov-Ogievskiy static int bdrv_pad_request(BlockDriverState *bs,
17051acc3466SVladimir Sementsov-Ogievskiy QEMUIOVector **qiov, size_t *qiov_offset,
170637e9403eSVladimir Sementsov-Ogievskiy int64_t *offset, int64_t *bytes,
170718743311SHanna Czenczek bool write,
1708e8b65355SStefan Hajnoczi BdrvRequestPadding *pad, bool *padded,
1709e8b65355SStefan Hajnoczi BdrvRequestFlags *flags)
17107a3f542fSVladimir Sementsov-Ogievskiy {
17114c002cefSVladimir Sementsov-Ogievskiy int ret;
171218743311SHanna Czenczek struct iovec *sliced_iov;
171318743311SHanna Czenczek int sliced_niov;
171418743311SHanna Czenczek size_t sliced_head, sliced_tail;
17154c002cefSVladimir Sementsov-Ogievskiy
1716ef256751SHanna Czenczek /* Should have been checked by the caller already */
1717ef256751SHanna Czenczek ret = bdrv_check_request32(*offset, *bytes, *qiov, *qiov_offset);
1718ef256751SHanna Czenczek if (ret < 0) {
1719ef256751SHanna Czenczek return ret;
1720ef256751SHanna Czenczek }
172137e9403eSVladimir Sementsov-Ogievskiy
172218743311SHanna Czenczek if (!bdrv_init_padding(bs, *offset, *bytes, write, pad)) {
172398ca4549SVladimir Sementsov-Ogievskiy if (padded) {
172498ca4549SVladimir Sementsov-Ogievskiy *padded = false;
172598ca4549SVladimir Sementsov-Ogievskiy }
172698ca4549SVladimir Sementsov-Ogievskiy return 0;
17277a3f542fSVladimir Sementsov-Ogievskiy }
17287a3f542fSVladimir Sementsov-Ogievskiy
17293f934817SStefan Reiter /*
17303f934817SStefan Reiter * For prefetching in stream_populate(), no qiov is passed along, because
17313f934817SStefan Reiter * only copy-on-read matters.
17323f934817SStefan Reiter */
1733e193d4bdSKevin Wolf if (*qiov) {
173418743311SHanna Czenczek sliced_iov = qemu_iovec_slice(*qiov, *qiov_offset, *bytes,
173518743311SHanna Czenczek &sliced_head, &sliced_tail,
173618743311SHanna Czenczek &sliced_niov);
173718743311SHanna Czenczek
1738ef256751SHanna Czenczek /* Guaranteed by bdrv_check_request32() */
173918743311SHanna Czenczek assert(*bytes <= SIZE_MAX);
174018743311SHanna Czenczek ret = bdrv_create_padded_qiov(bs, pad, sliced_iov, sliced_niov,
174118743311SHanna Czenczek sliced_head, *bytes);
174298ca4549SVladimir Sementsov-Ogievskiy if (ret < 0) {
174318743311SHanna Czenczek bdrv_padding_finalize(pad);
174498ca4549SVladimir Sementsov-Ogievskiy return ret;
174598ca4549SVladimir Sementsov-Ogievskiy }
17467a3f542fSVladimir Sementsov-Ogievskiy *qiov = &pad->local_qiov;
17471acc3466SVladimir Sementsov-Ogievskiy *qiov_offset = 0;
17483f934817SStefan Reiter }
17493f934817SStefan Reiter
17503f934817SStefan Reiter *bytes += pad->head + pad->tail;
17513f934817SStefan Reiter *offset -= pad->head;
175298ca4549SVladimir Sementsov-Ogievskiy if (padded) {
175398ca4549SVladimir Sementsov-Ogievskiy *padded = true;
175498ca4549SVladimir Sementsov-Ogievskiy }
1755e8b65355SStefan Hajnoczi if (flags) {
1756e8b65355SStefan Hajnoczi /* Can't use optimization hint with bounce buffer */
1757e8b65355SStefan Hajnoczi *flags &= ~BDRV_REQ_REGISTERED_BUF;
1758e8b65355SStefan Hajnoczi }
17597a3f542fSVladimir Sementsov-Ogievskiy
176098ca4549SVladimir Sementsov-Ogievskiy return 0;
17617a3f542fSVladimir Sementsov-Ogievskiy }
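/*
 * Worked example (illustrative): request_alignment = 4096, *offset = 5000,
 * *bytes = 1000. On success *offset becomes 4096 and *bytes becomes 4096;
 * *qiov then points at pad->local_qiov, which wraps the caller's data
 * between 904 bytes of head padding and 2192 bytes of tail padding.
 */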
17627a3f542fSVladimir Sementsov-Ogievskiy
1763a03ef88fSKevin Wolf int coroutine_fn bdrv_co_preadv(BdrvChild *child,
1764e9e52efdSVladimir Sementsov-Ogievskiy int64_t offset, int64_t bytes, QEMUIOVector *qiov,
176561007b31SStefan Hajnoczi BdrvRequestFlags flags)
176661007b31SStefan Hajnoczi {
1767967d7905SEmanuele Giuseppe Esposito IO_CODE();
17681acc3466SVladimir Sementsov-Ogievskiy return bdrv_co_preadv_part(child, offset, bytes, qiov, 0, flags);
17691acc3466SVladimir Sementsov-Ogievskiy }
17701acc3466SVladimir Sementsov-Ogievskiy
17711acc3466SVladimir Sementsov-Ogievskiy int coroutine_fn bdrv_co_preadv_part(BdrvChild *child,
177237e9403eSVladimir Sementsov-Ogievskiy int64_t offset, int64_t bytes,
17731acc3466SVladimir Sementsov-Ogievskiy QEMUIOVector *qiov, size_t qiov_offset,
17741acc3466SVladimir Sementsov-Ogievskiy BdrvRequestFlags flags)
17751acc3466SVladimir Sementsov-Ogievskiy {
1776a03ef88fSKevin Wolf BlockDriverState *bs = child->bs;
177761007b31SStefan Hajnoczi BdrvTrackedRequest req;
17787a3f542fSVladimir Sementsov-Ogievskiy BdrvRequestPadding pad;
177961007b31SStefan Hajnoczi int ret;
1780967d7905SEmanuele Giuseppe Esposito IO_CODE();
178161007b31SStefan Hajnoczi
178237e9403eSVladimir Sementsov-Ogievskiy trace_bdrv_co_preadv_part(bs, offset, bytes, flags);
178361007b31SStefan Hajnoczi
17841e97be91SEmanuele Giuseppe Esposito if (!bdrv_co_is_inserted(bs)) {
1785f4dad307SVladimir Sementsov-Ogievskiy return -ENOMEDIUM;
1786f4dad307SVladimir Sementsov-Ogievskiy }
1787f4dad307SVladimir Sementsov-Ogievskiy
178863f4ad11SVladimir Sementsov-Ogievskiy ret = bdrv_check_request32(offset, bytes, qiov, qiov_offset);
178961007b31SStefan Hajnoczi if (ret < 0) {
179061007b31SStefan Hajnoczi return ret;
179161007b31SStefan Hajnoczi }
179261007b31SStefan Hajnoczi
1793ac9d00bfSVladimir Sementsov-Ogievskiy if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) {
1794ac9d00bfSVladimir Sementsov-Ogievskiy /*
1795ac9d00bfSVladimir Sementsov-Ogievskiy * Aligning a zero-length request is nonsense. Even if a driver assigns
1796ac9d00bfSVladimir Sementsov-Ogievskiy * special meaning to zero length (like qcow2_co_pwritev_compressed_part),
1797ac9d00bfSVladimir Sementsov-Ogievskiy * we can't pass it to the driver due to request_alignment.
1798ac9d00bfSVladimir Sementsov-Ogievskiy *
1799ac9d00bfSVladimir Sementsov-Ogievskiy * Still, there is no reason to return an error if someone does an
1800ac9d00bfSVladimir Sementsov-Ogievskiy * unaligned zero-length read occasionally.
1801ac9d00bfSVladimir Sementsov-Ogievskiy */
1802ac9d00bfSVladimir Sementsov-Ogievskiy return 0;
1803ac9d00bfSVladimir Sementsov-Ogievskiy }
1804ac9d00bfSVladimir Sementsov-Ogievskiy
180599723548SPaolo Bonzini bdrv_inc_in_flight(bs);
180699723548SPaolo Bonzini
18079568b511SWen Congyang /* Don't do copy-on-read if we read data before a write operation */
1808d73415a3SStefan Hajnoczi if (qatomic_read(&bs->copy_on_read)) {
180961007b31SStefan Hajnoczi flags |= BDRV_REQ_COPY_ON_READ;
181061007b31SStefan Hajnoczi }
181161007b31SStefan Hajnoczi
181218743311SHanna Czenczek ret = bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, false,
181318743311SHanna Czenczek &pad, NULL, &flags);
181498ca4549SVladimir Sementsov-Ogievskiy if (ret < 0) {
181587ab8802SKevin Wolf goto fail;
181698ca4549SVladimir Sementsov-Ogievskiy }
181761007b31SStefan Hajnoczi
1818ebde595cSFam Zheng tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
18197a3f542fSVladimir Sementsov-Ogievskiy ret = bdrv_aligned_preadv(child, &req, offset, bytes,
18207a3f542fSVladimir Sementsov-Ogievskiy bs->bl.request_alignment,
18211acc3466SVladimir Sementsov-Ogievskiy qiov, qiov_offset, flags);
182261007b31SStefan Hajnoczi tracked_request_end(&req);
182318743311SHanna Czenczek bdrv_padding_finalize(&pad);
182461007b31SStefan Hajnoczi
182587ab8802SKevin Wolf fail:
182687ab8802SKevin Wolf bdrv_dec_in_flight(bs);
182787ab8802SKevin Wolf
182861007b31SStefan Hajnoczi return ret;
182961007b31SStefan Hajnoczi }
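/*
 * Usage sketch (illustrative only; "child" and "buf" are assumed to be a
 * valid BdrvChild and a caller-owned buffer). Must run in coroutine
 * context:
 *
 *     QEMUIOVector qiov;
 *     char buf[4096];
 *
 *     qemu_iovec_init_buf(&qiov, buf, sizeof(buf));
 *     ret = bdrv_co_preadv(child, 0, sizeof(buf), &qiov, 0);
 */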
183061007b31SStefan Hajnoczi
1831eeb47775SKevin Wolf static int coroutine_fn GRAPH_RDLOCK
1832eeb47775SKevin Wolf bdrv_co_do_pwrite_zeroes(BlockDriverState *bs, int64_t offset, int64_t bytes,
1833eeb47775SKevin Wolf BdrvRequestFlags flags)
183461007b31SStefan Hajnoczi {
183561007b31SStefan Hajnoczi BlockDriver *drv = bs->drv;
183661007b31SStefan Hajnoczi QEMUIOVector qiov;
18370d93ed08SVladimir Sementsov-Ogievskiy void *buf = NULL;
183861007b31SStefan Hajnoczi int ret = 0;
1839465fe887SEric Blake bool need_flush = false;
1840443668caSDenis V. Lunev int head = 0;
1841443668caSDenis V. Lunev int tail = 0;
184261007b31SStefan Hajnoczi
18432aaa3f9bSVladimir Sementsov-Ogievskiy int64_t max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes,
18442aaa3f9bSVladimir Sementsov-Ogievskiy INT64_MAX);
1845a5b8dd2cSEric Blake int alignment = MAX(bs->bl.pwrite_zeroes_alignment,
1846a5b8dd2cSEric Blake bs->bl.request_alignment);
1847cb2e2878SEric Blake int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer, MAX_BOUNCE_BUFFER);
1848cf081fcaSEric Blake
1849abaf8b75SKevin Wolf assert_bdrv_graph_readable();
18505ae07b14SVladimir Sementsov-Ogievskiy bdrv_check_request(offset, bytes, &error_abort);
18515ae07b14SVladimir Sementsov-Ogievskiy
1852d470ad42SMax Reitz if (!drv) {
1853d470ad42SMax Reitz return -ENOMEDIUM;
1854d470ad42SMax Reitz }
1855d470ad42SMax Reitz
1856fe0480d6SKevin Wolf if ((flags & ~bs->supported_zero_flags) & BDRV_REQ_NO_FALLBACK) {
1857fe0480d6SKevin Wolf return -ENOTSUP;
1858fe0480d6SKevin Wolf }
1859fe0480d6SKevin Wolf
1860e8b65355SStefan Hajnoczi /* By definition there is no user buffer so this flag doesn't make sense */
1861e8b65355SStefan Hajnoczi if (flags & BDRV_REQ_REGISTERED_BUF) {
1862e8b65355SStefan Hajnoczi return -EINVAL;
1863e8b65355SStefan Hajnoczi }
1864e8b65355SStefan Hajnoczi
1865*d05ae948SNir Soffer /* If opened with discard=off we should never unmap. */
1866*d05ae948SNir Soffer if (!(bs->open_flags & BDRV_O_UNMAP)) {
1867*d05ae948SNir Soffer flags &= ~BDRV_REQ_MAY_UNMAP;
1868*d05ae948SNir Soffer }
1869*d05ae948SNir Soffer
18700bc329fbSHanna Reitz /* Invalidate the cached block-status data range if this write overlaps */
18710bc329fbSHanna Reitz bdrv_bsc_invalidate_range(bs, offset, bytes);
18720bc329fbSHanna Reitz
1873b8d0a980SEric Blake assert(alignment % bs->bl.request_alignment == 0);
1874b8d0a980SEric Blake head = offset % alignment;
1875f5a5ca79SManos Pitsidianakis tail = (offset + bytes) % alignment;
1876b8d0a980SEric Blake max_write_zeroes = QEMU_ALIGN_DOWN(max_write_zeroes, alignment);
1877b8d0a980SEric Blake assert(max_write_zeroes >= bs->bl.request_alignment);
187861007b31SStefan Hajnoczi
1879f5a5ca79SManos Pitsidianakis while (bytes > 0 && !ret) {
18805ae07b14SVladimir Sementsov-Ogievskiy int64_t num = bytes;
188161007b31SStefan Hajnoczi
188261007b31SStefan Hajnoczi /* Align the request. Block drivers can expect the "bulk" of the request
1883443668caSDenis V. Lunev * to be aligned, and unaligned fragments never to cross cluster
1884443668caSDenis V. Lunev * boundaries.
188561007b31SStefan Hajnoczi */
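/*
 * Worked example (illustrative, assuming generous driver limits):
 * alignment = 64 KiB, offset = 60 KiB, bytes = 200 KiB. The loop first
 * issues a 4 KiB fragment up to the 64 KiB boundary, then 192 KiB of
 * aligned zeroing, and finally the trailing 4 KiB fragment, so only the
 * unaligned edges can hit a slow path.
 */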
1886443668caSDenis V. Lunev if (head) {
1887b2f95feeSEric Blake /* Make a small request up to the first aligned sector. For
1888b2f95feeSEric Blake * convenience, limit this request to max_transfer even if
1889b2f95feeSEric Blake * we don't need to fall back to writes. */
1890f5a5ca79SManos Pitsidianakis num = MIN(MIN(bytes, max_transfer), alignment - head);
1891b2f95feeSEric Blake head = (head + num) % alignment;
1892b2f95feeSEric Blake assert(num < max_write_zeroes);
1893d05aa8bbSEric Blake } else if (tail && num > alignment) {
1894443668caSDenis V. Lunev /* Shorten the request to the last aligned sector. */
1895443668caSDenis V. Lunev num -= tail;
189661007b31SStefan Hajnoczi }
189761007b31SStefan Hajnoczi
189861007b31SStefan Hajnoczi /* limit request size */
189961007b31SStefan Hajnoczi if (num > max_write_zeroes) {
190061007b31SStefan Hajnoczi num = max_write_zeroes;
190161007b31SStefan Hajnoczi }
190261007b31SStefan Hajnoczi
190361007b31SStefan Hajnoczi ret = -ENOTSUP;
190461007b31SStefan Hajnoczi /* First try the efficient write zeroes operation */
1905d05aa8bbSEric Blake if (drv->bdrv_co_pwrite_zeroes) {
1906d05aa8bbSEric Blake ret = drv->bdrv_co_pwrite_zeroes(bs, offset, num,
1907d05aa8bbSEric Blake flags & bs->supported_zero_flags);
1908d05aa8bbSEric Blake if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) &&
1909d05aa8bbSEric Blake !(bs->supported_zero_flags & BDRV_REQ_FUA)) {
1910d05aa8bbSEric Blake need_flush = true;
1911d05aa8bbSEric Blake }
1912465fe887SEric Blake } else {
1913465fe887SEric Blake assert(!bs->supported_zero_flags);
191461007b31SStefan Hajnoczi }
191561007b31SStefan Hajnoczi
1916294682ccSAndrey Shinkevich if (ret == -ENOTSUP && !(flags & BDRV_REQ_NO_FALLBACK)) {
191761007b31SStefan Hajnoczi /* Fall back to bounce buffer if write zeroes is unsupported */
1918465fe887SEric Blake BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE;
1919465fe887SEric Blake
1920465fe887SEric Blake if ((flags & BDRV_REQ_FUA) &&
1921465fe887SEric Blake !(bs->supported_write_flags & BDRV_REQ_FUA)) {
1922465fe887SEric Blake /* No need for bdrv_driver_pwritev() to do a fallback
1923465fe887SEric Blake * flush on each chunk; use just one at the end */
1924465fe887SEric Blake write_flags &= ~BDRV_REQ_FUA;
1925465fe887SEric Blake need_flush = true;
1926465fe887SEric Blake }
19275def6b80SEric Blake num = MIN(num, max_transfer);
19280d93ed08SVladimir Sementsov-Ogievskiy if (buf == NULL) {
19290d93ed08SVladimir Sementsov-Ogievskiy buf = qemu_try_blockalign0(bs, num);
19300d93ed08SVladimir Sementsov-Ogievskiy if (buf == NULL) {
193161007b31SStefan Hajnoczi ret = -ENOMEM;
193261007b31SStefan Hajnoczi goto fail;
193361007b31SStefan Hajnoczi }
193461007b31SStefan Hajnoczi }
19350d93ed08SVladimir Sementsov-Ogievskiy qemu_iovec_init_buf(&qiov, buf, num);
193661007b31SStefan Hajnoczi
1937ac850bf0SVladimir Sementsov-Ogievskiy ret = bdrv_driver_pwritev(bs, offset, num, &qiov, 0, write_flags);
193861007b31SStefan Hajnoczi
193961007b31SStefan Hajnoczi /* Keep the bounce buffer around if it is big enough for
194061007b31SStefan Hajnoczi * all future requests.
194161007b31SStefan Hajnoczi */
19425def6b80SEric Blake if (num < max_transfer) {
19430d93ed08SVladimir Sementsov-Ogievskiy qemu_vfree(buf);
19440d93ed08SVladimir Sementsov-Ogievskiy buf = NULL;
194561007b31SStefan Hajnoczi }
194661007b31SStefan Hajnoczi }
194761007b31SStefan Hajnoczi
1948d05aa8bbSEric Blake offset += num;
1949f5a5ca79SManos Pitsidianakis bytes -= num;
195061007b31SStefan Hajnoczi }
195161007b31SStefan Hajnoczi
195261007b31SStefan Hajnoczi fail:
1953465fe887SEric Blake if (ret == 0 && need_flush) {
1954465fe887SEric Blake ret = bdrv_co_flush(bs);
1955465fe887SEric Blake }
19560d93ed08SVladimir Sementsov-Ogievskiy qemu_vfree(buf);
195761007b31SStefan Hajnoczi return ret;
195861007b31SStefan Hajnoczi }
195961007b31SStefan Hajnoczi
1960a00e70c0SEmanuele Giuseppe Esposito static inline int coroutine_fn GRAPH_RDLOCK
1961fcfd9adeSVladimir Sementsov-Ogievskiy bdrv_co_write_req_prepare(BdrvChild *child, int64_t offset, int64_t bytes,
196285fe2479SFam Zheng BdrvTrackedRequest *req, int flags)
196385fe2479SFam Zheng {
196485fe2479SFam Zheng BlockDriverState *bs = child->bs;
1965fcfd9adeSVladimir Sementsov-Ogievskiy
1966fcfd9adeSVladimir Sementsov-Ogievskiy bdrv_check_request(offset, bytes, &error_abort);
196785fe2479SFam Zheng
1968307261b2SVladimir Sementsov-Ogievskiy if (bdrv_is_read_only(bs)) {
196985fe2479SFam Zheng return -EPERM;
197085fe2479SFam Zheng }
197185fe2479SFam Zheng
197285fe2479SFam Zheng assert(!(bs->open_flags & BDRV_O_INACTIVE));
197385fe2479SFam Zheng assert((bs->open_flags & BDRV_O_NO_IO) == 0);
197485fe2479SFam Zheng assert(!(flags & ~BDRV_REQ_MASK));
1975d1a764d1SVladimir Sementsov-Ogievskiy assert(!((flags & BDRV_REQ_NO_WAIT) && !(flags & BDRV_REQ_SERIALISING)));
197685fe2479SFam Zheng
197785fe2479SFam Zheng if (flags & BDRV_REQ_SERIALISING) {
1978d1a764d1SVladimir Sementsov-Ogievskiy QEMU_LOCK_GUARD(&bs->reqs_lock);
1979d1a764d1SVladimir Sementsov-Ogievskiy
1980d1a764d1SVladimir Sementsov-Ogievskiy tracked_request_set_serialising(req, bdrv_get_cluster_size(bs));
1981d1a764d1SVladimir Sementsov-Ogievskiy
1982d1a764d1SVladimir Sementsov-Ogievskiy if ((flags & BDRV_REQ_NO_WAIT) && bdrv_find_conflicting_request(req)) {
1983d1a764d1SVladimir Sementsov-Ogievskiy return -EBUSY;
1984d1a764d1SVladimir Sementsov-Ogievskiy }
1985d1a764d1SVladimir Sementsov-Ogievskiy
1986d1a764d1SVladimir Sementsov-Ogievskiy bdrv_wait_serialising_requests_locked(req);
198718fbd0deSPaolo Bonzini } else {
198818fbd0deSPaolo Bonzini bdrv_wait_serialising_requests(req);
198985fe2479SFam Zheng }
199085fe2479SFam Zheng
199185fe2479SFam Zheng assert(req->overlap_offset <= offset);
199285fe2479SFam Zheng assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);
1993fcfd9adeSVladimir Sementsov-Ogievskiy assert(offset + bytes <= bs->total_sectors * BDRV_SECTOR_SIZE ||
1994fcfd9adeSVladimir Sementsov-Ogievskiy child->perm & BLK_PERM_RESIZE);
199585fe2479SFam Zheng
1996cd47d792SFam Zheng switch (req->type) {
1997cd47d792SFam Zheng case BDRV_TRACKED_WRITE:
1998cd47d792SFam Zheng case BDRV_TRACKED_DISCARD:
199985fe2479SFam Zheng if (flags & BDRV_REQ_WRITE_UNCHANGED) {
200085fe2479SFam Zheng assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));
200185fe2479SFam Zheng } else {
200285fe2479SFam Zheng assert(child->perm & BLK_PERM_WRITE);
200385fe2479SFam Zheng }
200494783301SVladimir Sementsov-Ogievskiy bdrv_write_threshold_check_write(bs, offset, bytes);
200594783301SVladimir Sementsov-Ogievskiy return 0;
2006cd47d792SFam Zheng case BDRV_TRACKED_TRUNCATE:
2007cd47d792SFam Zheng assert(child->perm & BLK_PERM_RESIZE);
2008cd47d792SFam Zheng return 0;
2009cd47d792SFam Zheng default:
2010cd47d792SFam Zheng abort();
2011cd47d792SFam Zheng }
201285fe2479SFam Zheng }
201385fe2479SFam Zheng
20147859c45aSKevin Wolf static inline void coroutine_fn GRAPH_RDLOCK
2015fcfd9adeSVladimir Sementsov-Ogievskiy bdrv_co_write_req_finish(BdrvChild *child, int64_t offset, int64_t bytes,
201685fe2479SFam Zheng BdrvTrackedRequest *req, int ret)
201785fe2479SFam Zheng {
201885fe2479SFam Zheng int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
201985fe2479SFam Zheng BlockDriverState *bs = child->bs;
202085fe2479SFam Zheng
2021fcfd9adeSVladimir Sementsov-Ogievskiy bdrv_check_request(offset, bytes, &error_abort);
2022fcfd9adeSVladimir Sementsov-Ogievskiy
2023d73415a3SStefan Hajnoczi qatomic_inc(&bs->write_gen);
202485fe2479SFam Zheng
202500695c27SFam Zheng /*
202600695c27SFam Zheng * Discard cannot extend the image, but in error handling cases, such as
202700695c27SFam Zheng * when reverting a qcow2 cluster allocation, the discarded range can extend
202800695c27SFam Zheng * past the end of the image file, so we cannot assert about BDRV_TRACKED_DISCARD
202900695c27SFam Zheng * here. Instead, just skip it, since semantically a discard request
203000695c27SFam Zheng * beyond EOF cannot expand the image anyway.
203100695c27SFam Zheng */
20327f8f03efSFam Zheng if (ret == 0 &&
2033cd47d792SFam Zheng (req->type == BDRV_TRACKED_TRUNCATE ||
2034cd47d792SFam Zheng end_sector > bs->total_sectors) &&
203500695c27SFam Zheng req->type != BDRV_TRACKED_DISCARD) {
20367f8f03efSFam Zheng bs->total_sectors = end_sector;
20377f8f03efSFam Zheng bdrv_parent_cb_resize(bs);
20387f8f03efSFam Zheng bdrv_dirty_bitmap_truncate(bs, end_sector << BDRV_SECTOR_BITS);
203985fe2479SFam Zheng }
204000695c27SFam Zheng if (req->bytes) {
204100695c27SFam Zheng switch (req->type) {
204200695c27SFam Zheng case BDRV_TRACKED_WRITE:
204300695c27SFam Zheng stat64_max(&bs->wr_highest_offset, offset + bytes);
204400695c27SFam Zheng /* fall through, to set dirty bits */
204500695c27SFam Zheng case BDRV_TRACKED_DISCARD:
20467f8f03efSFam Zheng bdrv_set_dirty(bs, offset, bytes);
204700695c27SFam Zheng break;
204800695c27SFam Zheng default:
204900695c27SFam Zheng break;
205000695c27SFam Zheng }
205100695c27SFam Zheng }
205285fe2479SFam Zheng }
205385fe2479SFam Zheng
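/*
 * Illustrative pairing of the two helpers above (sketch only, not
 * compiled; error handling trimmed). Every tracked write-style request
 * is expected to bracket the driver call like this, with the same
 * offset/bytes/req on both sides:
 *
 *     ret = bdrv_co_write_req_prepare(child, offset, bytes, req, flags);
 *     if (ret == 0) {
 *         ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, 0, flags);
 *     }
 *     bdrv_co_write_req_finish(child, offset, bytes, req, ret);
 *
 * Note that _finish runs even when the request failed, so the write
 * generation counter and dirty bitmaps stay coherent.
 */
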
205461007b31SStefan Hajnoczi /*
205504ed95f4SEric Blake * Forwards an already correctly aligned write request to the BlockDriver,
205604ed95f4SEric Blake * after possibly fragmenting it.
205761007b31SStefan Hajnoczi */
20587b1fb72eSKevin Wolf static int coroutine_fn GRAPH_RDLOCK
20597b1fb72eSKevin Wolf bdrv_aligned_pwritev(BdrvChild *child, BdrvTrackedRequest *req,
20607b1fb72eSKevin Wolf int64_t offset, int64_t bytes, int64_t align,
20617b1fb72eSKevin Wolf QEMUIOVector *qiov, size_t qiov_offset,
2062e75abedaSVladimir Sementsov-Ogievskiy BdrvRequestFlags flags)
206361007b31SStefan Hajnoczi {
206485c97ca7SKevin Wolf BlockDriverState *bs = child->bs;
206561007b31SStefan Hajnoczi BlockDriver *drv = bs->drv;
206661007b31SStefan Hajnoczi int ret;
206761007b31SStefan Hajnoczi
2068fcfd9adeSVladimir Sementsov-Ogievskiy int64_t bytes_remaining = bytes;
206904ed95f4SEric Blake int max_transfer;
207061007b31SStefan Hajnoczi
2071fcfd9adeSVladimir Sementsov-Ogievskiy bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, &error_abort);
2072fcfd9adeSVladimir Sementsov-Ogievskiy
2073d470ad42SMax Reitz if (!drv) {
2074d470ad42SMax Reitz return -ENOMEDIUM;
2075d470ad42SMax Reitz }
2076d470ad42SMax Reitz
2077d6883bc9SVladimir Sementsov-Ogievskiy if (bdrv_has_readonly_bitmaps(bs)) {
2078d6883bc9SVladimir Sementsov-Ogievskiy return -EPERM;
2079d6883bc9SVladimir Sementsov-Ogievskiy }
2080d6883bc9SVladimir Sementsov-Ogievskiy
2081cff86b38SEric Blake assert(is_power_of_2(align));
2082cff86b38SEric Blake assert((offset & (align - 1)) == 0);
2083cff86b38SEric Blake assert((bytes & (align - 1)) == 0);
208404ed95f4SEric Blake max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
208504ed95f4SEric Blake align);
208661007b31SStefan Hajnoczi
208785fe2479SFam Zheng ret = bdrv_co_write_req_prepare(child, offset, bytes, req, flags);
208861007b31SStefan Hajnoczi
208961007b31SStefan Hajnoczi if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
2090c1499a5eSEric Blake !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_pwrite_zeroes &&
209128c4da28SVladimir Sementsov-Ogievskiy qemu_iovec_is_zero(qiov, qiov_offset, bytes)) {
209261007b31SStefan Hajnoczi flags |= BDRV_REQ_ZERO_WRITE;
209361007b31SStefan Hajnoczi if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
209461007b31SStefan Hajnoczi flags |= BDRV_REQ_MAY_UNMAP;
209561007b31SStefan Hajnoczi }
20963c586715SStefan Hajnoczi
20973c586715SStefan Hajnoczi /* Can't use optimization hint with bufferless zero write */
20983c586715SStefan Hajnoczi flags &= ~BDRV_REQ_REGISTERED_BUF;
209961007b31SStefan Hajnoczi }
210061007b31SStefan Hajnoczi
210161007b31SStefan Hajnoczi if (ret < 0) {
210261007b31SStefan Hajnoczi /* Do nothing; bdrv_co_write_req_prepare() decided to fail this request */
210361007b31SStefan Hajnoczi } else if (flags & BDRV_REQ_ZERO_WRITE) {
2104c834dc05SEmanuele Giuseppe Esposito bdrv_co_debug_event(bs, BLKDBG_PWRITEV_ZERO);
21059896c876SKevin Wolf ret = bdrv_co_do_pwrite_zeroes(bs, offset, bytes, flags);
21063ea1a091SPavel Butsykin } else if (flags & BDRV_REQ_WRITE_COMPRESSED) {
210728c4da28SVladimir Sementsov-Ogievskiy ret = bdrv_driver_pwritev_compressed(bs, offset, bytes,
210828c4da28SVladimir Sementsov-Ogievskiy qiov, qiov_offset);
210904ed95f4SEric Blake } else if (bytes <= max_transfer) {
2110c834dc05SEmanuele Giuseppe Esposito bdrv_co_debug_event(bs, BLKDBG_PWRITEV);
211128c4da28SVladimir Sementsov-Ogievskiy ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, qiov_offset, flags);
211204ed95f4SEric Blake } else {
2113c834dc05SEmanuele Giuseppe Esposito bdrv_co_debug_event(bs, BLKDBG_PWRITEV);
211404ed95f4SEric Blake while (bytes_remaining) {
211504ed95f4SEric Blake int num = MIN(bytes_remaining, max_transfer);
211604ed95f4SEric Blake int local_flags = flags;
211704ed95f4SEric Blake
211804ed95f4SEric Blake assert(num);
211904ed95f4SEric Blake if (num < bytes_remaining && (flags & BDRV_REQ_FUA) &&
212004ed95f4SEric Blake !(bs->supported_write_flags & BDRV_REQ_FUA)) {
212104ed95f4SEric Blake /* If FUA is going to be emulated by flush, we only
212204ed95f4SEric Blake * need to flush on the last iteration */
212304ed95f4SEric Blake local_flags &= ~BDRV_REQ_FUA;
212404ed95f4SEric Blake }
212504ed95f4SEric Blake
212604ed95f4SEric Blake ret = bdrv_driver_pwritev(bs, offset + bytes - bytes_remaining,
2127134b7decSMax Reitz num, qiov,
2128134b7decSMax Reitz qiov_offset + bytes - bytes_remaining,
212928c4da28SVladimir Sementsov-Ogievskiy local_flags);
213004ed95f4SEric Blake if (ret < 0) {
213104ed95f4SEric Blake break;
213204ed95f4SEric Blake }
213304ed95f4SEric Blake bytes_remaining -= num;
213404ed95f4SEric Blake }
213561007b31SStefan Hajnoczi }
2136c834dc05SEmanuele Giuseppe Esposito bdrv_co_debug_event(bs, BLKDBG_PWRITEV_DONE);
213761007b31SStefan Hajnoczi
213861007b31SStefan Hajnoczi if (ret >= 0) {
213904ed95f4SEric Blake ret = 0;
214061007b31SStefan Hajnoczi }
214185fe2479SFam Zheng bdrv_co_write_req_finish(child, offset, bytes, req, ret);
214261007b31SStefan Hajnoczi
214361007b31SStefan Hajnoczi return ret;
214461007b31SStefan Hajnoczi }
214561007b31SStefan Hajnoczi
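/*
 * Worked example for the fragmentation loop above (hypothetical
 * numbers): with max_transfer = 64 KiB, a 160 KiB write at offset 0
 * becomes three driver calls:
 *
 *     bdrv_driver_pwritev(bs,      0, 65536, qiov,      0, flags without FUA);
 *     bdrv_driver_pwritev(bs,  65536, 65536, qiov,  65536, flags without FUA);
 *     bdrv_driver_pwritev(bs, 131072, 32768, qiov, 131072, flags);
 *
 * When FUA is emulated by a flush, only the last fragment keeps
 * BDRV_REQ_FUA, so stable storage is hit exactly once.
 */
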
21467b1fb72eSKevin Wolf static int coroutine_fn GRAPH_RDLOCK
21477b1fb72eSKevin Wolf bdrv_co_do_zero_pwritev(BdrvChild *child, int64_t offset, int64_t bytes,
21487b1fb72eSKevin Wolf BdrvRequestFlags flags, BdrvTrackedRequest *req)
21499eeb6dd1SFam Zheng {
215085c97ca7SKevin Wolf BlockDriverState *bs = child->bs;
21519eeb6dd1SFam Zheng QEMUIOVector local_qiov;
2152a5b8dd2cSEric Blake uint64_t align = bs->bl.request_alignment;
21539eeb6dd1SFam Zheng int ret = 0;
21547a3f542fSVladimir Sementsov-Ogievskiy bool padding;
21557a3f542fSVladimir Sementsov-Ogievskiy BdrvRequestPadding pad;
21569eeb6dd1SFam Zheng
2157e8b65355SStefan Hajnoczi /* This flag doesn't make sense for padding or zero writes */
2158e8b65355SStefan Hajnoczi flags &= ~BDRV_REQ_REGISTERED_BUF;
2159e8b65355SStefan Hajnoczi
216018743311SHanna Czenczek padding = bdrv_init_padding(bs, offset, bytes, true, &pad);
21617a3f542fSVladimir Sementsov-Ogievskiy if (padding) {
216245e62b46SVladimir Sementsov-Ogievskiy assert(!(flags & BDRV_REQ_NO_WAIT));
21638ac5aab2SVladimir Sementsov-Ogievskiy bdrv_make_request_serialising(req, align);
21649eeb6dd1SFam Zheng
21657a3f542fSVladimir Sementsov-Ogievskiy bdrv_padding_rmw_read(child, req, &pad, true);
21667a3f542fSVladimir Sementsov-Ogievskiy
21677a3f542fSVladimir Sementsov-Ogievskiy if (pad.head || pad.merge_reads) {
21687a3f542fSVladimir Sementsov-Ogievskiy int64_t aligned_offset = offset & ~(align - 1);
21697a3f542fSVladimir Sementsov-Ogievskiy int64_t write_bytes = pad.merge_reads ? pad.buf_len : align;
21707a3f542fSVladimir Sementsov-Ogievskiy
21717a3f542fSVladimir Sementsov-Ogievskiy qemu_iovec_init_buf(&local_qiov, pad.buf, write_bytes);
21727a3f542fSVladimir Sementsov-Ogievskiy ret = bdrv_aligned_pwritev(child, req, aligned_offset, write_bytes,
217328c4da28SVladimir Sementsov-Ogievskiy align, &local_qiov, 0,
21749eeb6dd1SFam Zheng flags & ~BDRV_REQ_ZERO_WRITE);
21757a3f542fSVladimir Sementsov-Ogievskiy if (ret < 0 || pad.merge_reads) {
21767a3f542fSVladimir Sementsov-Ogievskiy /* Error or all work is done */
21777a3f542fSVladimir Sementsov-Ogievskiy goto out;
21789eeb6dd1SFam Zheng }
21797a3f542fSVladimir Sementsov-Ogievskiy offset += write_bytes - pad.head;
21807a3f542fSVladimir Sementsov-Ogievskiy bytes -= write_bytes - pad.head;
21817a3f542fSVladimir Sementsov-Ogievskiy }
21829eeb6dd1SFam Zheng }
21839eeb6dd1SFam Zheng
21849eeb6dd1SFam Zheng assert(!bytes || (offset & (align - 1)) == 0);
21859eeb6dd1SFam Zheng if (bytes >= align) {
21869eeb6dd1SFam Zheng /* Write the aligned part in the middle. */
2187fcfd9adeSVladimir Sementsov-Ogievskiy int64_t aligned_bytes = bytes & ~(align - 1);
218885c97ca7SKevin Wolf ret = bdrv_aligned_pwritev(child, req, offset, aligned_bytes, align,
218928c4da28SVladimir Sementsov-Ogievskiy NULL, 0, flags);
21909eeb6dd1SFam Zheng if (ret < 0) {
21917a3f542fSVladimir Sementsov-Ogievskiy goto out;
21929eeb6dd1SFam Zheng }
21939eeb6dd1SFam Zheng bytes -= aligned_bytes;
21949eeb6dd1SFam Zheng offset += aligned_bytes;
21959eeb6dd1SFam Zheng }
21969eeb6dd1SFam Zheng
21979eeb6dd1SFam Zheng assert(!bytes || (offset & (align - 1)) == 0);
21989eeb6dd1SFam Zheng if (bytes) {
21997a3f542fSVladimir Sementsov-Ogievskiy assert(align == pad.tail + bytes);
22009eeb6dd1SFam Zheng
22017a3f542fSVladimir Sementsov-Ogievskiy qemu_iovec_init_buf(&local_qiov, pad.tail_buf, align);
220285c97ca7SKevin Wolf ret = bdrv_aligned_pwritev(child, req, offset, align, align,
220328c4da28SVladimir Sementsov-Ogievskiy &local_qiov, 0,
220428c4da28SVladimir Sementsov-Ogievskiy flags & ~BDRV_REQ_ZERO_WRITE);
22059eeb6dd1SFam Zheng }
22069eeb6dd1SFam Zheng
22077a3f542fSVladimir Sementsov-Ogievskiy out:
220818743311SHanna Czenczek bdrv_padding_finalize(&pad);
22097a3f542fSVladimir Sementsov-Ogievskiy
22107a3f542fSVladimir Sementsov-Ogievskiy return ret;
22119eeb6dd1SFam Zheng }
22129eeb6dd1SFam Zheng
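/*
 * Worked example for bdrv_co_do_zero_pwritev() above (hypothetical
 * request, request_alignment = 4096): zeroing offset = 5120,
 * bytes = 10240 is carried out as
 *
 *     [ 4096,  8192)  head, read-modify-write, zeroing [ 5120,  8192)
 *     [ 8192, 12288)  middle, real zero write (BDRV_REQ_ZERO_WRITE)
 *     [12288, 16384)  tail, read-modify-write, zeroing [12288, 15360)
 *
 * The padded head and tail go through bdrv_aligned_pwritev() without
 * BDRV_REQ_ZERO_WRITE because their buffers carry preserved old data
 * around the zeroed middle.
 */
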
221361007b31SStefan Hajnoczi /*
221461007b31SStefan Hajnoczi * Handle a write request in coroutine context
221561007b31SStefan Hajnoczi */
2216a03ef88fSKevin Wolf int coroutine_fn bdrv_co_pwritev(BdrvChild *child,
2217e9e52efdSVladimir Sementsov-Ogievskiy int64_t offset, int64_t bytes, QEMUIOVector *qiov,
221861007b31SStefan Hajnoczi BdrvRequestFlags flags)
221961007b31SStefan Hajnoczi {
2220967d7905SEmanuele Giuseppe Esposito IO_CODE();
22211acc3466SVladimir Sementsov-Ogievskiy return bdrv_co_pwritev_part(child, offset, bytes, qiov, 0, flags);
22221acc3466SVladimir Sementsov-Ogievskiy }
22231acc3466SVladimir Sementsov-Ogievskiy
22241acc3466SVladimir Sementsov-Ogievskiy int coroutine_fn bdrv_co_pwritev_part(BdrvChild *child,
222537e9403eSVladimir Sementsov-Ogievskiy int64_t offset, int64_t bytes, QEMUIOVector *qiov, size_t qiov_offset,
22261acc3466SVladimir Sementsov-Ogievskiy BdrvRequestFlags flags)
22271acc3466SVladimir Sementsov-Ogievskiy {
2228a03ef88fSKevin Wolf BlockDriverState *bs = child->bs;
222961007b31SStefan Hajnoczi BdrvTrackedRequest req;
2230a5b8dd2cSEric Blake uint64_t align = bs->bl.request_alignment;
22317a3f542fSVladimir Sementsov-Ogievskiy BdrvRequestPadding pad;
223261007b31SStefan Hajnoczi int ret;
2233f0deecffSVladimir Sementsov-Ogievskiy bool padded = false;
2234967d7905SEmanuele Giuseppe Esposito IO_CODE();
223561007b31SStefan Hajnoczi
223637e9403eSVladimir Sementsov-Ogievskiy trace_bdrv_co_pwritev_part(child->bs, offset, bytes, flags);
2237f42cf447SDaniel P. Berrange
22381e97be91SEmanuele Giuseppe Esposito if (!bdrv_co_is_inserted(bs)) {
223961007b31SStefan Hajnoczi return -ENOMEDIUM;
224061007b31SStefan Hajnoczi }
224161007b31SStefan Hajnoczi
22422aaa3f9bSVladimir Sementsov-Ogievskiy if (flags & BDRV_REQ_ZERO_WRITE) {
22432aaa3f9bSVladimir Sementsov-Ogievskiy ret = bdrv_check_qiov_request(offset, bytes, qiov, qiov_offset, NULL);
22442aaa3f9bSVladimir Sementsov-Ogievskiy } else {
224563f4ad11SVladimir Sementsov-Ogievskiy ret = bdrv_check_request32(offset, bytes, qiov, qiov_offset);
22462aaa3f9bSVladimir Sementsov-Ogievskiy }
224761007b31SStefan Hajnoczi if (ret < 0) {
224861007b31SStefan Hajnoczi return ret;
224961007b31SStefan Hajnoczi }
225061007b31SStefan Hajnoczi
2251f2208fdcSAlberto Garcia /* If the request is misaligned then we can't make it efficient */
2252f2208fdcSAlberto Garcia if ((flags & BDRV_REQ_NO_FALLBACK) &&
2253f2208fdcSAlberto Garcia !QEMU_IS_ALIGNED(offset | bytes, align))
2254f2208fdcSAlberto Garcia {
2255f2208fdcSAlberto Garcia return -ENOTSUP;
2256f2208fdcSAlberto Garcia }
2257f2208fdcSAlberto Garcia
2258ac9d00bfSVladimir Sementsov-Ogievskiy if (bytes == 0 && !QEMU_IS_ALIGNED(offset, bs->bl.request_alignment)) {
2259ac9d00bfSVladimir Sementsov-Ogievskiy /*
2260ac9d00bfSVladimir Sementsov-Ogievskiy * Aligning a zero-length request is nonsense. Even if the driver gives
2261ac9d00bfSVladimir Sementsov-Ogievskiy * special meaning to zero length (like qcow2_co_pwritev_compressed_part),
2262ac9d00bfSVladimir Sementsov-Ogievskiy * we can't pass it to the driver due to request_alignment.
2263ac9d00bfSVladimir Sementsov-Ogievskiy *
2264ac9d00bfSVladimir Sementsov-Ogievskiy * Still, there is no reason to return an error if someone does an
2265ac9d00bfSVladimir Sementsov-Ogievskiy * unaligned zero-length write occasionally.
2266ac9d00bfSVladimir Sementsov-Ogievskiy */
2267ac9d00bfSVladimir Sementsov-Ogievskiy return 0;
2268ac9d00bfSVladimir Sementsov-Ogievskiy }
2269ac9d00bfSVladimir Sementsov-Ogievskiy
2270f0deecffSVladimir Sementsov-Ogievskiy if (!(flags & BDRV_REQ_ZERO_WRITE)) {
227161007b31SStefan Hajnoczi /*
2272f0deecffSVladimir Sementsov-Ogievskiy * Pad the request for the following read-modify-write cycle.
2273f0deecffSVladimir Sementsov-Ogievskiy * bdrv_co_do_zero_pwritev() does its own alignment, so we align
2274f0deecffSVladimir Sementsov-Ogievskiy * here only if there is no ZERO flag.
227561007b31SStefan Hajnoczi */
227618743311SHanna Czenczek ret = bdrv_pad_request(bs, &qiov, &qiov_offset, &offset, &bytes, true,
227718743311SHanna Czenczek &pad, &padded, &flags);
227898ca4549SVladimir Sementsov-Ogievskiy if (ret < 0) {
227998ca4549SVladimir Sementsov-Ogievskiy return ret;
228098ca4549SVladimir Sementsov-Ogievskiy }
2281f0deecffSVladimir Sementsov-Ogievskiy }
2282f0deecffSVladimir Sementsov-Ogievskiy
2283f0deecffSVladimir Sementsov-Ogievskiy bdrv_inc_in_flight(bs);
2284ebde595cSFam Zheng tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE);
228561007b31SStefan Hajnoczi
228618a59f03SAnton Nefedov if (flags & BDRV_REQ_ZERO_WRITE) {
2287f0deecffSVladimir Sementsov-Ogievskiy assert(!padded);
228885c97ca7SKevin Wolf ret = bdrv_co_do_zero_pwritev(child, offset, bytes, flags, &req);
22899eeb6dd1SFam Zheng goto out;
22909eeb6dd1SFam Zheng }
22919eeb6dd1SFam Zheng
2292f0deecffSVladimir Sementsov-Ogievskiy if (padded) {
2293f0deecffSVladimir Sementsov-Ogievskiy /*
2294f0deecffSVladimir Sementsov-Ogievskiy * Request was unaligned to request_alignment and therefore
2295f0deecffSVladimir Sementsov-Ogievskiy * padded. We are going to do read-modify-write, and must
2296f0deecffSVladimir Sementsov-Ogievskiy * serialize the request to prevent interactions of the
2297f0deecffSVladimir Sementsov-Ogievskiy * widened region with other transactions.
2298f0deecffSVladimir Sementsov-Ogievskiy */
229945e62b46SVladimir Sementsov-Ogievskiy assert(!(flags & BDRV_REQ_NO_WAIT));
23008ac5aab2SVladimir Sementsov-Ogievskiy bdrv_make_request_serialising(&req, align);
23017a3f542fSVladimir Sementsov-Ogievskiy bdrv_padding_rmw_read(child, &req, &pad, false);
230261007b31SStefan Hajnoczi }
230361007b31SStefan Hajnoczi
230485c97ca7SKevin Wolf ret = bdrv_aligned_pwritev(child, &req, offset, bytes, align,
23051acc3466SVladimir Sementsov-Ogievskiy qiov, qiov_offset, flags);
230661007b31SStefan Hajnoczi
230718743311SHanna Czenczek bdrv_padding_finalize(&pad);
230861007b31SStefan Hajnoczi
23099eeb6dd1SFam Zheng out:
23109eeb6dd1SFam Zheng tracked_request_end(&req);
231199723548SPaolo Bonzini bdrv_dec_in_flight(bs);
23127a3f542fSVladimir Sementsov-Ogievskiy
231361007b31SStefan Hajnoczi return ret;
231461007b31SStefan Hajnoczi }
231561007b31SStefan Hajnoczi
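/*
 * Illustrative call of the function above (hypothetical caller): an
 * unaligned guest write such as
 *
 *     QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, 512);
 *     ret = bdrv_co_pwritev(child, 512, 512, &qiov, 0);
 *
 * on a node with request_alignment = 4096 takes the "padded" path:
 * the request is widened to [0, 4096), serialised against overlapping
 * requests, the surrounding old data is read back, and a single
 * aligned bdrv_aligned_pwritev() covers the widened region.
 */
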
2316a03ef88fSKevin Wolf int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
2317e9e52efdSVladimir Sementsov-Ogievskiy int64_t bytes, BdrvRequestFlags flags)
231861007b31SStefan Hajnoczi {
2319384a48fbSEmanuele Giuseppe Esposito IO_CODE();
2320f5a5ca79SManos Pitsidianakis trace_bdrv_co_pwrite_zeroes(child->bs, offset, bytes, flags);
2321abaf8b75SKevin Wolf assert_bdrv_graph_readable();
232261007b31SStefan Hajnoczi
2323f5a5ca79SManos Pitsidianakis return bdrv_co_pwritev(child, offset, bytes, NULL,
232461007b31SStefan Hajnoczi BDRV_REQ_ZERO_WRITE | flags);
232561007b31SStefan Hajnoczi }
232661007b31SStefan Hajnoczi
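/*
 * Typical use (hypothetical caller): discard-like zeroing that lets
 * the driver deallocate the range if it can:
 *
 *     ret = bdrv_co_pwrite_zeroes(child, offset, bytes,
 *                                 BDRV_REQ_MAY_UNMAP);
 *
 * This is simply bdrv_co_pwritev() with a NULL qiov plus
 * BDRV_REQ_ZERO_WRITE, so the padding and serialising rules above
 * apply unchanged.
 */
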
23274085f5c7SJohn Snow /*
23284085f5c7SJohn Snow * Flush ALL BDSes regardless of whether they are reachable via a BlockBackend or not.
23294085f5c7SJohn Snow */
23304085f5c7SJohn Snow int bdrv_flush_all(void)
23314085f5c7SJohn Snow {
23324085f5c7SJohn Snow BdrvNextIterator it;
23334085f5c7SJohn Snow BlockDriverState *bs = NULL;
23344085f5c7SJohn Snow int result = 0;
23354085f5c7SJohn Snow
2336f791bf7fSEmanuele Giuseppe Esposito GLOBAL_STATE_CODE();
23372b3912f1SKevin Wolf GRAPH_RDLOCK_GUARD_MAINLOOP();
2338f791bf7fSEmanuele Giuseppe Esposito
2339c8aa7895SPavel Dovgalyuk /*
2340c8aa7895SPavel Dovgalyuk * The bdrv queue is managed by record/replay;
2341c8aa7895SPavel Dovgalyuk * creating a new flush request for stopping
2342c8aa7895SPavel Dovgalyuk * the VM may break determinism
2343c8aa7895SPavel Dovgalyuk */
2344c8aa7895SPavel Dovgalyuk if (replay_events_enabled()) {
2345c8aa7895SPavel Dovgalyuk return result;
2346c8aa7895SPavel Dovgalyuk }
2347c8aa7895SPavel Dovgalyuk
23484085f5c7SJohn Snow for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
2349b49f4755SStefan Hajnoczi int ret = bdrv_flush(bs);
23504085f5c7SJohn Snow if (ret < 0 && !result) {
23514085f5c7SJohn Snow result = ret;
23524085f5c7SJohn Snow }
23534085f5c7SJohn Snow }
23544085f5c7SJohn Snow
23554085f5c7SJohn Snow return result;
23564085f5c7SJohn Snow }
23574085f5c7SJohn Snow
235861007b31SStefan Hajnoczi /*
235961007b31SStefan Hajnoczi * Returns the allocation status of the specified byte range.
236061007b31SStefan Hajnoczi * Drivers not implementing the functionality are assumed to not support
236161007b31SStefan Hajnoczi * backing files, hence their whole range is reported as allocated.
236261007b31SStefan Hajnoczi *
236386a3d5c6SEric Blake * If 'want_zero' is true, the caller is querying for mapping
236486a3d5c6SEric Blake * purposes, with a focus on valid BDRV_BLOCK_OFFSET_VALID, _DATA, and
236586a3d5c6SEric Blake * _ZERO where possible; otherwise, the result favors larger 'pnum',
236686a3d5c6SEric Blake * with a focus on accurate BDRV_BLOCK_ALLOCATED.
2367c9ce8c4dSEric Blake *
23682e8bc787SEric Blake * If 'offset' is beyond the end of the disk image the return value is
2369fb0d8654SEric Blake * BDRV_BLOCK_EOF and 'pnum' is set to 0.
237061007b31SStefan Hajnoczi *
23712e8bc787SEric Blake * 'bytes' is the max value 'pnum' should be set to. If bytes goes
2372fb0d8654SEric Blake * beyond the end of the disk image it will be clamped; if 'pnum' is set to
2373fb0d8654SEric Blake * the end of the image, then the returned value will include BDRV_BLOCK_EOF.
237467a0fd2aSFam Zheng *
23752e8bc787SEric Blake * 'pnum' is set to the number of bytes (including and immediately
23762e8bc787SEric Blake * following the specified offset) that are easily known to be in the
23772e8bc787SEric Blake * same allocated/unallocated state. Note that a second call starting
23782e8bc787SEric Blake * at the original offset plus returned pnum may have the same status.
23792e8bc787SEric Blake * The returned value is non-zero on success except at end-of-file.
23802e8bc787SEric Blake *
23812e8bc787SEric Blake * Returns negative errno on failure. Otherwise, if the
23822e8bc787SEric Blake * BDRV_BLOCK_OFFSET_VALID bit is set, 'map' and 'file' (if non-NULL) are
23832e8bc787SEric Blake * set to the host mapping and BDS corresponding to the guest offset.
238461007b31SStefan Hajnoczi */
23857ff9579eSKevin Wolf static int coroutine_fn GRAPH_RDLOCK
2386b170e929SPaolo Bonzini bdrv_co_do_block_status(BlockDriverState *bs, bool want_zero,
23872e8bc787SEric Blake int64_t offset, int64_t bytes,
23887ff9579eSKevin Wolf int64_t *pnum, int64_t *map, BlockDriverState **file)
238961007b31SStefan Hajnoczi {
23902e8bc787SEric Blake int64_t total_size;
23912e8bc787SEric Blake int64_t n; /* bytes */
2392efa6e2edSEric Blake int ret;
23932e8bc787SEric Blake int64_t local_map = 0;
2394298a1665SEric Blake BlockDriverState *local_file = NULL;
2395efa6e2edSEric Blake int64_t aligned_offset, aligned_bytes;
2396efa6e2edSEric Blake uint32_t align;
2397549ec0d9SMax Reitz bool has_filtered_child;
239861007b31SStefan Hajnoczi
2399298a1665SEric Blake assert(pnum);
24007ff9579eSKevin Wolf assert_bdrv_graph_readable();
2401298a1665SEric Blake *pnum = 0;
24020af02bd1SPaolo Bonzini total_size = bdrv_co_getlength(bs);
24032e8bc787SEric Blake if (total_size < 0) {
24042e8bc787SEric Blake ret = total_size;
2405298a1665SEric Blake goto early_out;
240661007b31SStefan Hajnoczi }
240761007b31SStefan Hajnoczi
24082e8bc787SEric Blake if (offset >= total_size) {
2409298a1665SEric Blake ret = BDRV_BLOCK_EOF;
2410298a1665SEric Blake goto early_out;
241161007b31SStefan Hajnoczi }
24122e8bc787SEric Blake if (!bytes) {
2413298a1665SEric Blake ret = 0;
2414298a1665SEric Blake goto early_out;
24159cdcfd9fSEric Blake }
241661007b31SStefan Hajnoczi
24172e8bc787SEric Blake n = total_size - offset;
24182e8bc787SEric Blake if (n < bytes) {
24192e8bc787SEric Blake bytes = n;
242061007b31SStefan Hajnoczi }
242161007b31SStefan Hajnoczi
24220af02bd1SPaolo Bonzini /* Must be non-NULL or bdrv_co_getlength() would have failed */
2423d470ad42SMax Reitz assert(bs->drv);
2424549ec0d9SMax Reitz has_filtered_child = bdrv_filter_child(bs);
2425549ec0d9SMax Reitz if (!bs->drv->bdrv_co_block_status && !has_filtered_child) {
24262e8bc787SEric Blake *pnum = bytes;
242761007b31SStefan Hajnoczi ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
24282e8bc787SEric Blake if (offset + bytes == total_size) {
2429fb0d8654SEric Blake ret |= BDRV_BLOCK_EOF;
2430fb0d8654SEric Blake }
243161007b31SStefan Hajnoczi if (bs->drv->protocol_name) {
24322e8bc787SEric Blake ret |= BDRV_BLOCK_OFFSET_VALID;
24332e8bc787SEric Blake local_map = offset;
2434298a1665SEric Blake local_file = bs;
243561007b31SStefan Hajnoczi }
2436298a1665SEric Blake goto early_out;
243761007b31SStefan Hajnoczi }
243861007b31SStefan Hajnoczi
243999723548SPaolo Bonzini bdrv_inc_in_flight(bs);
2440efa6e2edSEric Blake
2441efa6e2edSEric Blake /* Round out to request_alignment boundaries */
244286a3d5c6SEric Blake align = bs->bl.request_alignment;
2443efa6e2edSEric Blake aligned_offset = QEMU_ALIGN_DOWN(offset, align);
2444efa6e2edSEric Blake aligned_bytes = ROUND_UP(offset + bytes, align) - aligned_offset;
2445efa6e2edSEric Blake
2446549ec0d9SMax Reitz if (bs->drv->bdrv_co_block_status) {
24470bc329fbSHanna Reitz /*
24480bc329fbSHanna Reitz * Use the block-status cache only for protocol nodes: Format
24490bc329fbSHanna Reitz * drivers generally answer status queries quickly, but protocol
24500bc329fbSHanna Reitz * drivers often need to get information from outside of qemu, so
24510bc329fbSHanna Reitz * we do not have control over the actual implementation. There
24520bc329fbSHanna Reitz * have been cases where inquiring the status took an unreasonably
24530bc329fbSHanna Reitz * long time, and we can do nothing in qemu to fix it.
24540bc329fbSHanna Reitz * This is especially problematic for images with large data areas,
24550bc329fbSHanna Reitz * because finding the few holes in them and giving them special
24560bc329fbSHanna Reitz * treatment does not gain much performance. Therefore, we try to
24570bc329fbSHanna Reitz * cache the last-identified data region.
24580bc329fbSHanna Reitz *
24590bc329fbSHanna Reitz * Second, limiting ourselves to protocol nodes allows us to assume
24600bc329fbSHanna Reitz * the block status for data regions to be DATA | OFFSET_VALID, and
24610bc329fbSHanna Reitz * that the host offset is the same as the guest offset.
24620bc329fbSHanna Reitz *
24630bc329fbSHanna Reitz * Note that it is possible that external writers zero parts of
24640bc329fbSHanna Reitz * the cached regions without the cache being invalidated, and so
24650bc329fbSHanna Reitz * we may report zeroes as data. This is not catastrophic,
24660bc329fbSHanna Reitz * however, because reporting zeroes as data is fine.
24670bc329fbSHanna Reitz */
24680bc329fbSHanna Reitz if (QLIST_EMPTY(&bs->children) &&
24690bc329fbSHanna Reitz bdrv_bsc_is_data(bs, aligned_offset, pnum))
24700bc329fbSHanna Reitz {
24710bc329fbSHanna Reitz ret = BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID;
24720bc329fbSHanna Reitz local_file = bs;
24730bc329fbSHanna Reitz local_map = aligned_offset;
24740bc329fbSHanna Reitz } else {
247586a3d5c6SEric Blake ret = bs->drv->bdrv_co_block_status(bs, want_zero, aligned_offset,
247686a3d5c6SEric Blake aligned_bytes, pnum, &local_map,
247786a3d5c6SEric Blake &local_file);
24780bc329fbSHanna Reitz
24790bc329fbSHanna Reitz /*
24800bc329fbSHanna Reitz * Note that checking QLIST_EMPTY(&bs->children) is also done when
24810bc329fbSHanna Reitz * the cache is queried above. Technically, we do not need to check
24820bc329fbSHanna Reitz * it here; the worst that can happen is that we fill the cache for
24830bc329fbSHanna Reitz * non-protocol nodes, and then it is never used. However, filling
24840bc329fbSHanna Reitz * the cache requires an RCU update, so double check here to avoid
24850bc329fbSHanna Reitz * such an update if possible.
2486113b727cSHanna Reitz *
2487113b727cSHanna Reitz * Check want_zero, because we only want to update the cache when we
2488113b727cSHanna Reitz * have accurate information about what is zero and what is data.
24890bc329fbSHanna Reitz */
2490113b727cSHanna Reitz if (want_zero &&
2491113b727cSHanna Reitz ret == (BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID) &&
24920bc329fbSHanna Reitz QLIST_EMPTY(&bs->children))
24930bc329fbSHanna Reitz {
24940bc329fbSHanna Reitz /*
24950bc329fbSHanna Reitz * When a protocol driver reports BLOCK_OFFSET_VALID, the
24960bc329fbSHanna Reitz * returned local_map value must be the same as the offset we
24970bc329fbSHanna Reitz * have passed (aligned_offset), and local_bs must be the node
24980bc329fbSHanna Reitz * itself.
24990bc329fbSHanna Reitz * Assert this, because we follow this rule when reading from
25000bc329fbSHanna Reitz * the cache (see the `local_file = bs` and
25010bc329fbSHanna Reitz * `local_map = aligned_offset` assignments above), and the
25020bc329fbSHanna Reitz * result the cache delivers must be the same as the driver
25030bc329fbSHanna Reitz * would deliver.
25040bc329fbSHanna Reitz */
25050bc329fbSHanna Reitz assert(local_file == bs);
25060bc329fbSHanna Reitz assert(local_map == aligned_offset);
25070bc329fbSHanna Reitz bdrv_bsc_fill(bs, aligned_offset, *pnum);
25080bc329fbSHanna Reitz }
25090bc329fbSHanna Reitz }
2510549ec0d9SMax Reitz } else {
2511549ec0d9SMax Reitz /* Default code for filters */
2512549ec0d9SMax Reitz
2513549ec0d9SMax Reitz local_file = bdrv_filter_bs(bs);
2514549ec0d9SMax Reitz assert(local_file);
2515549ec0d9SMax Reitz
2516549ec0d9SMax Reitz *pnum = aligned_bytes;
2517549ec0d9SMax Reitz local_map = aligned_offset;
2518549ec0d9SMax Reitz ret = BDRV_BLOCK_RAW | BDRV_BLOCK_OFFSET_VALID;
2519549ec0d9SMax Reitz }
252086a3d5c6SEric Blake if (ret < 0) {
252186a3d5c6SEric Blake *pnum = 0;
252286a3d5c6SEric Blake goto out;
252386a3d5c6SEric Blake }
2524efa6e2edSEric Blake
2525efa6e2edSEric Blake /*
2526636cb512SEric Blake * The driver's result must be a non-zero multiple of request_alignment.
2527efa6e2edSEric Blake * Clamp pnum and adjust map to original request.
2528efa6e2edSEric Blake */
2529636cb512SEric Blake assert(*pnum && QEMU_IS_ALIGNED(*pnum, align) &&
2530636cb512SEric Blake align > offset - aligned_offset);
253169f47505SVladimir Sementsov-Ogievskiy if (ret & BDRV_BLOCK_RECURSE) {
253269f47505SVladimir Sementsov-Ogievskiy assert(ret & BDRV_BLOCK_DATA);
253369f47505SVladimir Sementsov-Ogievskiy assert(ret & BDRV_BLOCK_OFFSET_VALID);
253469f47505SVladimir Sementsov-Ogievskiy assert(!(ret & BDRV_BLOCK_ZERO));
253569f47505SVladimir Sementsov-Ogievskiy }
253669f47505SVladimir Sementsov-Ogievskiy
2537efa6e2edSEric Blake *pnum -= offset - aligned_offset;
2538efa6e2edSEric Blake if (*pnum > bytes) {
2539efa6e2edSEric Blake *pnum = bytes;
2540efa6e2edSEric Blake }
2541efa6e2edSEric Blake if (ret & BDRV_BLOCK_OFFSET_VALID) {
2542efa6e2edSEric Blake local_map += offset - aligned_offset;
2543efa6e2edSEric Blake }
254461007b31SStefan Hajnoczi
254561007b31SStefan Hajnoczi if (ret & BDRV_BLOCK_RAW) {
2546298a1665SEric Blake assert(ret & BDRV_BLOCK_OFFSET_VALID && local_file);
2547b170e929SPaolo Bonzini ret = bdrv_co_do_block_status(local_file, want_zero, local_map,
25482e8bc787SEric Blake *pnum, pnum, &local_map, &local_file);
254999723548SPaolo Bonzini goto out;
255061007b31SStefan Hajnoczi }
255161007b31SStefan Hajnoczi
255261007b31SStefan Hajnoczi if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
255361007b31SStefan Hajnoczi ret |= BDRV_BLOCK_ALLOCATED;
2554d40f4a56SAlberto Garcia } else if (bs->drv->supports_backing) {
2555cb850315SMax Reitz BlockDriverState *cow_bs = bdrv_cow_bs(bs);
2556cb850315SMax Reitz
2557d40f4a56SAlberto Garcia if (!cow_bs) {
2558d40f4a56SAlberto Garcia ret |= BDRV_BLOCK_ZERO;
2559d40f4a56SAlberto Garcia } else if (want_zero) {
25600af02bd1SPaolo Bonzini int64_t size2 = bdrv_co_getlength(cow_bs);
2561c9ce8c4dSEric Blake
25622e8bc787SEric Blake if (size2 >= 0 && offset >= size2) {
256361007b31SStefan Hajnoczi ret |= BDRV_BLOCK_ZERO;
256461007b31SStefan Hajnoczi }
25657b1efe99SVladimir Sementsov-Ogievskiy }
256661007b31SStefan Hajnoczi }
256761007b31SStefan Hajnoczi
256869f47505SVladimir Sementsov-Ogievskiy if (want_zero && ret & BDRV_BLOCK_RECURSE &&
256969f47505SVladimir Sementsov-Ogievskiy local_file && local_file != bs &&
257061007b31SStefan Hajnoczi (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
257161007b31SStefan Hajnoczi (ret & BDRV_BLOCK_OFFSET_VALID)) {
25722e8bc787SEric Blake int64_t file_pnum;
25732e8bc787SEric Blake int ret2;
257461007b31SStefan Hajnoczi
2575b170e929SPaolo Bonzini ret2 = bdrv_co_do_block_status(local_file, want_zero, local_map,
25762e8bc787SEric Blake *pnum, &file_pnum, NULL, NULL);
257761007b31SStefan Hajnoczi if (ret2 >= 0) {
257861007b31SStefan Hajnoczi /* Ignore errors. This is just providing extra information; it
257961007b31SStefan Hajnoczi * is useful but not necessary.
258061007b31SStefan Hajnoczi */
2581c61e684eSEric Blake if (ret2 & BDRV_BLOCK_EOF &&
2582c61e684eSEric Blake (!file_pnum || ret2 & BDRV_BLOCK_ZERO)) {
2583c61e684eSEric Blake /*
2584c61e684eSEric Blake * It is valid for the format block driver to read
2585c61e684eSEric Blake * beyond the end of the underlying file's current
2586c61e684eSEric Blake * size; such areas read as zero.
2587c61e684eSEric Blake */
258861007b31SStefan Hajnoczi ret |= BDRV_BLOCK_ZERO;
258961007b31SStefan Hajnoczi } else {
259061007b31SStefan Hajnoczi /* Limit request to the range reported by the protocol driver */
259161007b31SStefan Hajnoczi *pnum = file_pnum;
259261007b31SStefan Hajnoczi ret |= (ret2 & BDRV_BLOCK_ZERO);
259361007b31SStefan Hajnoczi }
259461007b31SStefan Hajnoczi }
25958a9be799SFiona Ebner
25968a9be799SFiona Ebner /*
25978a9be799SFiona Ebner * Now that the recursive search was done, clear the flag. Otherwise,
25988a9be799SFiona Ebner * with more complicated block graphs like snapshot-access ->
25998a9be799SFiona Ebner * copy-before-write -> qcow2, where the return value will be propagated
26008a9be799SFiona Ebner * further up to a parent bdrv_co_do_block_status() call, both the
26018a9be799SFiona Ebner * BDRV_BLOCK_RECURSE and BDRV_BLOCK_ZERO flags would be set, which is
26028a9be799SFiona Ebner * not allowed.
26038a9be799SFiona Ebner */
26048a9be799SFiona Ebner ret &= ~BDRV_BLOCK_RECURSE;
260561007b31SStefan Hajnoczi }
260661007b31SStefan Hajnoczi
260799723548SPaolo Bonzini out:
260899723548SPaolo Bonzini bdrv_dec_in_flight(bs);
26092e8bc787SEric Blake if (ret >= 0 && offset + *pnum == total_size) {
2610fb0d8654SEric Blake ret |= BDRV_BLOCK_EOF;
2611fb0d8654SEric Blake }
2612298a1665SEric Blake early_out:
2613298a1665SEric Blake if (file) {
2614298a1665SEric Blake *file = local_file;
2615298a1665SEric Blake }
26162e8bc787SEric Blake if (map) {
26172e8bc787SEric Blake *map = local_map;
26182e8bc787SEric Blake }
261961007b31SStefan Hajnoczi return ret;
262061007b31SStefan Hajnoczi }
262161007b31SStefan Hajnoczi
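/*
 * Illustrative interpretation of the status returned above
 * (hypothetical caller using the public wrapper defined below):
 *
 *     int64_t pnum, map;
 *     BlockDriverState *file;
 *     int s = bdrv_co_block_status(bs, offset, bytes, &pnum, &map, &file);
 *
 *     if (s < 0)                        -> error
 *     if (s & BDRV_BLOCK_ZERO)          -> [offset, offset + pnum) reads as zero
 *     if (s & BDRV_BLOCK_OFFSET_VALID)  -> the data lives at 'map' within 'file'
 *     offset += pnum;                   -> pnum > 0 except at end-of-file
 */
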
262221c2283eSVladimir Sementsov-Ogievskiy int coroutine_fn
2623f9e694cbSVladimir Sementsov-Ogievskiy bdrv_co_common_block_status_above(BlockDriverState *bs,
2624ba3f0e25SFam Zheng BlockDriverState *base,
26253555a432SVladimir Sementsov-Ogievskiy bool include_base,
2626c9ce8c4dSEric Blake bool want_zero,
26275b648c67SEric Blake int64_t offset,
26285b648c67SEric Blake int64_t bytes,
26295b648c67SEric Blake int64_t *pnum,
26305b648c67SEric Blake int64_t *map,
2631a92b1b06SEric Blake BlockDriverState **file,
2632a92b1b06SEric Blake int *depth)
2633ba3f0e25SFam Zheng {
263467c095c8SVladimir Sementsov-Ogievskiy int ret;
2635ba3f0e25SFam Zheng BlockDriverState *p;
263667c095c8SVladimir Sementsov-Ogievskiy int64_t eof = 0;
2637a92b1b06SEric Blake int dummy;
26381581a70dSEmanuele Giuseppe Esposito IO_CODE();
2639ba3f0e25SFam Zheng
26403555a432SVladimir Sementsov-Ogievskiy assert(!include_base || base); /* Can't include NULL base */
26417ff9579eSKevin Wolf assert_bdrv_graph_readable();
264267c095c8SVladimir Sementsov-Ogievskiy
2643a92b1b06SEric Blake if (!depth) {
2644a92b1b06SEric Blake depth = &dummy;
2645a92b1b06SEric Blake }
2646a92b1b06SEric Blake *depth = 0;
2647a92b1b06SEric Blake
2648624f27bbSVladimir Sementsov-Ogievskiy if (!include_base && bs == base) {
2649624f27bbSVladimir Sementsov-Ogievskiy *pnum = bytes;
2650624f27bbSVladimir Sementsov-Ogievskiy return 0;
2651624f27bbSVladimir Sementsov-Ogievskiy }
2652624f27bbSVladimir Sementsov-Ogievskiy
2653b170e929SPaolo Bonzini ret = bdrv_co_do_block_status(bs, want_zero, offset, bytes, pnum,
2654b170e929SPaolo Bonzini map, file);
2655a92b1b06SEric Blake ++*depth;
26563555a432SVladimir Sementsov-Ogievskiy if (ret < 0 || *pnum == 0 || ret & BDRV_BLOCK_ALLOCATED || bs == base) {
265767c095c8SVladimir Sementsov-Ogievskiy return ret;
265867c095c8SVladimir Sementsov-Ogievskiy }
265967c095c8SVladimir Sementsov-Ogievskiy
266067c095c8SVladimir Sementsov-Ogievskiy if (ret & BDRV_BLOCK_EOF) {
266167c095c8SVladimir Sementsov-Ogievskiy eof = offset + *pnum;
266267c095c8SVladimir Sementsov-Ogievskiy }
266367c095c8SVladimir Sementsov-Ogievskiy
266467c095c8SVladimir Sementsov-Ogievskiy assert(*pnum <= bytes);
266567c095c8SVladimir Sementsov-Ogievskiy bytes = *pnum;
266667c095c8SVladimir Sementsov-Ogievskiy
26673555a432SVladimir Sementsov-Ogievskiy for (p = bdrv_filter_or_cow_bs(bs); include_base || p != base;
266867c095c8SVladimir Sementsov-Ogievskiy p = bdrv_filter_or_cow_bs(p))
266967c095c8SVladimir Sementsov-Ogievskiy {
2670b170e929SPaolo Bonzini ret = bdrv_co_do_block_status(p, want_zero, offset, bytes, pnum,
2671b170e929SPaolo Bonzini map, file);
2672a92b1b06SEric Blake ++*depth;
2673c61e684eSEric Blake if (ret < 0) {
267467c095c8SVladimir Sementsov-Ogievskiy return ret;
2675c61e684eSEric Blake }
267667c095c8SVladimir Sementsov-Ogievskiy if (*pnum == 0) {
2677c61e684eSEric Blake /*
267867c095c8SVladimir Sementsov-Ogievskiy * The top layer deferred to this layer, and because this layer is
267967c095c8SVladimir Sementsov-Ogievskiy * short, any zeroes that we synthesize beyond EOF behave as if they
268067c095c8SVladimir Sementsov-Ogievskiy * were allocated at this layer.
268167c095c8SVladimir Sementsov-Ogievskiy *
268267c095c8SVladimir Sementsov-Ogievskiy * We don't include BDRV_BLOCK_EOF into ret, as upper layer may be
268367c095c8SVladimir Sementsov-Ogievskiy * larger. We'll add BDRV_BLOCK_EOF if needed at function end, see
268467c095c8SVladimir Sementsov-Ogievskiy * below.
2685c61e684eSEric Blake */
268667c095c8SVladimir Sementsov-Ogievskiy assert(ret & BDRV_BLOCK_EOF);
26875b648c67SEric Blake *pnum = bytes;
268867c095c8SVladimir Sementsov-Ogievskiy if (file) {
268967c095c8SVladimir Sementsov-Ogievskiy *file = p;
2690c61e684eSEric Blake }
269167c095c8SVladimir Sementsov-Ogievskiy ret = BDRV_BLOCK_ZERO | BDRV_BLOCK_ALLOCATED;
2692ba3f0e25SFam Zheng break;
2693ba3f0e25SFam Zheng }
269467c095c8SVladimir Sementsov-Ogievskiy if (ret & BDRV_BLOCK_ALLOCATED) {
269567c095c8SVladimir Sementsov-Ogievskiy /*
269667c095c8SVladimir Sementsov-Ogievskiy * We've found the node and the status, so we must break.
269767c095c8SVladimir Sementsov-Ogievskiy *
269867c095c8SVladimir Sementsov-Ogievskiy * Drop BDRV_BLOCK_EOF, as it's not for upper layer, which may be
269967c095c8SVladimir Sementsov-Ogievskiy * larger. We'll add BDRV_BLOCK_EOF if needed at function end, see
270067c095c8SVladimir Sementsov-Ogievskiy * below.
270167c095c8SVladimir Sementsov-Ogievskiy */
270267c095c8SVladimir Sementsov-Ogievskiy ret &= ~BDRV_BLOCK_EOF;
270367c095c8SVladimir Sementsov-Ogievskiy break;
2704ba3f0e25SFam Zheng }
270567c095c8SVladimir Sementsov-Ogievskiy
27063555a432SVladimir Sementsov-Ogievskiy if (p == base) {
27073555a432SVladimir Sementsov-Ogievskiy assert(include_base);
27083555a432SVladimir Sementsov-Ogievskiy break;
27093555a432SVladimir Sementsov-Ogievskiy }
27103555a432SVladimir Sementsov-Ogievskiy
271167c095c8SVladimir Sementsov-Ogievskiy /*
271267c095c8SVladimir Sementsov-Ogievskiy * OK, the [offset, offset + *pnum) region is unallocated on this layer;
271367c095c8SVladimir Sementsov-Ogievskiy * let's continue diving down the chain.
271467c095c8SVladimir Sementsov-Ogievskiy */
271567c095c8SVladimir Sementsov-Ogievskiy assert(*pnum <= bytes);
271667c095c8SVladimir Sementsov-Ogievskiy bytes = *pnum;
271767c095c8SVladimir Sementsov-Ogievskiy }
271867c095c8SVladimir Sementsov-Ogievskiy
271967c095c8SVladimir Sementsov-Ogievskiy if (offset + *pnum == eof) {
272067c095c8SVladimir Sementsov-Ogievskiy ret |= BDRV_BLOCK_EOF;
272167c095c8SVladimir Sementsov-Ogievskiy }
272267c095c8SVladimir Sementsov-Ogievskiy
2723ba3f0e25SFam Zheng return ret;
2724ba3f0e25SFam Zheng }
2725ba3f0e25SFam Zheng
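/*
 * Worked example for the chain walk above (hypothetical chain
 * top -> base, include_base = true): if 'top' reports the range
 * unallocated, *pnum clamps 'bytes' and the loop dives into 'base'.
 * A range allocated in 'base' returns with BDRV_BLOCK_ALLOCATED and
 * *depth == 2. If some layer is shorter than the request, *pnum comes
 * back 0 there and the zeroes synthesized beyond its EOF are reported
 * as BDRV_BLOCK_ZERO | BDRV_BLOCK_ALLOCATED at that layer.
 */
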
27267b52a921SEmanuele Giuseppe Esposito int coroutine_fn bdrv_co_block_status_above(BlockDriverState *bs,
27277b52a921SEmanuele Giuseppe Esposito BlockDriverState *base,
27287b52a921SEmanuele Giuseppe Esposito int64_t offset, int64_t bytes,
27297b52a921SEmanuele Giuseppe Esposito int64_t *pnum, int64_t *map,
27307b52a921SEmanuele Giuseppe Esposito BlockDriverState **file)
27317b52a921SEmanuele Giuseppe Esposito {
27327b52a921SEmanuele Giuseppe Esposito IO_CODE();
27337b52a921SEmanuele Giuseppe Esposito return bdrv_co_common_block_status_above(bs, base, false, true, offset,
27347b52a921SEmanuele Giuseppe Esposito bytes, pnum, map, file, NULL);
27357b52a921SEmanuele Giuseppe Esposito }
27367b52a921SEmanuele Giuseppe Esposito
27371b88457eSPaolo Bonzini int coroutine_fn bdrv_co_block_status(BlockDriverState *bs, int64_t offset,
27381b88457eSPaolo Bonzini int64_t bytes, int64_t *pnum,
273931826642SEric Blake int64_t *map, BlockDriverState **file)
2740c9ce8c4dSEric Blake {
2741384a48fbSEmanuele Giuseppe Esposito IO_CODE();
27421b88457eSPaolo Bonzini return bdrv_co_block_status_above(bs, bdrv_filter_or_cow_bs(bs),
274331826642SEric Blake offset, bytes, pnum, map, file);
2744ba3f0e25SFam Zheng }
2745ba3f0e25SFam Zheng
274646cd1e8aSAlberto Garcia /*
274746cd1e8aSAlberto Garcia * Check @bs (and its backing chain) to see if the range defined
274846cd1e8aSAlberto Garcia * by @offset and @bytes is known to read as zeroes.
274946cd1e8aSAlberto Garcia * Return 1 if that is the case, 0 otherwise, and -errno on error.
275046cd1e8aSAlberto Garcia * This test is meant to be fast rather than accurate, so returning 0
275146cd1e8aSAlberto Garcia * does not guarantee non-zero data.
275246cd1e8aSAlberto Garcia */
275346cd1e8aSAlberto Garcia int coroutine_fn bdrv_co_is_zero_fast(BlockDriverState *bs, int64_t offset,
275446cd1e8aSAlberto Garcia int64_t bytes)
275546cd1e8aSAlberto Garcia {
275646cd1e8aSAlberto Garcia int ret;
275746cd1e8aSAlberto Garcia int64_t pnum = bytes;
2758384a48fbSEmanuele Giuseppe Esposito IO_CODE();
275946cd1e8aSAlberto Garcia
276046cd1e8aSAlberto Garcia if (!bytes) {
276146cd1e8aSAlberto Garcia return 1;
276246cd1e8aSAlberto Garcia }
276346cd1e8aSAlberto Garcia
2764ce47ff20SAlberto Faria ret = bdrv_co_common_block_status_above(bs, NULL, false, false, offset,
2765a92b1b06SEric Blake bytes, &pnum, NULL, NULL, NULL);
276646cd1e8aSAlberto Garcia
276746cd1e8aSAlberto Garcia if (ret < 0) {
276846cd1e8aSAlberto Garcia return ret;
276946cd1e8aSAlberto Garcia }
277046cd1e8aSAlberto Garcia
277146cd1e8aSAlberto Garcia return (pnum == bytes) && (ret & BDRV_BLOCK_ZERO);
277246cd1e8aSAlberto Garcia }
277346cd1e8aSAlberto Garcia
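/*
 * Typical use of bdrv_co_is_zero_fast() (hypothetical caller): skip
 * work for a destination range that is already provably zero:
 *
 *     if (bdrv_co_is_zero_fast(dst_bs, offset, bytes) > 0) {
 *         return 0;   (nothing to copy)
 *     }
 *
 * Because the test favors speed over accuracy, a result of 0 only
 * means "not cheaply provable as zero" and must be treated as "may
 * contain data".
 */
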
27747b52a921SEmanuele Giuseppe Esposito int coroutine_fn bdrv_co_is_allocated(BlockDriverState *bs, int64_t offset,
27757b52a921SEmanuele Giuseppe Esposito int64_t bytes, int64_t *pnum)
27767b52a921SEmanuele Giuseppe Esposito {
27777b52a921SEmanuele Giuseppe Esposito int ret;
27787b52a921SEmanuele Giuseppe Esposito int64_t dummy;
27797b52a921SEmanuele Giuseppe Esposito IO_CODE();
27807b52a921SEmanuele Giuseppe Esposito
27817b52a921SEmanuele Giuseppe Esposito ret = bdrv_co_common_block_status_above(bs, bs, true, false, offset,
27827b52a921SEmanuele Giuseppe Esposito bytes, pnum ? pnum : &dummy, NULL,
27837b52a921SEmanuele Giuseppe Esposito NULL, NULL);
27847b52a921SEmanuele Giuseppe Esposito if (ret < 0) {
27857b52a921SEmanuele Giuseppe Esposito return ret;
27867b52a921SEmanuele Giuseppe Esposito }
27877b52a921SEmanuele Giuseppe Esposito return !!(ret & BDRV_BLOCK_ALLOCATED);
27887b52a921SEmanuele Giuseppe Esposito }
27897b52a921SEmanuele Giuseppe Esposito
279061007b31SStefan Hajnoczi /*
279161007b31SStefan Hajnoczi * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
279261007b31SStefan Hajnoczi *
2793a92b1b06SEric Blake * Return a positive depth if (a prefix of) the given range is allocated
2794a92b1b06SEric Blake * in any image between BASE and TOP (BASE is only included if include_base
2795a92b1b06SEric Blake * is set). Depth 1 is TOP, 2 is the first backing layer, and so forth.
2796170d3bd3SAndrey Shinkevich * BASE can be NULL to check if the given offset is allocated in any
2797170d3bd3SAndrey Shinkevich * image of the chain. Return 0 otherwise, or negative errno on
2798170d3bd3SAndrey Shinkevich * failure.
279961007b31SStefan Hajnoczi *
280051b0a488SEric Blake * 'pnum' is set to the number of bytes (including and immediately
280151b0a488SEric Blake * following the specified offset) that are known to be in the same
280251b0a488SEric Blake * allocated/unallocated state. Note that a subsequent call starting
280351b0a488SEric Blake * at 'offset + *pnum' may return the same allocation status (in other
280451b0a488SEric Blake * words, the result is not necessarily the maximum possible range);
280551b0a488SEric Blake * but 'pnum' will only be 0 when end of file is reached.
280661007b31SStefan Hajnoczi */
2807578ffa9fSPaolo Bonzini int coroutine_fn bdrv_co_is_allocated_above(BlockDriverState *bs,
280861007b31SStefan Hajnoczi BlockDriverState *base,
2809170d3bd3SAndrey Shinkevich bool include_base, int64_t offset,
2810170d3bd3SAndrey Shinkevich int64_t bytes, int64_t *pnum)
281161007b31SStefan Hajnoczi {
2812a92b1b06SEric Blake int depth;
28137b52a921SEmanuele Giuseppe Esposito int ret;
28147b52a921SEmanuele Giuseppe Esposito IO_CODE();
28157b52a921SEmanuele Giuseppe Esposito
2816578ffa9fSPaolo Bonzini ret = bdrv_co_common_block_status_above(bs, base, include_base, false,
2817a92b1b06SEric Blake offset, bytes, pnum, NULL, NULL,
2818a92b1b06SEric Blake &depth);
281961007b31SStefan Hajnoczi if (ret < 0) {
282061007b31SStefan Hajnoczi return ret;
2821d6a644bbSEric Blake }
282261007b31SStefan Hajnoczi
2823a92b1b06SEric Blake if (ret & BDRV_BLOCK_ALLOCATED) {
2824a92b1b06SEric Blake return depth;
2825a92b1b06SEric Blake }
2826a92b1b06SEric Blake return 0;
282761007b31SStefan Hajnoczi }
282861007b31SStefan Hajnoczi
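/*
 * Illustrative reading of the return value above (hypothetical chain
 * base <- mid <- top, include_base = false): 1 means the range is
 * allocated in 'top' itself, 2 means it is only allocated in 'mid',
 * and 0 means no layer above 'base' allocates it, so reads would be
 * forwarded down to 'base'.
 */
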
282921c2283eSVladimir Sementsov-Ogievskiy int coroutine_fn
2830b33b354fSVladimir Sementsov-Ogievskiy bdrv_co_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
28311a8ae822SKevin Wolf {
28321a8ae822SKevin Wolf BlockDriver *drv = bs->drv;
2833c4db2e25SMax Reitz BlockDriverState *child_bs = bdrv_primary_bs(bs);
2834b984b296SVladimir Sementsov-Ogievskiy int ret;
28351581a70dSEmanuele Giuseppe Esposito IO_CODE();
28361b3ff9feSKevin Wolf assert_bdrv_graph_readable();
2837b984b296SVladimir Sementsov-Ogievskiy
2838b984b296SVladimir Sementsov-Ogievskiy ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL);
2839b984b296SVladimir Sementsov-Ogievskiy if (ret < 0) {
2840b984b296SVladimir Sementsov-Ogievskiy return ret;
2841b984b296SVladimir Sementsov-Ogievskiy }
2842dc88a467SStefan Hajnoczi
2843b33b354fSVladimir Sementsov-Ogievskiy if (!drv) {
2844b33b354fSVladimir Sementsov-Ogievskiy return -ENOMEDIUM;
2845b33b354fSVladimir Sementsov-Ogievskiy }
2846b33b354fSVladimir Sementsov-Ogievskiy
2847dc88a467SStefan Hajnoczi bdrv_inc_in_flight(bs);
28481a8ae822SKevin Wolf
2849ca5e2ad9SEmanuele Giuseppe Esposito if (drv->bdrv_co_load_vmstate) {
2850ca5e2ad9SEmanuele Giuseppe Esposito ret = drv->bdrv_co_load_vmstate(bs, qiov, pos);
2851c4db2e25SMax Reitz } else if (child_bs) {
2852b33b354fSVladimir Sementsov-Ogievskiy ret = bdrv_co_readv_vmstate(child_bs, qiov, pos);
2853b984b296SVladimir Sementsov-Ogievskiy } else {
2854b984b296SVladimir Sementsov-Ogievskiy ret = -ENOTSUP;
28551a8ae822SKevin Wolf }
28561a8ae822SKevin Wolf
2857dc88a467SStefan Hajnoczi bdrv_dec_in_flight(bs);
2858b33b354fSVladimir Sementsov-Ogievskiy
2859b33b354fSVladimir Sementsov-Ogievskiy return ret;
2860b33b354fSVladimir Sementsov-Ogievskiy }
2861b33b354fSVladimir Sementsov-Ogievskiy
2862b33b354fSVladimir Sementsov-Ogievskiy int coroutine_fn
2863b33b354fSVladimir Sementsov-Ogievskiy bdrv_co_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
2864b33b354fSVladimir Sementsov-Ogievskiy {
2865b33b354fSVladimir Sementsov-Ogievskiy BlockDriver *drv = bs->drv;
2866b33b354fSVladimir Sementsov-Ogievskiy BlockDriverState *child_bs = bdrv_primary_bs(bs);
2867b984b296SVladimir Sementsov-Ogievskiy int ret;
28681581a70dSEmanuele Giuseppe Esposito IO_CODE();
28691b3ff9feSKevin Wolf assert_bdrv_graph_readable();
2870b984b296SVladimir Sementsov-Ogievskiy
2871b984b296SVladimir Sementsov-Ogievskiy ret = bdrv_check_qiov_request(pos, qiov->size, qiov, 0, NULL);
2872b984b296SVladimir Sementsov-Ogievskiy if (ret < 0) {
2873b984b296SVladimir Sementsov-Ogievskiy return ret;
2874b984b296SVladimir Sementsov-Ogievskiy }
2875b33b354fSVladimir Sementsov-Ogievskiy
2876b33b354fSVladimir Sementsov-Ogievskiy if (!drv) {
2877b33b354fSVladimir Sementsov-Ogievskiy return -ENOMEDIUM;
2878b33b354fSVladimir Sementsov-Ogievskiy }
2879b33b354fSVladimir Sementsov-Ogievskiy
2880b33b354fSVladimir Sementsov-Ogievskiy bdrv_inc_in_flight(bs);
2881b33b354fSVladimir Sementsov-Ogievskiy
2882ca5e2ad9SEmanuele Giuseppe Esposito if (drv->bdrv_co_save_vmstate) {
2883ca5e2ad9SEmanuele Giuseppe Esposito ret = drv->bdrv_co_save_vmstate(bs, qiov, pos);
2884b33b354fSVladimir Sementsov-Ogievskiy } else if (child_bs) {
2885b33b354fSVladimir Sementsov-Ogievskiy ret = bdrv_co_writev_vmstate(child_bs, qiov, pos);
2886b984b296SVladimir Sementsov-Ogievskiy } else {
2887b984b296SVladimir Sementsov-Ogievskiy ret = -ENOTSUP;
2888b33b354fSVladimir Sementsov-Ogievskiy }
2889b33b354fSVladimir Sementsov-Ogievskiy
2890b33b354fSVladimir Sementsov-Ogievskiy bdrv_dec_in_flight(bs);
2891b33b354fSVladimir Sementsov-Ogievskiy
2892dc88a467SStefan Hajnoczi return ret;
28931a8ae822SKevin Wolf }
28941a8ae822SKevin Wolf
289561007b31SStefan Hajnoczi int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
289661007b31SStefan Hajnoczi int64_t pos, int size)
289761007b31SStefan Hajnoczi {
28980d93ed08SVladimir Sementsov-Ogievskiy QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
2899b33b354fSVladimir Sementsov-Ogievskiy int ret = bdrv_writev_vmstate(bs, &qiov, pos);
2900384a48fbSEmanuele Giuseppe Esposito IO_CODE();
290161007b31SStefan Hajnoczi
2902b33b354fSVladimir Sementsov-Ogievskiy return ret < 0 ? ret : size;
290361007b31SStefan Hajnoczi }
290461007b31SStefan Hajnoczi
290561007b31SStefan Hajnoczi int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
290661007b31SStefan Hajnoczi int64_t pos, int size)
290761007b31SStefan Hajnoczi {
29080d93ed08SVladimir Sementsov-Ogievskiy QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, size);
2909b33b354fSVladimir Sementsov-Ogievskiy int ret = bdrv_readv_vmstate(bs, &qiov, pos);
2910384a48fbSEmanuele Giuseppe Esposito IO_CODE();
29115ddda0b8SKevin Wolf
2912b33b354fSVladimir Sementsov-Ogievskiy return ret < 0 ? ret : size;
291361007b31SStefan Hajnoczi }
291461007b31SStefan Hajnoczi
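/*
 * Both flat-buffer wrappers above just adapt to the vectored coroutine
 * versions; e.g. (hypothetical caller)
 *
 *     n = bdrv_save_vmstate(bs, buf, pos, size);
 *
 * behaves like bdrv_co_writev_vmstate() on a single-element qiov and
 * returns 'size' instead of 0 on success.
 */
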
291561007b31SStefan Hajnoczi /**************************************************************/
291661007b31SStefan Hajnoczi /* async I/Os */
291761007b31SStefan Hajnoczi
2918652b0dd8SStefan Hajnoczi /**
2919652b0dd8SStefan Hajnoczi * Synchronously cancels an acb. Must be called with the BQL held and the acb
2920652b0dd8SStefan Hajnoczi * must be processed with the BQL held too (IOThreads are not allowed).
2921652b0dd8SStefan Hajnoczi *
2922652b0dd8SStefan Hajnoczi * Use bdrv_aio_cancel_async() instead when possible.
2923652b0dd8SStefan Hajnoczi */
292461007b31SStefan Hajnoczi void bdrv_aio_cancel(BlockAIOCB *acb)
292561007b31SStefan Hajnoczi {
2926652b0dd8SStefan Hajnoczi GLOBAL_STATE_CODE();
292761007b31SStefan Hajnoczi qemu_aio_ref(acb);
292861007b31SStefan Hajnoczi bdrv_aio_cancel_async(acb);
2929652b0dd8SStefan Hajnoczi AIO_WAIT_WHILE_UNLOCKED(NULL, acb->refcnt > 1);
293061007b31SStefan Hajnoczi qemu_aio_unref(acb);
293161007b31SStefan Hajnoczi }
293261007b31SStefan Hajnoczi
293361007b31SStefan Hajnoczi /* Async version of aio cancel. The caller is not blocked if the acb implements
293461007b31SStefan Hajnoczi * cancel_async; otherwise we do nothing and let the request complete normally.
293561007b31SStefan Hajnoczi * In either case the completion callback must be called. */
293661007b31SStefan Hajnoczi void bdrv_aio_cancel_async(BlockAIOCB *acb)
293761007b31SStefan Hajnoczi {
2938384a48fbSEmanuele Giuseppe Esposito IO_CODE();
293961007b31SStefan Hajnoczi if (acb->aiocb_info->cancel_async) {
294061007b31SStefan Hajnoczi acb->aiocb_info->cancel_async(acb);
294161007b31SStefan Hajnoczi }
294261007b31SStefan Hajnoczi }
294361007b31SStefan Hajnoczi
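/*
 * Illustrative contrast between the two cancel flavours (hypothetical
 * caller): under the BQL one may force synchronous completion,
 *
 *     bdrv_aio_cancel(acb);          (blocks until acb completes)
 *
 * while IOThread-friendly code should prefer
 *
 *     bdrv_aio_cancel_async(acb);    (returns immediately)
 *
 * and rely on the completion callback, which fires in either case.
 */
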
294461007b31SStefan Hajnoczi /**************************************************************/
294561007b31SStefan Hajnoczi /* Coroutine block device emulation */
294661007b31SStefan Hajnoczi
294761007b31SStefan Hajnoczi int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
294861007b31SStefan Hajnoczi {
2949883833e2SMax Reitz BdrvChild *primary_child = bdrv_primary_child(bs);
2950883833e2SMax Reitz BdrvChild *child;
295149ca6259SFam Zheng int current_gen;
295249ca6259SFam Zheng int ret = 0;
2953384a48fbSEmanuele Giuseppe Esposito IO_CODE();
295461007b31SStefan Hajnoczi
295588095349SEmanuele Giuseppe Esposito assert_bdrv_graph_readable();
295699723548SPaolo Bonzini bdrv_inc_in_flight(bs);
2957c32b82afSPavel Dovgalyuk
29581e97be91SEmanuele Giuseppe Esposito if (!bdrv_co_is_inserted(bs) || bdrv_is_read_only(bs) ||
295949ca6259SFam Zheng bdrv_is_sg(bs)) {
296049ca6259SFam Zheng goto early_exit;
296149ca6259SFam Zheng }
296249ca6259SFam Zheng
2963fa9185fcSStefan Hajnoczi qemu_mutex_lock(&bs->reqs_lock);
2964d73415a3SStefan Hajnoczi current_gen = qatomic_read(&bs->write_gen);
29653ff2f67aSEvgeny Yakovlev
29663ff2f67aSEvgeny Yakovlev /* Wait until any previous flushes are completed */
296799723548SPaolo Bonzini while (bs->active_flush_req) {
29683783fa3dSPaolo Bonzini qemu_co_queue_wait(&bs->flush_queue, &bs->reqs_lock);
29693ff2f67aSEvgeny Yakovlev }
29703ff2f67aSEvgeny Yakovlev
29713783fa3dSPaolo Bonzini /* Flushes reach this point in nondecreasing current_gen order. */
297299723548SPaolo Bonzini bs->active_flush_req = true;
2973fa9185fcSStefan Hajnoczi qemu_mutex_unlock(&bs->reqs_lock);
29743ff2f67aSEvgeny Yakovlev
2975c32b82afSPavel Dovgalyuk /* Write back all layers by calling one driver function */
2976c32b82afSPavel Dovgalyuk if (bs->drv->bdrv_co_flush) {
2977c32b82afSPavel Dovgalyuk ret = bs->drv->bdrv_co_flush(bs);
2978c32b82afSPavel Dovgalyuk goto out;
2979c32b82afSPavel Dovgalyuk }
2980c32b82afSPavel Dovgalyuk
298161007b31SStefan Hajnoczi /* Write back cached data to the OS even with cache=unsafe */
298217362398SPaolo Bonzini BLKDBG_CO_EVENT(primary_child, BLKDBG_FLUSH_TO_OS);
298361007b31SStefan Hajnoczi if (bs->drv->bdrv_co_flush_to_os) {
298461007b31SStefan Hajnoczi ret = bs->drv->bdrv_co_flush_to_os(bs);
298561007b31SStefan Hajnoczi if (ret < 0) {
2986cdb5e315SFam Zheng goto out;
298761007b31SStefan Hajnoczi }
298861007b31SStefan Hajnoczi }
298961007b31SStefan Hajnoczi
299061007b31SStefan Hajnoczi /* But don't actually force it to the disk with cache=unsafe */
299161007b31SStefan Hajnoczi if (bs->open_flags & BDRV_O_NO_FLUSH) {
2992883833e2SMax Reitz goto flush_children;
299361007b31SStefan Hajnoczi }
299461007b31SStefan Hajnoczi
29953ff2f67aSEvgeny Yakovlev /* Check if we really need to flush anything */
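/*
 * Illustrative timeline (assumed values): if earlier writes bumped
 * bs->write_gen to 5 and a previous flush already wrote generation 5
 * back, bs->flushed_gen is 5 as well, and a flush that sampled
 * current_gen == 5 above can skip the disk flush entirely.
 */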
29963ff2f67aSEvgeny Yakovlev if (bs->flushed_gen == current_gen) {
2997883833e2SMax Reitz goto flush_children;
29983ff2f67aSEvgeny Yakovlev }
29993ff2f67aSEvgeny Yakovlev
300017362398SPaolo Bonzini BLKDBG_CO_EVENT(primary_child, BLKDBG_FLUSH_TO_DISK);
3001d470ad42SMax Reitz if (!bs->drv) {
3002d470ad42SMax Reitz /* bs->drv->bdrv_co_flush() might have ejected the BDS
3003d470ad42SMax Reitz * (even in case of apparent success) */
3004d470ad42SMax Reitz ret = -ENOMEDIUM;
3005d470ad42SMax Reitz goto out;
3006d470ad42SMax Reitz }
300761007b31SStefan Hajnoczi if (bs->drv->bdrv_co_flush_to_disk) {
300861007b31SStefan Hajnoczi ret = bs->drv->bdrv_co_flush_to_disk(bs);
300961007b31SStefan Hajnoczi } else if (bs->drv->bdrv_aio_flush) {
301061007b31SStefan Hajnoczi BlockAIOCB *acb;
301161007b31SStefan Hajnoczi CoroutineIOCompletion co = {
301261007b31SStefan Hajnoczi .coroutine = qemu_coroutine_self(),
301361007b31SStefan Hajnoczi };
301461007b31SStefan Hajnoczi
301561007b31SStefan Hajnoczi acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
301661007b31SStefan Hajnoczi if (acb == NULL) {
301761007b31SStefan Hajnoczi ret = -EIO;
301861007b31SStefan Hajnoczi } else {
301961007b31SStefan Hajnoczi qemu_coroutine_yield();
302061007b31SStefan Hajnoczi ret = co.ret;
302161007b31SStefan Hajnoczi }
302261007b31SStefan Hajnoczi } else {
302361007b31SStefan Hajnoczi /*
302461007b31SStefan Hajnoczi * Some block drivers always operate in either writethrough or unsafe
302561007b31SStefan Hajnoczi * mode and therefore don't support bdrv_flush. Usually qemu doesn't
302661007b31SStefan Hajnoczi * know how the server works (because the behaviour is hardcoded or
302761007b31SStefan Hajnoczi * depends on server-side configuration), so we can't ensure that
302861007b31SStefan Hajnoczi * everything is safe on disk. Returning an error doesn't work because
302961007b31SStefan Hajnoczi * that would break guests even if the server operates in writethrough
303061007b31SStefan Hajnoczi * mode.
303161007b31SStefan Hajnoczi *
303261007b31SStefan Hajnoczi * Let's hope the user knows what they're doing.
303361007b31SStefan Hajnoczi */
303461007b31SStefan Hajnoczi ret = 0;
303561007b31SStefan Hajnoczi }
30363ff2f67aSEvgeny Yakovlev
303761007b31SStefan Hajnoczi if (ret < 0) {
3038cdb5e315SFam Zheng goto out;
303961007b31SStefan Hajnoczi }
304061007b31SStefan Hajnoczi
304161007b31SStefan Hajnoczi /* Now flush the underlying protocol. It will also have BDRV_O_NO_FLUSH
304261007b31SStefan Hajnoczi * in the case of cache=unsafe, so there are no useless flushes.
304361007b31SStefan Hajnoczi */
3044883833e2SMax Reitz flush_children:
3045883833e2SMax Reitz ret = 0;
3046883833e2SMax Reitz QLIST_FOREACH(child, &bs->children, next) {
3047883833e2SMax Reitz if (child->perm & (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED)) {
3048883833e2SMax Reitz int this_child_ret = bdrv_co_flush(child->bs);
3049883833e2SMax Reitz if (!ret) {
3050883833e2SMax Reitz ret = this_child_ret;
3051883833e2SMax Reitz }
3052883833e2SMax Reitz }
3053883833e2SMax Reitz }
3054883833e2SMax Reitz
3055cdb5e315SFam Zheng out:
30563ff2f67aSEvgeny Yakovlev /* Notify any pending flushes that we have completed */
3057e6af1e08SKevin Wolf if (ret == 0) {
30583ff2f67aSEvgeny Yakovlev bs->flushed_gen = current_gen;
3059e6af1e08SKevin Wolf }
30603783fa3dSPaolo Bonzini
3061fa9185fcSStefan Hajnoczi qemu_mutex_lock(&bs->reqs_lock);
306299723548SPaolo Bonzini bs->active_flush_req = false;
3063156af3acSDenis V. Lunev /* Return value is ignored - it's ok if wait queue is empty */
3064156af3acSDenis V. Lunev qemu_co_queue_next(&bs->flush_queue);
3065fa9185fcSStefan Hajnoczi qemu_mutex_unlock(&bs->reqs_lock);
30663ff2f67aSEvgeny Yakovlev
306749ca6259SFam Zheng early_exit:
306899723548SPaolo Bonzini bdrv_dec_in_flight(bs);
3069cdb5e315SFam Zheng return ret;
307061007b31SStefan Hajnoczi }
307161007b31SStefan Hajnoczi
3072d93e5726SVladimir Sementsov-Ogievskiy int coroutine_fn bdrv_co_pdiscard(BdrvChild *child, int64_t offset,
3073d93e5726SVladimir Sementsov-Ogievskiy int64_t bytes)
307461007b31SStefan Hajnoczi {
3075b1066c87SFam Zheng BdrvTrackedRequest req;
307639af49c0SVladimir Sementsov-Ogievskiy int ret;
307739af49c0SVladimir Sementsov-Ogievskiy int64_t max_pdiscard;
30783482b9bcSEric Blake int head, tail, align;
30790b9fd3f4SFam Zheng BlockDriverState *bs = child->bs;
3080384a48fbSEmanuele Giuseppe Esposito IO_CODE();
30819a5a1c62SEmanuele Giuseppe Esposito assert_bdrv_graph_readable();
308261007b31SStefan Hajnoczi
30831e97be91SEmanuele Giuseppe Esposito if (!bs || !bs->drv || !bdrv_co_is_inserted(bs)) {
308461007b31SStefan Hajnoczi return -ENOMEDIUM;
308561007b31SStefan Hajnoczi }
308661007b31SStefan Hajnoczi
3087d6883bc9SVladimir Sementsov-Ogievskiy if (bdrv_has_readonly_bitmaps(bs)) {
3088d6883bc9SVladimir Sementsov-Ogievskiy return -EPERM;
3089d6883bc9SVladimir Sementsov-Ogievskiy }
3090d6883bc9SVladimir Sementsov-Ogievskiy
309169b55e03SVladimir Sementsov-Ogievskiy ret = bdrv_check_request(offset, bytes, NULL);
30928b117001SVladimir Sementsov-Ogievskiy if (ret < 0) {
30938b117001SVladimir Sementsov-Ogievskiy return ret;
309461007b31SStefan Hajnoczi }
309561007b31SStefan Hajnoczi
309661007b31SStefan Hajnoczi /* Do nothing if disabled. */
309761007b31SStefan Hajnoczi if (!(bs->open_flags & BDRV_O_UNMAP)) {
309861007b31SStefan Hajnoczi return 0;
309961007b31SStefan Hajnoczi }
310061007b31SStefan Hajnoczi
310102aefe43SEric Blake if (!bs->drv->bdrv_co_pdiscard && !bs->drv->bdrv_aio_pdiscard) {
310261007b31SStefan Hajnoczi return 0;
310361007b31SStefan Hajnoczi }
310461007b31SStefan Hajnoczi
31050bc329fbSHanna Reitz /* Invalidate the cached block-status data range if this discard overlaps */
31060bc329fbSHanna Reitz bdrv_bsc_invalidate_range(bs, offset, bytes);
31070bc329fbSHanna Reitz
31083482b9bcSEric Blake /* Discard is advisory, but some devices track and coalesce
31093482b9bcSEric Blake * unaligned requests, so we must pass everything down rather than
31103482b9bcSEric Blake * round here. Still, most devices will just silently ignore
31113482b9bcSEric Blake * unaligned requests (by returning -ENOTSUP), so we must fragment
31123482b9bcSEric Blake * the request accordingly. */
311302aefe43SEric Blake align = MAX(bs->bl.pdiscard_alignment, bs->bl.request_alignment);
3114b8d0a980SEric Blake assert(align % bs->bl.request_alignment == 0);
3115b8d0a980SEric Blake head = offset % align;
3116f5a5ca79SManos Pitsidianakis tail = (offset + bytes) % align;
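/*
 * Illustrative example (assumed values): with align = 64k, offset = 32k
 * and bytes = 128k, head and tail both come out as 32k, and the loop
 * below issues requests of 32k, 64k and 32k so that the middle request
 * is fully aligned.
 */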
31179f1963b3SEric Blake
311899723548SPaolo Bonzini bdrv_inc_in_flight(bs);
3119f5a5ca79SManos Pitsidianakis tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_DISCARD);
312050824995SFam Zheng
312100695c27SFam Zheng ret = bdrv_co_write_req_prepare(child, offset, bytes, &req, 0);
3122ec050f77SDenis V. Lunev if (ret < 0) {
3123ec050f77SDenis V. Lunev goto out;
3124ec050f77SDenis V. Lunev }
3125ec050f77SDenis V. Lunev
31266a8f3dbbSVladimir Sementsov-Ogievskiy max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT64_MAX),
31279f1963b3SEric Blake align);
31283482b9bcSEric Blake assert(max_pdiscard >= bs->bl.request_alignment);
31299f1963b3SEric Blake
3130f5a5ca79SManos Pitsidianakis while (bytes > 0) {
3131d93e5726SVladimir Sementsov-Ogievskiy int64_t num = bytes;
31323482b9bcSEric Blake
31333482b9bcSEric Blake if (head) {
31343482b9bcSEric Blake /* Make small requests to get to alignment boundaries. */
3135f5a5ca79SManos Pitsidianakis num = MIN(bytes, align - head);
31363482b9bcSEric Blake if (!QEMU_IS_ALIGNED(num, bs->bl.request_alignment)) {
31373482b9bcSEric Blake num %= bs->bl.request_alignment;
31383482b9bcSEric Blake }
31393482b9bcSEric Blake head = (head + num) % align;
31403482b9bcSEric Blake assert(num < max_pdiscard);
31413482b9bcSEric Blake } else if (tail) {
31423482b9bcSEric Blake if (num > align) {
31433482b9bcSEric Blake /* Shorten the request to the last aligned cluster. */
31443482b9bcSEric Blake num -= tail;
31453482b9bcSEric Blake } else if (!QEMU_IS_ALIGNED(tail, bs->bl.request_alignment) &&
31463482b9bcSEric Blake tail > bs->bl.request_alignment) {
31473482b9bcSEric Blake tail %= bs->bl.request_alignment;
31483482b9bcSEric Blake num -= tail;
31493482b9bcSEric Blake }
31503482b9bcSEric Blake }
31513482b9bcSEric Blake /* limit request size */
31523482b9bcSEric Blake if (num > max_pdiscard) {
31533482b9bcSEric Blake num = max_pdiscard;
31543482b9bcSEric Blake }
315561007b31SStefan Hajnoczi
3156d470ad42SMax Reitz if (!bs->drv) {
3157d470ad42SMax Reitz ret = -ENOMEDIUM;
3158d470ad42SMax Reitz goto out;
3159d470ad42SMax Reitz }
316047a5486dSEric Blake if (bs->drv->bdrv_co_pdiscard) {
316147a5486dSEric Blake ret = bs->drv->bdrv_co_pdiscard(bs, offset, num);
316261007b31SStefan Hajnoczi } else {
316361007b31SStefan Hajnoczi BlockAIOCB *acb;
316461007b31SStefan Hajnoczi CoroutineIOCompletion co = {
316561007b31SStefan Hajnoczi .coroutine = qemu_coroutine_self(),
316661007b31SStefan Hajnoczi };
316761007b31SStefan Hajnoczi
31684da444a0SEric Blake acb = bs->drv->bdrv_aio_pdiscard(bs, offset, num,
316961007b31SStefan Hajnoczi bdrv_co_io_em_complete, &co);
317061007b31SStefan Hajnoczi if (acb == NULL) {
3171b1066c87SFam Zheng ret = -EIO;
3172b1066c87SFam Zheng goto out;
317361007b31SStefan Hajnoczi } else {
317461007b31SStefan Hajnoczi qemu_coroutine_yield();
317561007b31SStefan Hajnoczi ret = co.ret;
317661007b31SStefan Hajnoczi }
317761007b31SStefan Hajnoczi }
317861007b31SStefan Hajnoczi if (ret && ret != -ENOTSUP) {
3179b1066c87SFam Zheng goto out;
318061007b31SStefan Hajnoczi }
318161007b31SStefan Hajnoczi
31829f1963b3SEric Blake offset += num;
3183f5a5ca79SManos Pitsidianakis bytes -= num;
318461007b31SStefan Hajnoczi }
3185b1066c87SFam Zheng ret = 0;
3186b1066c87SFam Zheng out:
318700695c27SFam Zheng bdrv_co_write_req_finish(child, req.offset, req.bytes, &req, ret);
3188b1066c87SFam Zheng tracked_request_end(&req);
318999723548SPaolo Bonzini bdrv_dec_in_flight(bs);
3190b1066c87SFam Zheng return ret;
319161007b31SStefan Hajnoczi }
319261007b31SStefan Hajnoczi
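/*
 * Pass a device ioctl through to the driver. The coroutine callback
 * .bdrv_co_ioctl is preferred; otherwise we fall back to the AIO-based
 * .bdrv_aio_ioctl and yield until it completes. Drivers implementing
 * neither get -ENOTSUP.
 */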
3193881a4c55SPaolo Bonzini int coroutine_fn bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf)
319461007b31SStefan Hajnoczi {
319561007b31SStefan Hajnoczi BlockDriver *drv = bs->drv;
31965c5ae76aSFam Zheng CoroutineIOCompletion co = {
31975c5ae76aSFam Zheng .coroutine = qemu_coroutine_self(),
31985c5ae76aSFam Zheng };
31995c5ae76aSFam Zheng BlockAIOCB *acb;
3200384a48fbSEmanuele Giuseppe Esposito IO_CODE();
320126c518abSKevin Wolf assert_bdrv_graph_readable();
320261007b31SStefan Hajnoczi
320399723548SPaolo Bonzini bdrv_inc_in_flight(bs);
320416a389dcSKevin Wolf if (!drv || (!drv->bdrv_aio_ioctl && !drv->bdrv_co_ioctl)) {
32055c5ae76aSFam Zheng co.ret = -ENOTSUP;
32065c5ae76aSFam Zheng goto out;
32075c5ae76aSFam Zheng }
32085c5ae76aSFam Zheng
320916a389dcSKevin Wolf if (drv->bdrv_co_ioctl) {
321016a389dcSKevin Wolf co.ret = drv->bdrv_co_ioctl(bs, req, buf);
321116a389dcSKevin Wolf } else {
32125c5ae76aSFam Zheng acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co);
32135c5ae76aSFam Zheng if (!acb) {
3214c8a9fd80SFam Zheng co.ret = -ENOTSUP;
3215c8a9fd80SFam Zheng goto out;
32165c5ae76aSFam Zheng }
32175c5ae76aSFam Zheng qemu_coroutine_yield();
321816a389dcSKevin Wolf }
32195c5ae76aSFam Zheng out:
322099723548SPaolo Bonzini bdrv_dec_in_flight(bs);
32215c5ae76aSFam Zheng return co.ret;
32225c5ae76aSFam Zheng }
32235c5ae76aSFam Zheng
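/*
 * Query zone descriptors starting at @offset. Fails with -ENOTSUP
 * unless the device is zoned and the driver implements
 * .bdrv_co_zone_report.
 */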
32246d43eaa3SSam Li int coroutine_fn bdrv_co_zone_report(BlockDriverState *bs, int64_t offset,
32256d43eaa3SSam Li unsigned int *nr_zones,
32266d43eaa3SSam Li BlockZoneDescriptor *zones)
32276d43eaa3SSam Li {
32286d43eaa3SSam Li BlockDriver *drv = bs->drv;
32296d43eaa3SSam Li CoroutineIOCompletion co = {
32306d43eaa3SSam Li .coroutine = qemu_coroutine_self(),
32316d43eaa3SSam Li };
32326d43eaa3SSam Li IO_CODE();
32336d43eaa3SSam Li
32346d43eaa3SSam Li bdrv_inc_in_flight(bs);
32356d43eaa3SSam Li if (!drv || !drv->bdrv_co_zone_report || bs->bl.zoned == BLK_Z_NONE) {
32366d43eaa3SSam Li co.ret = -ENOTSUP;
32376d43eaa3SSam Li goto out;
32386d43eaa3SSam Li }
32396d43eaa3SSam Li co.ret = drv->bdrv_co_zone_report(bs, offset, nr_zones, zones);
32406d43eaa3SSam Li out:
32416d43eaa3SSam Li bdrv_dec_in_flight(bs);
32426d43eaa3SSam Li return co.ret;
32436d43eaa3SSam Li }
32446d43eaa3SSam Li
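/*
 * Perform the zone management operation @op on the range
 * [offset, offset + len). Fails with -ENOTSUP unless the device is
 * zoned and the driver implements .bdrv_co_zone_mgmt.
 */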
32456d43eaa3SSam Li int coroutine_fn bdrv_co_zone_mgmt(BlockDriverState *bs, BlockZoneOp op,
32466d43eaa3SSam Li int64_t offset, int64_t len)
32476d43eaa3SSam Li {
32486d43eaa3SSam Li BlockDriver *drv = bs->drv;
32496d43eaa3SSam Li CoroutineIOCompletion co = {
32506d43eaa3SSam Li .coroutine = qemu_coroutine_self(),
32516d43eaa3SSam Li };
32526d43eaa3SSam Li IO_CODE();
32536d43eaa3SSam Li
32546d43eaa3SSam Li bdrv_inc_in_flight(bs);
32556d43eaa3SSam Li if (!drv || !drv->bdrv_co_zone_mgmt || bs->bl.zoned == BLK_Z_NONE) {
32566d43eaa3SSam Li co.ret = -ENOTSUP;
32576d43eaa3SSam Li goto out;
32586d43eaa3SSam Li }
32596d43eaa3SSam Li co.ret = drv->bdrv_co_zone_mgmt(bs, op, offset, len);
32606d43eaa3SSam Li out:
32616d43eaa3SSam Li bdrv_dec_in_flight(bs);
32626d43eaa3SSam Li return co.ret;
32636d43eaa3SSam Li }
32646d43eaa3SSam Li
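/*
 * Zone append: write @qiov at the write pointer of the zone that
 * contains *offset; the driver is expected to return the actual write
 * position through *offset. Fails with -ENOTSUP unless the driver
 * implements .bdrv_co_zone_append.
 */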
32654751d09aSSam Li int coroutine_fn bdrv_co_zone_append(BlockDriverState *bs, int64_t *offset,
32664751d09aSSam Li QEMUIOVector *qiov,
32674751d09aSSam Li BdrvRequestFlags flags)
32684751d09aSSam Li {
32694751d09aSSam Li int ret;
32704751d09aSSam Li BlockDriver *drv = bs->drv;
32714751d09aSSam Li CoroutineIOCompletion co = {
32724751d09aSSam Li .coroutine = qemu_coroutine_self(),
32734751d09aSSam Li };
32744751d09aSSam Li IO_CODE();
32754751d09aSSam Li
32764751d09aSSam Li ret = bdrv_check_qiov_request(*offset, qiov->size, qiov, 0, NULL);
32774751d09aSSam Li if (ret < 0) {
32784751d09aSSam Li return ret;
32794751d09aSSam Li }
32804751d09aSSam Li
32814751d09aSSam Li bdrv_inc_in_flight(bs);
32824751d09aSSam Li if (!drv || !drv->bdrv_co_zone_append || bs->bl.zoned == BLK_Z_NONE) {
32834751d09aSSam Li co.ret = -ENOTSUP;
32844751d09aSSam Li goto out;
32854751d09aSSam Li }
32864751d09aSSam Li co.ret = drv->bdrv_co_zone_append(bs, offset, qiov, flags);
32874751d09aSSam Li out:
32884751d09aSSam Li bdrv_dec_in_flight(bs);
32894751d09aSSam Li return co.ret;
32904751d09aSSam Li }
32914751d09aSSam Li
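/*
 * Buffer allocation helpers: memory is aligned to bdrv_opt_mem_align(bs)
 * so it is suitable for I/O on @bs (e.g. with O_DIRECT). The plain
 * variants abort on allocation failure, the try_ variants return NULL
 * instead, and the *0 variants additionally zero the buffer. Free the
 * result with qemu_vfree().
 *
 * Illustrative use (bounce buffer for a single request):
 *
 *     void *buf = qemu_try_blockalign(bs, len);
 *     if (!buf) {
 *         return -ENOMEM;
 *     }
 *     ...
 *     qemu_vfree(buf);
 */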
329261007b31SStefan Hajnoczi void *qemu_blockalign(BlockDriverState *bs, size_t size)
329361007b31SStefan Hajnoczi {
3294384a48fbSEmanuele Giuseppe Esposito IO_CODE();
329561007b31SStefan Hajnoczi return qemu_memalign(bdrv_opt_mem_align(bs), size);
329661007b31SStefan Hajnoczi }
329761007b31SStefan Hajnoczi
329861007b31SStefan Hajnoczi void *qemu_blockalign0(BlockDriverState *bs, size_t size)
329961007b31SStefan Hajnoczi {
3300384a48fbSEmanuele Giuseppe Esposito IO_CODE();
330161007b31SStefan Hajnoczi return memset(qemu_blockalign(bs, size), 0, size);
330261007b31SStefan Hajnoczi }
330361007b31SStefan Hajnoczi
330461007b31SStefan Hajnoczi void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
330561007b31SStefan Hajnoczi {
330661007b31SStefan Hajnoczi size_t align = bdrv_opt_mem_align(bs);
3307384a48fbSEmanuele Giuseppe Esposito IO_CODE();
330861007b31SStefan Hajnoczi
330961007b31SStefan Hajnoczi /* Ensure that NULL is never returned on success */
331061007b31SStefan Hajnoczi assert(align > 0);
331161007b31SStefan Hajnoczi if (size == 0) {
331261007b31SStefan Hajnoczi size = align;
331361007b31SStefan Hajnoczi }
331461007b31SStefan Hajnoczi
331561007b31SStefan Hajnoczi return qemu_try_memalign(align, size);
331661007b31SStefan Hajnoczi }
331761007b31SStefan Hajnoczi
331861007b31SStefan Hajnoczi void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
331961007b31SStefan Hajnoczi {
332061007b31SStefan Hajnoczi void *mem = qemu_try_blockalign(bs, size);
3321384a48fbSEmanuele Giuseppe Esposito IO_CODE();
332261007b31SStefan Hajnoczi
332361007b31SStefan Hajnoczi if (mem) {
332461007b31SStefan Hajnoczi memset(mem, 0, size);
332561007b31SStefan Hajnoczi }
332661007b31SStefan Hajnoczi
332761007b31SStefan Hajnoczi return mem;
332861007b31SStefan Hajnoczi }
332961007b31SStefan Hajnoczi
3330f4ec04baSStefan Hajnoczi /* Helper that undoes bdrv_register_buf() when it fails partway through */
3331d9249c25SKevin Wolf static void GRAPH_RDLOCK
3332d9249c25SKevin Wolf bdrv_register_buf_rollback(BlockDriverState *bs, void *host, size_t size,
3333f4ec04baSStefan Hajnoczi BdrvChild *final_child)
3334f4ec04baSStefan Hajnoczi {
3335f4ec04baSStefan Hajnoczi BdrvChild *child;
3336f4ec04baSStefan Hajnoczi
3337d9249c25SKevin Wolf GLOBAL_STATE_CODE();
3338d9249c25SKevin Wolf assert_bdrv_graph_readable();
3339d9249c25SKevin Wolf
3340f4ec04baSStefan Hajnoczi QLIST_FOREACH(child, &bs->children, next) {
3341f4ec04baSStefan Hajnoczi if (child == final_child) {
3342f4ec04baSStefan Hajnoczi break;
3343f4ec04baSStefan Hajnoczi }
3344f4ec04baSStefan Hajnoczi
3345f4ec04baSStefan Hajnoczi bdrv_unregister_buf(child->bs, host, size);
3346f4ec04baSStefan Hajnoczi }
3347f4ec04baSStefan Hajnoczi
3348f4ec04baSStefan Hajnoczi if (bs->drv && bs->drv->bdrv_unregister_buf) {
3349f4ec04baSStefan Hajnoczi bs->drv->bdrv_unregister_buf(bs, host, size);
3350f4ec04baSStefan Hajnoczi }
3351f4ec04baSStefan Hajnoczi }
3352f4ec04baSStefan Hajnoczi
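/*
 * Register @host/@size with every driver in the subtree rooted at @bs,
 * e.g. so that drivers can pre-map the buffer for faster I/O. If any
 * registration fails, everything registered so far is undone via
 * bdrv_register_buf_rollback() and false is returned.
 */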
3353f4ec04baSStefan Hajnoczi bool bdrv_register_buf(BlockDriverState *bs, void *host, size_t size,
3354f4ec04baSStefan Hajnoczi Error **errp)
335523d0ba93SFam Zheng {
335623d0ba93SFam Zheng BdrvChild *child;
335723d0ba93SFam Zheng
3358f791bf7fSEmanuele Giuseppe Esposito GLOBAL_STATE_CODE();
3359d9249c25SKevin Wolf GRAPH_RDLOCK_GUARD_MAINLOOP();
3360d9249c25SKevin Wolf
336123d0ba93SFam Zheng if (bs->drv && bs->drv->bdrv_register_buf) {
3362f4ec04baSStefan Hajnoczi if (!bs->drv->bdrv_register_buf(bs, host, size, errp)) {
3363f4ec04baSStefan Hajnoczi return false;
3364f4ec04baSStefan Hajnoczi }
336523d0ba93SFam Zheng }
336623d0ba93SFam Zheng QLIST_FOREACH(child, &bs->children, next) {
3367f4ec04baSStefan Hajnoczi if (!bdrv_register_buf(child->bs, host, size, errp)) {
3368f4ec04baSStefan Hajnoczi bdrv_register_buf_rollback(bs, host, size, child);
3369f4ec04baSStefan Hajnoczi return false;
337023d0ba93SFam Zheng }
337123d0ba93SFam Zheng }
3372f4ec04baSStefan Hajnoczi return true;
3373f4ec04baSStefan Hajnoczi }
337423d0ba93SFam Zheng
33754f384011SStefan Hajnoczi void bdrv_unregister_buf(BlockDriverState *bs, void *host, size_t size)
337623d0ba93SFam Zheng {
337723d0ba93SFam Zheng BdrvChild *child;
337823d0ba93SFam Zheng
3379f791bf7fSEmanuele Giuseppe Esposito GLOBAL_STATE_CODE();
3380d9249c25SKevin Wolf GRAPH_RDLOCK_GUARD_MAINLOOP();
3381d9249c25SKevin Wolf
338223d0ba93SFam Zheng if (bs->drv && bs->drv->bdrv_unregister_buf) {
33834f384011SStefan Hajnoczi bs->drv->bdrv_unregister_buf(bs, host, size);
338423d0ba93SFam Zheng }
338523d0ba93SFam Zheng QLIST_FOREACH(child, &bs->children, next) {
33864f384011SStefan Hajnoczi bdrv_unregister_buf(child->bs, host, size);
338723d0ba93SFam Zheng }
338823d0ba93SFam Zheng }
3389fcc67678SFam Zheng
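/*
 * Common implementation of bdrv_co_copy_range_from/_to: @recurse_src
 * selects whether the operation is tracked as a read on @src and
 * forwarded to .bdrv_co_copy_range_from, or tracked as a write on @dst
 * and forwarded to .bdrv_co_copy_range_to.
 */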
3390abaf8b75SKevin Wolf static int coroutine_fn GRAPH_RDLOCK bdrv_co_copy_range_internal(
3391a5215b8fSVladimir Sementsov-Ogievskiy BdrvChild *src, int64_t src_offset, BdrvChild *dst,
3392a5215b8fSVladimir Sementsov-Ogievskiy int64_t dst_offset, int64_t bytes,
339367b51fb9SVladimir Sementsov-Ogievskiy BdrvRequestFlags read_flags, BdrvRequestFlags write_flags,
3394fcc67678SFam Zheng bool recurse_src)
3395fcc67678SFam Zheng {
3396999658a0SVladimir Sementsov-Ogievskiy BdrvTrackedRequest req;
3397fcc67678SFam Zheng int ret;
3398742bf09bSEmanuele Giuseppe Esposito assert_bdrv_graph_readable();
3399fcc67678SFam Zheng
3400fe0480d6SKevin Wolf /* TODO We can support BDRV_REQ_NO_FALLBACK here */
3401fe0480d6SKevin Wolf assert(!(read_flags & BDRV_REQ_NO_FALLBACK));
3402fe0480d6SKevin Wolf assert(!(write_flags & BDRV_REQ_NO_FALLBACK));
340345e62b46SVladimir Sementsov-Ogievskiy assert(!(read_flags & BDRV_REQ_NO_WAIT));
340445e62b46SVladimir Sementsov-Ogievskiy assert(!(write_flags & BDRV_REQ_NO_WAIT));
3405fe0480d6SKevin Wolf
34061e97be91SEmanuele Giuseppe Esposito if (!dst || !dst->bs || !bdrv_co_is_inserted(dst->bs)) {
3407fcc67678SFam Zheng return -ENOMEDIUM;
3408fcc67678SFam Zheng }
340963f4ad11SVladimir Sementsov-Ogievskiy ret = bdrv_check_request32(dst_offset, bytes, NULL, 0);
3410fcc67678SFam Zheng if (ret) {
3411fcc67678SFam Zheng return ret;
3412fcc67678SFam Zheng }
341367b51fb9SVladimir Sementsov-Ogievskiy if (write_flags & BDRV_REQ_ZERO_WRITE) {
341467b51fb9SVladimir Sementsov-Ogievskiy return bdrv_co_pwrite_zeroes(dst, dst_offset, bytes, write_flags);
3415fcc67678SFam Zheng }
3416fcc67678SFam Zheng
34171e97be91SEmanuele Giuseppe Esposito if (!src || !src->bs || !bdrv_co_is_inserted(src->bs)) {
3418d4d3e5a0SFam Zheng return -ENOMEDIUM;
3419d4d3e5a0SFam Zheng }
342063f4ad11SVladimir Sementsov-Ogievskiy ret = bdrv_check_request32(src_offset, bytes, NULL, 0);
3421d4d3e5a0SFam Zheng if (ret) {
3422d4d3e5a0SFam Zheng return ret;
3423d4d3e5a0SFam Zheng }
3424d4d3e5a0SFam Zheng
3425fcc67678SFam Zheng if (!src->bs->drv->bdrv_co_copy_range_from
3426fcc67678SFam Zheng || !dst->bs->drv->bdrv_co_copy_range_to
3427fcc67678SFam Zheng || src->bs->encrypted || dst->bs->encrypted) {
3428fcc67678SFam Zheng return -ENOTSUP;
3429fcc67678SFam Zheng }
3430999658a0SVladimir Sementsov-Ogievskiy
3431999658a0SVladimir Sementsov-Ogievskiy if (recurse_src) {
3432d4d3e5a0SFam Zheng bdrv_inc_in_flight(src->bs);
3433999658a0SVladimir Sementsov-Ogievskiy tracked_request_begin(&req, src->bs, src_offset, bytes,
3434999658a0SVladimir Sementsov-Ogievskiy BDRV_TRACKED_READ);
343537aec7d7SFam Zheng
343609d2f948SVladimir Sementsov-Ogievskiy /* BDRV_REQ_SERIALISING is only for write operation */
343709d2f948SVladimir Sementsov-Ogievskiy assert(!(read_flags & BDRV_REQ_SERIALISING));
3438304d9d7fSMax Reitz bdrv_wait_serialising_requests(&req);
3439999658a0SVladimir Sementsov-Ogievskiy
344037aec7d7SFam Zheng ret = src->bs->drv->bdrv_co_copy_range_from(src->bs,
3441fcc67678SFam Zheng src, src_offset,
3442fcc67678SFam Zheng dst, dst_offset,
344367b51fb9SVladimir Sementsov-Ogievskiy bytes,
344467b51fb9SVladimir Sementsov-Ogievskiy read_flags, write_flags);
3445999658a0SVladimir Sementsov-Ogievskiy
3446999658a0SVladimir Sementsov-Ogievskiy tracked_request_end(&req);
3447999658a0SVladimir Sementsov-Ogievskiy bdrv_dec_in_flight(src->bs);
3448fcc67678SFam Zheng } else {
3449999658a0SVladimir Sementsov-Ogievskiy bdrv_inc_in_flight(dst->bs);
3450999658a0SVladimir Sementsov-Ogievskiy tracked_request_begin(&req, dst->bs, dst_offset, bytes,
3451999658a0SVladimir Sementsov-Ogievskiy BDRV_TRACKED_WRITE);
34520eb1e891SFam Zheng ret = bdrv_co_write_req_prepare(dst, dst_offset, bytes, &req,
34530eb1e891SFam Zheng write_flags);
34540eb1e891SFam Zheng if (!ret) {
345537aec7d7SFam Zheng ret = dst->bs->drv->bdrv_co_copy_range_to(dst->bs,
3456fcc67678SFam Zheng src, src_offset,
3457fcc67678SFam Zheng dst, dst_offset,
345867b51fb9SVladimir Sementsov-Ogievskiy bytes,
345967b51fb9SVladimir Sementsov-Ogievskiy read_flags, write_flags);
34600eb1e891SFam Zheng }
34610eb1e891SFam Zheng bdrv_co_write_req_finish(dst, dst_offset, bytes, &req, ret);
3462999658a0SVladimir Sementsov-Ogievskiy tracked_request_end(&req);
3463d4d3e5a0SFam Zheng bdrv_dec_in_flight(dst->bs);
3464999658a0SVladimir Sementsov-Ogievskiy }
3465999658a0SVladimir Sementsov-Ogievskiy
346637aec7d7SFam Zheng return ret;
3467fcc67678SFam Zheng }
3468fcc67678SFam Zheng
3469fcc67678SFam Zheng /* Copy range from @src to @dst.
3470fcc67678SFam Zheng *
3471fcc67678SFam Zheng * See the comment of bdrv_co_copy_range for the parameter and return value
3472fcc67678SFam Zheng * semantics. */
3473a5215b8fSVladimir Sementsov-Ogievskiy int coroutine_fn bdrv_co_copy_range_from(BdrvChild *src, int64_t src_offset,
3474a5215b8fSVladimir Sementsov-Ogievskiy BdrvChild *dst, int64_t dst_offset,
3475a5215b8fSVladimir Sementsov-Ogievskiy int64_t bytes,
347667b51fb9SVladimir Sementsov-Ogievskiy BdrvRequestFlags read_flags,
347767b51fb9SVladimir Sementsov-Ogievskiy BdrvRequestFlags write_flags)
3478fcc67678SFam Zheng {
3479967d7905SEmanuele Giuseppe Esposito IO_CODE();
3480742bf09bSEmanuele Giuseppe Esposito assert_bdrv_graph_readable();
3481ecc983a5SFam Zheng trace_bdrv_co_copy_range_from(src, src_offset, dst, dst_offset, bytes,
3482ecc983a5SFam Zheng read_flags, write_flags);
3483fcc67678SFam Zheng return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
348467b51fb9SVladimir Sementsov-Ogievskiy bytes, read_flags, write_flags, true);
3485fcc67678SFam Zheng }
3486fcc67678SFam Zheng
3487fcc67678SFam Zheng /* Copy range from @src to @dst.
3488fcc67678SFam Zheng *
3489fcc67678SFam Zheng * See the comment of bdrv_co_copy_range for the parameter and return value
3490fcc67678SFam Zheng * semantics. */
3491a5215b8fSVladimir Sementsov-Ogievskiy int coroutine_fn bdrv_co_copy_range_to(BdrvChild *src, int64_t src_offset,
3492a5215b8fSVladimir Sementsov-Ogievskiy BdrvChild *dst, int64_t dst_offset,
3493a5215b8fSVladimir Sementsov-Ogievskiy int64_t bytes,
349467b51fb9SVladimir Sementsov-Ogievskiy BdrvRequestFlags read_flags,
349567b51fb9SVladimir Sementsov-Ogievskiy BdrvRequestFlags write_flags)
3496fcc67678SFam Zheng {
3497967d7905SEmanuele Giuseppe Esposito IO_CODE();
3498742bf09bSEmanuele Giuseppe Esposito assert_bdrv_graph_readable();
3499ecc983a5SFam Zheng trace_bdrv_co_copy_range_to(src, src_offset, dst, dst_offset, bytes,
3500ecc983a5SFam Zheng read_flags, write_flags);
3501fcc67678SFam Zheng return bdrv_co_copy_range_internal(src, src_offset, dst, dst_offset,
350267b51fb9SVladimir Sementsov-Ogievskiy bytes, read_flags, write_flags, false);
3503fcc67678SFam Zheng }
3504fcc67678SFam Zheng
3505a5215b8fSVladimir Sementsov-Ogievskiy int coroutine_fn bdrv_co_copy_range(BdrvChild *src, int64_t src_offset,
3506a5215b8fSVladimir Sementsov-Ogievskiy BdrvChild *dst, int64_t dst_offset,
3507a5215b8fSVladimir Sementsov-Ogievskiy int64_t bytes, BdrvRequestFlags read_flags,
350867b51fb9SVladimir Sementsov-Ogievskiy BdrvRequestFlags write_flags)
3509fcc67678SFam Zheng {
3510384a48fbSEmanuele Giuseppe Esposito IO_CODE();
3511742bf09bSEmanuele Giuseppe Esposito assert_bdrv_graph_readable();
3512742bf09bSEmanuele Giuseppe Esposito
351337aec7d7SFam Zheng return bdrv_co_copy_range_from(src, src_offset,
3514fcc67678SFam Zheng dst, dst_offset,
351567b51fb9SVladimir Sementsov-Ogievskiy bytes, read_flags, write_flags);
3516fcc67678SFam Zheng }
35173d9f2d2aSKevin Wolf
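/* Notify all parents of @bs, through their BdrvChildClass .resize()
 * callback (if any), that the node has been resized. */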
35187859c45aSKevin Wolf static void coroutine_fn GRAPH_RDLOCK
35197859c45aSKevin Wolf bdrv_parent_cb_resize(BlockDriverState *bs)
35203d9f2d2aSKevin Wolf {
35213d9f2d2aSKevin Wolf BdrvChild *c;
35227859c45aSKevin Wolf
35237859c45aSKevin Wolf assert_bdrv_graph_readable();
35247859c45aSKevin Wolf
35253d9f2d2aSKevin Wolf QLIST_FOREACH(c, &bs->parents, next_parent) {
3526bd86fb99SMax Reitz if (c->klass->resize) {
3527bd86fb99SMax Reitz c->klass->resize(c);
35283d9f2d2aSKevin Wolf }
35293d9f2d2aSKevin Wolf }
35303d9f2d2aSKevin Wolf }
35313d9f2d2aSKevin Wolf
35323d9f2d2aSKevin Wolf /**
35333d9f2d2aSKevin Wolf * Truncate file to 'offset' bytes (needed only for file protocols)
3534c80d8b06SMax Reitz *
3535c80d8b06SMax Reitz * If 'exact' is true, the file must be resized to exactly the given
3536c80d8b06SMax Reitz * 'offset'. Otherwise, it is sufficient for the node to be at least
3537c80d8b06SMax Reitz * 'offset' bytes in length.
35383d9f2d2aSKevin Wolf */
3539c80d8b06SMax Reitz int coroutine_fn bdrv_co_truncate(BdrvChild *child, int64_t offset, bool exact,
35407b8e4857SKevin Wolf PreallocMode prealloc, BdrvRequestFlags flags,
35417b8e4857SKevin Wolf Error **errp)
35423d9f2d2aSKevin Wolf {
35433d9f2d2aSKevin Wolf BlockDriverState *bs = child->bs;
354423b93525SMax Reitz BdrvChild *filtered, *backing;
35453d9f2d2aSKevin Wolf BlockDriver *drv = bs->drv;
35461bc5f09fSKevin Wolf BdrvTrackedRequest req;
35471bc5f09fSKevin Wolf int64_t old_size, new_bytes;
35483d9f2d2aSKevin Wolf int ret;
3549384a48fbSEmanuele Giuseppe Esposito IO_CODE();
3550c2b8e315SKevin Wolf assert_bdrv_graph_readable();
35513d9f2d2aSKevin Wolf
35523d9f2d2aSKevin Wolf /* if bs->drv == NULL, bs is closed, so there's nothing to do here */
35533d9f2d2aSKevin Wolf if (!drv) {
35543d9f2d2aSKevin Wolf error_setg(errp, "No medium inserted");
35553d9f2d2aSKevin Wolf return -ENOMEDIUM;
35563d9f2d2aSKevin Wolf }
35573d9f2d2aSKevin Wolf if (offset < 0) {
35583d9f2d2aSKevin Wolf error_setg(errp, "Image size cannot be negative");
35593d9f2d2aSKevin Wolf return -EINVAL;
35603d9f2d2aSKevin Wolf }
35613d9f2d2aSKevin Wolf
356269b55e03SVladimir Sementsov-Ogievskiy ret = bdrv_check_request(offset, 0, errp);
35638b117001SVladimir Sementsov-Ogievskiy if (ret < 0) {
35648b117001SVladimir Sementsov-Ogievskiy return ret;
35658b117001SVladimir Sementsov-Ogievskiy }
35668b117001SVladimir Sementsov-Ogievskiy
35670af02bd1SPaolo Bonzini old_size = bdrv_co_getlength(bs);
35681bc5f09fSKevin Wolf if (old_size < 0) {
35691bc5f09fSKevin Wolf error_setg_errno(errp, -old_size, "Failed to get old image size");
35701bc5f09fSKevin Wolf return old_size;
35711bc5f09fSKevin Wolf }
35721bc5f09fSKevin Wolf
357397efa869SEric Blake if (bdrv_is_read_only(bs)) {
357497efa869SEric Blake error_setg(errp, "Image is read-only");
357597efa869SEric Blake return -EACCES;
357697efa869SEric Blake }
357797efa869SEric Blake
35781bc5f09fSKevin Wolf if (offset > old_size) {
35791bc5f09fSKevin Wolf new_bytes = offset - old_size;
35801bc5f09fSKevin Wolf } else {
35811bc5f09fSKevin Wolf new_bytes = 0;
35821bc5f09fSKevin Wolf }
35831bc5f09fSKevin Wolf
35843d9f2d2aSKevin Wolf bdrv_inc_in_flight(bs);
35855416a11eSFam Zheng tracked_request_begin(&req, bs, offset - new_bytes, new_bytes,
35865416a11eSFam Zheng BDRV_TRACKED_TRUNCATE);
35871bc5f09fSKevin Wolf
35881bc5f09fSKevin Wolf /* If we are growing the image and potentially using preallocation for the
35891bc5f09fSKevin Wolf * new area, we need to make sure that no write requests are made to it
35901bc5f09fSKevin Wolf * concurrently or they might be overwritten by preallocation. */
35911bc5f09fSKevin Wolf if (new_bytes) {
35928ac5aab2SVladimir Sementsov-Ogievskiy bdrv_make_request_serialising(&req, 1);
3593cd47d792SFam Zheng }
3594cd47d792SFam Zheng ret = bdrv_co_write_req_prepare(child, offset - new_bytes, new_bytes, &req,
3595cd47d792SFam Zheng 0);
3596cd47d792SFam Zheng if (ret < 0) {
3597cd47d792SFam Zheng error_setg_errno(errp, -ret,
3598cd47d792SFam Zheng "Failed to prepare request for truncation");
3599cd47d792SFam Zheng goto out;
36001bc5f09fSKevin Wolf }
36013d9f2d2aSKevin Wolf
360293393e69SMax Reitz filtered = bdrv_filter_child(bs);
360323b93525SMax Reitz backing = bdrv_cow_child(bs);
360493393e69SMax Reitz
3605955c7d66SKevin Wolf /*
3606955c7d66SKevin Wolf * If the image has a backing file that is large enough that it would
3607955c7d66SKevin Wolf * provide data for the new area, we cannot leave it unallocated because
3608955c7d66SKevin Wolf * then the backing file content would become visible. Instead, zero-fill
3609955c7d66SKevin Wolf * the new area.
3610955c7d66SKevin Wolf *
3611955c7d66SKevin Wolf * Note that if the image has a backing file, but was opened without the
3612955c7d66SKevin Wolf * backing file, taking care of keeping things consistent with that backing
3613955c7d66SKevin Wolf * file is the user's responsibility.
3614955c7d66SKevin Wolf */
361523b93525SMax Reitz if (new_bytes && backing) {
3616955c7d66SKevin Wolf int64_t backing_len;
3617955c7d66SKevin Wolf
3618bd53086eSEmanuele Giuseppe Esposito backing_len = bdrv_co_getlength(backing->bs);
3619955c7d66SKevin Wolf if (backing_len < 0) {
3620955c7d66SKevin Wolf ret = backing_len;
3621955c7d66SKevin Wolf error_setg_errno(errp, -ret, "Could not get backing file size");
3622955c7d66SKevin Wolf goto out;
3623955c7d66SKevin Wolf }
3624955c7d66SKevin Wolf
3625955c7d66SKevin Wolf if (backing_len > old_size) {
3626955c7d66SKevin Wolf flags |= BDRV_REQ_ZERO_WRITE;
3627955c7d66SKevin Wolf }
3628955c7d66SKevin Wolf }
3629955c7d66SKevin Wolf
36306b7e8f8bSMax Reitz if (drv->bdrv_co_truncate) {
363192b92799SKevin Wolf if (flags & ~bs->supported_truncate_flags) {
363292b92799SKevin Wolf error_setg(errp, "Block driver does not support requested flags");
363392b92799SKevin Wolf ret = -ENOTSUP;
363492b92799SKevin Wolf goto out;
363592b92799SKevin Wolf }
363692b92799SKevin Wolf ret = drv->bdrv_co_truncate(bs, offset, exact, prealloc, flags, errp);
363793393e69SMax Reitz } else if (filtered) {
363893393e69SMax Reitz ret = bdrv_co_truncate(filtered, offset, exact, prealloc, flags, errp);
36396b7e8f8bSMax Reitz } else {
36403d9f2d2aSKevin Wolf error_setg(errp, "Image format driver does not support resize");
36413d9f2d2aSKevin Wolf ret = -ENOTSUP;
36423d9f2d2aSKevin Wolf goto out;
36433d9f2d2aSKevin Wolf }
36443d9f2d2aSKevin Wolf if (ret < 0) {
36453d9f2d2aSKevin Wolf goto out;
36463d9f2d2aSKevin Wolf }
36476b7e8f8bSMax Reitz
3648bd53086eSEmanuele Giuseppe Esposito ret = bdrv_co_refresh_total_sectors(bs, offset >> BDRV_SECTOR_BITS);
36493d9f2d2aSKevin Wolf if (ret < 0) {
36503d9f2d2aSKevin Wolf error_setg_errno(errp, -ret, "Could not refresh total sector count");
36513d9f2d2aSKevin Wolf } else {
36523d9f2d2aSKevin Wolf offset = bs->total_sectors * BDRV_SECTOR_SIZE;
36533d9f2d2aSKevin Wolf }
3654c057960cSEmanuele Giuseppe Esposito /*
3655c057960cSEmanuele Giuseppe Esposito * It's possible that truncation succeeded but bdrv_refresh_total_sectors
3656cd47d792SFam Zheng * failed; the latter doesn't affect how we should finish the request.
3657c057960cSEmanuele Giuseppe Esposito * Pass 0 as the last parameter so that dirty bitmaps etc. are handled.
3658c057960cSEmanuele Giuseppe Esposito */
3659cd47d792SFam Zheng bdrv_co_write_req_finish(child, offset - new_bytes, new_bytes, &req, 0);
36603d9f2d2aSKevin Wolf
36613d9f2d2aSKevin Wolf out:
36621bc5f09fSKevin Wolf tracked_request_end(&req);
36633d9f2d2aSKevin Wolf bdrv_dec_in_flight(bs);
36641bc5f09fSKevin Wolf
36653d9f2d2aSKevin Wolf return ret;
36663d9f2d2aSKevin Wolf }
3667bd54669aSVladimir Sementsov-Ogievskiy
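/* Best-effort cancellation of requests in flight on @bs; this is a no-op
 * for drivers that do not implement .bdrv_cancel_in_flight. */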
3668bd54669aSVladimir Sementsov-Ogievskiy void bdrv_cancel_in_flight(BlockDriverState *bs)
3669bd54669aSVladimir Sementsov-Ogievskiy {
3670f791bf7fSEmanuele Giuseppe Esposito GLOBAL_STATE_CODE();
367179a55866SKevin Wolf GRAPH_RDLOCK_GUARD_MAINLOOP();
367279a55866SKevin Wolf
3673bd54669aSVladimir Sementsov-Ogievskiy if (!bs || !bs->drv) {
3674bd54669aSVladimir Sementsov-Ogievskiy return;
3675bd54669aSVladimir Sementsov-Ogievskiy }
3676bd54669aSVladimir Sementsov-Ogievskiy
3677bd54669aSVladimir Sementsov-Ogievskiy if (bs->drv->bdrv_cancel_in_flight) {
3678bd54669aSVladimir Sementsov-Ogievskiy bs->drv->bdrv_cancel_in_flight(bs);
3679bd54669aSVladimir Sementsov-Ogievskiy }
3680bd54669aSVladimir Sementsov-Ogievskiy }
3681ce14f3b4SVladimir Sementsov-Ogievskiy
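/*
 * The *_snapshot helpers below forward to the driver's snapshot-access
 * callbacks (.bdrv_co_preadv_snapshot etc.) and return -ENOTSUP when the
 * driver does not provide them.
 */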
3682ce14f3b4SVladimir Sementsov-Ogievskiy int coroutine_fn
3683ce14f3b4SVladimir Sementsov-Ogievskiy bdrv_co_preadv_snapshot(BdrvChild *child, int64_t offset, int64_t bytes,
3684ce14f3b4SVladimir Sementsov-Ogievskiy QEMUIOVector *qiov, size_t qiov_offset)
3685ce14f3b4SVladimir Sementsov-Ogievskiy {
3686ce14f3b4SVladimir Sementsov-Ogievskiy BlockDriverState *bs = child->bs;
3687ce14f3b4SVladimir Sementsov-Ogievskiy BlockDriver *drv = bs->drv;
3688ce14f3b4SVladimir Sementsov-Ogievskiy int ret;
3689ce14f3b4SVladimir Sementsov-Ogievskiy IO_CODE();
36907b9e8b22SKevin Wolf assert_bdrv_graph_readable();
3691ce14f3b4SVladimir Sementsov-Ogievskiy
3692ce14f3b4SVladimir Sementsov-Ogievskiy if (!drv) {
3693ce14f3b4SVladimir Sementsov-Ogievskiy return -ENOMEDIUM;
3694ce14f3b4SVladimir Sementsov-Ogievskiy }
3695ce14f3b4SVladimir Sementsov-Ogievskiy
3696ce14f3b4SVladimir Sementsov-Ogievskiy if (!drv->bdrv_co_preadv_snapshot) {
3697ce14f3b4SVladimir Sementsov-Ogievskiy return -ENOTSUP;
3698ce14f3b4SVladimir Sementsov-Ogievskiy }
3699ce14f3b4SVladimir Sementsov-Ogievskiy
3700ce14f3b4SVladimir Sementsov-Ogievskiy bdrv_inc_in_flight(bs);
3701ce14f3b4SVladimir Sementsov-Ogievskiy ret = drv->bdrv_co_preadv_snapshot(bs, offset, bytes, qiov, qiov_offset);
3702ce14f3b4SVladimir Sementsov-Ogievskiy bdrv_dec_in_flight(bs);
3703ce14f3b4SVladimir Sementsov-Ogievskiy
3704ce14f3b4SVladimir Sementsov-Ogievskiy return ret;
3705ce14f3b4SVladimir Sementsov-Ogievskiy }
3706ce14f3b4SVladimir Sementsov-Ogievskiy
3707ce14f3b4SVladimir Sementsov-Ogievskiy int coroutine_fn
3708ce14f3b4SVladimir Sementsov-Ogievskiy bdrv_co_snapshot_block_status(BlockDriverState *bs,
3709ce14f3b4SVladimir Sementsov-Ogievskiy bool want_zero, int64_t offset, int64_t bytes,
3710ce14f3b4SVladimir Sementsov-Ogievskiy int64_t *pnum, int64_t *map,
3711ce14f3b4SVladimir Sementsov-Ogievskiy BlockDriverState **file)
3712ce14f3b4SVladimir Sementsov-Ogievskiy {
3713ce14f3b4SVladimir Sementsov-Ogievskiy BlockDriver *drv = bs->drv;
3714ce14f3b4SVladimir Sementsov-Ogievskiy int ret;
3715ce14f3b4SVladimir Sementsov-Ogievskiy IO_CODE();
37167b9e8b22SKevin Wolf assert_bdrv_graph_readable();
3717ce14f3b4SVladimir Sementsov-Ogievskiy
3718ce14f3b4SVladimir Sementsov-Ogievskiy if (!drv) {
3719ce14f3b4SVladimir Sementsov-Ogievskiy return -ENOMEDIUM;
3720ce14f3b4SVladimir Sementsov-Ogievskiy }
3721ce14f3b4SVladimir Sementsov-Ogievskiy
3722ce14f3b4SVladimir Sementsov-Ogievskiy if (!drv->bdrv_co_snapshot_block_status) {
3723ce14f3b4SVladimir Sementsov-Ogievskiy return -ENOTSUP;
3724ce14f3b4SVladimir Sementsov-Ogievskiy }
3725ce14f3b4SVladimir Sementsov-Ogievskiy
3726ce14f3b4SVladimir Sementsov-Ogievskiy bdrv_inc_in_flight(bs);
3727ce14f3b4SVladimir Sementsov-Ogievskiy ret = drv->bdrv_co_snapshot_block_status(bs, want_zero, offset, bytes,
3728ce14f3b4SVladimir Sementsov-Ogievskiy pnum, map, file);
3729ce14f3b4SVladimir Sementsov-Ogievskiy bdrv_dec_in_flight(bs);
3730ce14f3b4SVladimir Sementsov-Ogievskiy
3731ce14f3b4SVladimir Sementsov-Ogievskiy return ret;
3732ce14f3b4SVladimir Sementsov-Ogievskiy }
3733ce14f3b4SVladimir Sementsov-Ogievskiy
3734ce14f3b4SVladimir Sementsov-Ogievskiy int coroutine_fn
3735ce14f3b4SVladimir Sementsov-Ogievskiy bdrv_co_pdiscard_snapshot(BlockDriverState *bs, int64_t offset, int64_t bytes)
3736ce14f3b4SVladimir Sementsov-Ogievskiy {
3737ce14f3b4SVladimir Sementsov-Ogievskiy BlockDriver *drv = bs->drv;
3738ce14f3b4SVladimir Sementsov-Ogievskiy int ret;
3739ce14f3b4SVladimir Sementsov-Ogievskiy IO_CODE();
37409a5a1c62SEmanuele Giuseppe Esposito assert_bdrv_graph_readable();
3741ce14f3b4SVladimir Sementsov-Ogievskiy
3742ce14f3b4SVladimir Sementsov-Ogievskiy if (!drv) {
3743ce14f3b4SVladimir Sementsov-Ogievskiy return -ENOMEDIUM;
3744ce14f3b4SVladimir Sementsov-Ogievskiy }
3745ce14f3b4SVladimir Sementsov-Ogievskiy
3746ce14f3b4SVladimir Sementsov-Ogievskiy if (!drv->bdrv_co_pdiscard_snapshot) {
3747ce14f3b4SVladimir Sementsov-Ogievskiy return -ENOTSUP;
3748ce14f3b4SVladimir Sementsov-Ogievskiy }
3749ce14f3b4SVladimir Sementsov-Ogievskiy
3750ce14f3b4SVladimir Sementsov-Ogievskiy bdrv_inc_in_flight(bs);
3751ce14f3b4SVladimir Sementsov-Ogievskiy ret = drv->bdrv_co_pdiscard_snapshot(bs, offset, bytes);
3752ce14f3b4SVladimir Sementsov-Ogievskiy bdrv_dec_in_flight(bs);
3753ce14f3b4SVladimir Sementsov-Ogievskiy
3754ce14f3b4SVladimir Sementsov-Ogievskiy return ret;
3755ce14f3b4SVladimir Sementsov-Ogievskiy }
3756