xref: /openbmc/qemu/block/stream.c (revision 57a33d89)
/*
 * Image streaming
 *
 * Copyright IBM, Corp. 2011
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "trace.h"
#include "block_int.h"

enum {
    /*
     * Size of data buffer for populating the image file.  This should be
     * large enough to process multiple clusters in a single call, so that
     * populating contiguous regions of the image is efficient.
     */
    STREAM_BUFFER_SIZE = 512 * 1024, /* in bytes */
};
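
/*
 * Worked example: with the common qcow2 cluster size of 64 KiB, one
 * 512 KiB buffer spans 512 / 64 = 8 clusters, so each copy operation
 * can populate up to eight contiguous clusters at once.
 */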

#define SLICE_TIME 100000000ULL /* ns */

typedef struct {
    int64_t next_slice_time;    /* when the current slice ends, in ns */
    uint64_t slice_quota;       /* units (sectors here) allowed per slice */
    uint64_t dispatched;        /* units dispatched in the current slice */
} RateLimit;

static int64_t ratelimit_calculate_delay(RateLimit *limit, uint64_t n)
{
    int64_t delay_ns = 0;
    int64_t now = qemu_get_clock_ns(rt_clock);

    if (limit->next_slice_time < now) {
        /* The current slice has expired; start a new one */
        limit->next_slice_time = now + SLICE_TIME;
        limit->dispatched = 0;
    }
    if (limit->dispatched + n > limit->slice_quota) {
        /* Quota exhausted: caller should sleep until the next slice */
        delay_ns = limit->next_slice_time - now;
    } else {
        limit->dispatched += n;
    }
    return delay_ns;
}

static void ratelimit_set_speed(RateLimit *limit, uint64_t speed)
{
    /* Convert a per-second speed into a per-slice quota */
    limit->slice_quota = speed / (1000000000ULL / SLICE_TIME);
}
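
/*
 * Worked example: SLICE_TIME is 100 ms, i.e. 10 slices per second, so a
 * speed limit of 20480 sectors/s yields slice_quota = 20480 / 10 = 2048
 * sectors per slice.  A request that would push 'dispatched' past the
 * quota is delayed by next_slice_time - now, until the slice rolls over.
 */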

typedef struct StreamBlockJob {
    BlockJob common;
    RateLimit limit;
    BlockDriverState *base;        /* streaming stops at this image, if set */
    char backing_file_id[1024];    /* new backing file name for the top image */
} StreamBlockJob;

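/*
 * Copy data for [sector_num, sector_num + nb_sectors) up into the top
 * image.  bdrv_co_copy_on_readv() reads through the backing chain and
 * writes the result into bs, so on success these sectors no longer
 * depend on any backing file.
 */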
static int coroutine_fn stream_populate(BlockDriverState *bs,
                                        int64_t sector_num, int nb_sectors,
                                        void *buf)
{
    struct iovec iov = {
        .iov_base = buf,
        .iov_len  = nb_sectors * BDRV_SECTOR_SIZE,
    };
    QEMUIOVector qiov;

    qemu_iovec_init_external(&qiov, &iov, 1);

    /* Copy-on-read the unallocated clusters */
    return bdrv_co_copy_on_readv(bs, sector_num, nb_sectors, &qiov);
}

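/*
 * Delete the intermediate images between top and base: detach each one
 * from the chain and delete it, then point top directly at base and
 * rewrite top's in-memory backing file name and format to match.
 */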
static void close_unused_images(BlockDriverState *top, BlockDriverState *base,
                                const char *base_id)
{
    BlockDriverState *intermediate;
    intermediate = top->backing_hd;

    while (intermediate) {
        BlockDriverState *unused;

        /* reached base */
        if (intermediate == base) {
            break;
        }

        unused = intermediate;
        intermediate = intermediate->backing_hd;
        unused->backing_hd = NULL;
        bdrv_delete(unused);
    }
    top->backing_hd = base;

    pstrcpy(top->backing_file, sizeof(top->backing_file), "");
    pstrcpy(top->backing_format, sizeof(top->backing_format), "");
    if (base_id) {
        pstrcpy(top->backing_file, sizeof(top->backing_file), base_id);
        if (base->drv) {
            pstrcpy(top->backing_format, sizeof(top->backing_format),
                    base->drv->format_name);
        }
    }
}

/*
 * Given an image chain: [BASE] -> [INTER1] -> [INTER2] -> [TOP]
 *
 * Return 1 if the given sector is allocated in top.
 * Return 0 if the given sector is allocated in an intermediate image, in
 * which case it must be copied up (e.g. a sector present only in INTER1).
 * Return 1 if the sector is unallocated everywhere above base, since then
 * there is nothing to copy.
 *
 * 'pnum' is set to the number of sectors (including and immediately
 * following the specified sector) that are known to be in the same
 * allocated/unallocated state.
 */
static int coroutine_fn is_allocated_base(BlockDriverState *top,
                                          BlockDriverState *base,
                                          int64_t sector_num,
                                          int nb_sectors, int *pnum)
{
    BlockDriverState *intermediate;
    int ret, n;

    ret = bdrv_co_is_allocated(top, sector_num, nb_sectors, &n);
    if (ret) {
        *pnum = n;
        return ret;
    }

    /*
     * Is the unallocated chunk [sector_num, sector_num + n) also
     * unallocated between base and top?
     */
    intermediate = top->backing_hd;

    while (intermediate) {
        int pnum_inter;

        /* reached base */
        if (intermediate == base) {
            *pnum = n;
            return 1;
        }
        ret = bdrv_co_is_allocated(intermediate, sector_num, nb_sectors,
                                   &pnum_inter);
        if (ret < 0) {
            return ret;
        } else if (ret) {
            *pnum = pnum_inter;
            return 0;
        }

        /*
         * [sector_num, sector_num + nb_sectors) is unallocated on top but
         * the intermediate image might have [sector_num + x, sector_num +
         * nb_sectors) allocated, so clamp n to the smaller extent.
         */
        if (n > pnum_inter) {
            n = pnum_inter;
        }

        intermediate = intermediate->backing_hd;
    }

    return 1;
}

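/*
 * The streaming coroutine: scan the top image from the first to the last
 * sector in STREAM_BUFFER_SIZE chunks, copy up each chunk that is only
 * allocated below top, then rewrite the backing file link and delete the
 * now-unused intermediate images.
 */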
static void coroutine_fn stream_run(void *opaque)
{
    StreamBlockJob *s = opaque;
    BlockDriverState *bs = s->common.bs;
    BlockDriverState *base = s->base;
    int64_t sector_num, end;
    int ret = 0;
    int n;
    void *buf;

    s->common.len = bdrv_getlength(bs);
    if (s->common.len < 0) {
        block_job_complete(&s->common, s->common.len);
        return;
    }

    end = s->common.len >> BDRV_SECTOR_BITS;
    buf = qemu_blockalign(bs, STREAM_BUFFER_SIZE);

    /* Turn on copy-on-read for the whole block device so that guest read
     * requests help us make progress.  Only do this when copying the entire
     * backing chain since the copy-on-read operation does not take base into
     * account.
     */
    if (!base) {
        bdrv_enable_copy_on_read(bs);
    }

    for (sector_num = 0; sector_num < end; sector_num += n) {
retry:
        if (block_job_is_cancelled(&s->common)) {
            break;
        }

        s->common.busy = true;
        if (base) {
            ret = is_allocated_base(bs, base, sector_num,
                                    STREAM_BUFFER_SIZE / BDRV_SECTOR_SIZE, &n);
        } else {
            ret = bdrv_co_is_allocated(bs, sector_num,
                                       STREAM_BUFFER_SIZE / BDRV_SECTOR_SIZE,
                                       &n);
        }
        trace_stream_one_iteration(s, sector_num, n, ret);
        if (ret == 0) {
            if (s->common.speed) {
                uint64_t delay_ns = ratelimit_calculate_delay(&s->limit, n);
                if (delay_ns > 0) {
                    s->common.busy = false;
                    co_sleep_ns(rt_clock, delay_ns);

                    /* Recheck cancellation and that sectors are unallocated */
                    goto retry;
                }
            }
            ret = stream_populate(bs, sector_num, n, buf);
        }
        if (ret < 0) {
            break;
        }
        ret = 0;

        /* Publish progress */
        s->common.offset += n * BDRV_SECTOR_SIZE;

        /* Note that even when no rate limit is applied we need to yield
         * with no pending I/O here so that qemu_aio_flush() returns.
         */
        s->common.busy = false;
        co_sleep_ns(rt_clock, 0);
    }

    if (!base) {
        bdrv_disable_copy_on_read(bs);
    }

    if (!block_job_is_cancelled(&s->common) && sector_num == end && ret == 0) {
        const char *base_id = NULL;
        if (base) {
            base_id = s->backing_file_id;
        }
        ret = bdrv_change_backing_file(bs, base_id, NULL);
        close_unused_images(bs, base, base_id);
    }

    qemu_vfree(buf);
    block_job_complete(&s->common, ret);
}

static int stream_set_speed(BlockJob *job, int64_t value)
{
    StreamBlockJob *s = container_of(job, StreamBlockJob, common);

    if (value < 0) {
        return -EINVAL;
    }
    /* 'value' is in bytes/s but the rate limit is accounted in sectors */
    ratelimit_set_speed(&s->limit, value / BDRV_SECTOR_SIZE);
    return 0;
}

static BlockJobType stream_job_type = {
    .instance_size = sizeof(StreamBlockJob),
    .job_type      = "stream",
    .set_speed     = stream_set_speed,
};

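/*
 * Start an image streaming job on bs.  If base is non-NULL, streaming
 * stops at that image and base_id becomes the new backing file name of
 * bs; if base is NULL, the entire backing chain is flattened into bs.
 * cb/opaque are invoked when the job completes.  Returns 0 on success or
 * -EBUSY if a block job is already active on bs.
 */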
int stream_start(BlockDriverState *bs, BlockDriverState *base,
                 const char *base_id, BlockDriverCompletionFunc *cb,
                 void *opaque)
{
    StreamBlockJob *s;
    Coroutine *co;

    s = block_job_create(&stream_job_type, bs, cb, opaque);
    if (!s) {
        return -EBUSY; /* bs must already be in use */
    }

    s->base = base;
    if (base_id) {
        pstrcpy(s->backing_file_id, sizeof(s->backing_file_id), base_id);
    }

    co = qemu_coroutine_create(stream_run);
    trace_stream_start(bs, base, s, co, opaque);
    qemu_coroutine_enter(co, s);
    return 0;
}
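
/*
 * Usage sketch (hypothetical caller, not part of this file): a monitor
 * command could flatten the whole backing chain of a device like this:
 *
 *     ret = stream_start(bs, NULL, NULL, my_stream_cb, my_opaque);
 *     if (ret == -EBUSY) {
 *         ... report that a block job is already active on bs ...
 *     }
 *
 * my_stream_cb and my_opaque are caller-supplied placeholders.
 */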