/*
 * QEMU Enhanced Disk Format
 *
 * Copyright IBM, Corp. 2010
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/timer.h"
#include "trace.h"
#include "qed.h"
#include "qapi/qmp/qerror.h"
#include "migration/migration.h"
#include "sysemu/block-backend.h"

static const AIOCBInfo qed_aiocb_info = {
    .aiocb_size         = sizeof(QEDAIOCB),
};

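/**
 * Probe callback for format detection
 *
 * Returns 100, the maximal probe score, when the buffer is large enough to
 * hold a QEDHeader and begins with QED_MAGIC; returns 0 otherwise so that
 * another driver can claim the image.
 */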
static int bdrv_qed_probe(const uint8_t *buf, int buf_size,
                          const char *filename)
{
    const QEDHeader *header = (const QEDHeader *)buf;

    if (buf_size < sizeof(*header)) {
        return 0;
    }
    if (le32_to_cpu(header->magic) != QED_MAGIC) {
        return 0;
    }
    return 100;
}

/**
 * Check whether an image format is raw
 *
 * @fmt:    Backing file format, may be NULL
 */
static bool qed_fmt_is_raw(const char *fmt)
{
    return fmt && strcmp(fmt, "raw") == 0;
}

static void qed_header_le_to_cpu(const QEDHeader *le, QEDHeader *cpu)
{
    cpu->magic = le32_to_cpu(le->magic);
    cpu->cluster_size = le32_to_cpu(le->cluster_size);
    cpu->table_size = le32_to_cpu(le->table_size);
    cpu->header_size = le32_to_cpu(le->header_size);
    cpu->features = le64_to_cpu(le->features);
    cpu->compat_features = le64_to_cpu(le->compat_features);
    cpu->autoclear_features = le64_to_cpu(le->autoclear_features);
    cpu->l1_table_offset = le64_to_cpu(le->l1_table_offset);
    cpu->image_size = le64_to_cpu(le->image_size);
    cpu->backing_filename_offset = le32_to_cpu(le->backing_filename_offset);
    cpu->backing_filename_size = le32_to_cpu(le->backing_filename_size);
}

static void qed_header_cpu_to_le(const QEDHeader *cpu, QEDHeader *le)
{
    le->magic = cpu_to_le32(cpu->magic);
    le->cluster_size = cpu_to_le32(cpu->cluster_size);
    le->table_size = cpu_to_le32(cpu->table_size);
    le->header_size = cpu_to_le32(cpu->header_size);
    le->features = cpu_to_le64(cpu->features);
    le->compat_features = cpu_to_le64(cpu->compat_features);
    le->autoclear_features = cpu_to_le64(cpu->autoclear_features);
    le->l1_table_offset = cpu_to_le64(cpu->l1_table_offset);
    le->image_size = cpu_to_le64(cpu->image_size);
    le->backing_filename_offset = cpu_to_le32(cpu->backing_filename_offset);
    le->backing_filename_size = cpu_to_le32(cpu->backing_filename_size);
}

int qed_write_header_sync(BDRVQEDState *s)
{
    QEDHeader le;
    int ret;

    qed_header_cpu_to_le(&s->header, &le);
    ret = bdrv_pwrite(s->bs->file->bs, 0, &le, sizeof(le));
    if (ret != sizeof(le)) {
        return ret;
    }
    return 0;
}

typedef struct {
    GenericCB gencb;
    BDRVQEDState *s;
    struct iovec iov;
    QEMUIOVector qiov;
    int nsectors;
    uint8_t *buf;
} QEDWriteHeaderCB;

static void qed_write_header_cb(void *opaque, int ret)
{
    QEDWriteHeaderCB *write_header_cb = opaque;

    qemu_vfree(write_header_cb->buf);
    gencb_complete(write_header_cb, ret);
}

static void qed_write_header_read_cb(void *opaque, int ret)
{
    QEDWriteHeaderCB *write_header_cb = opaque;
    BDRVQEDState *s = write_header_cb->s;

    if (ret) {
        qed_write_header_cb(write_header_cb, ret);
        return;
    }

    /* Update header */
    qed_header_cpu_to_le(&s->header, (QEDHeader *)write_header_cb->buf);

    bdrv_aio_writev(s->bs->file->bs, 0, &write_header_cb->qiov,
                    write_header_cb->nsectors, qed_write_header_cb,
                    write_header_cb);
}

/**
 * Update header in-place (does not rewrite backing filename or other strings)
 *
 * This function only updates known header fields in-place and does not affect
 * extra data after the QED header.
 */
static void qed_write_header(BDRVQEDState *s, BlockCompletionFunc cb,
                             void *opaque)
{
    /* We must write full sectors for O_DIRECT but cannot necessarily generate
     * the data following the header if an unrecognized compat feature is
     * active.  Therefore, first read the sectors containing the header, update
     * them, and write back.
     */

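    /* With this field layout sizeof(QEDHeader) is 64 bytes, so the
     * read-modify-write below touches a single sector:
     * nsectors = (64 + 511) / 512 = 1 and len = 512 bytes.
     */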
    int nsectors = (sizeof(QEDHeader) + BDRV_SECTOR_SIZE - 1) /
                   BDRV_SECTOR_SIZE;
    size_t len = nsectors * BDRV_SECTOR_SIZE;
    QEDWriteHeaderCB *write_header_cb = gencb_alloc(sizeof(*write_header_cb),
                                                    cb, opaque);

    write_header_cb->s = s;
    write_header_cb->nsectors = nsectors;
    write_header_cb->buf = qemu_blockalign(s->bs, len);
    write_header_cb->iov.iov_base = write_header_cb->buf;
    write_header_cb->iov.iov_len = len;
    qemu_iovec_init_external(&write_header_cb->qiov, &write_header_cb->iov, 1);

    bdrv_aio_readv(s->bs->file->bs, 0, &write_header_cb->qiov, nsectors,
                   qed_write_header_read_cb, write_header_cb);
}

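/**
 * Maximum image size representable by the L1/L2 table geometry
 *
 * A worked example, assuming the defaults from qed.h (cluster_size = 64 KiB,
 * table_size = 4 clusters): each table holds (4 * 65536) / 8 = 32768
 * entries, so one L2 table maps 32768 * 64 KiB = 2 GiB and the 32768 L1
 * entries give a maximum image size of 64 TiB.
 */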
static uint64_t qed_max_image_size(uint32_t cluster_size, uint32_t table_size)
{
    uint64_t table_entries;
    uint64_t l2_size;

    table_entries = (table_size * cluster_size) / sizeof(uint64_t);
    l2_size = table_entries * cluster_size;

    return l2_size * table_entries;
}

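/**
 * Cluster sizes must be powers of two in [QED_MIN_CLUSTER_SIZE,
 * QED_MAX_CLUSTER_SIZE].  The x & (x - 1) test clears the lowest set bit
 * and therefore yields zero exactly when x is a power of two; zero itself
 * is already excluded by the minimum-size check.
 */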
static bool qed_is_cluster_size_valid(uint32_t cluster_size)
{
    if (cluster_size < QED_MIN_CLUSTER_SIZE ||
        cluster_size > QED_MAX_CLUSTER_SIZE) {
        return false;
    }
    if (cluster_size & (cluster_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}

static bool qed_is_table_size_valid(uint32_t table_size)
{
    if (table_size < QED_MIN_TABLE_SIZE ||
        table_size > QED_MAX_TABLE_SIZE) {
        return false;
    }
    if (table_size & (table_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}

static bool qed_is_image_size_valid(uint64_t image_size, uint32_t cluster_size,
                                    uint32_t table_size)
{
    if (image_size % BDRV_SECTOR_SIZE != 0) {
        return false; /* not multiple of sector size */
    }
    if (image_size > qed_max_image_size(cluster_size, table_size)) {
        return false; /* image is too large */
    }
    return true;
}

/**
 * Read a string of known length from the image file
 *
 * @file:       Image file
 * @offset:     File offset to start of string, in bytes
 * @n:          String length in bytes
 * @buf:        Destination buffer
 * @buflen:     Destination buffer length in bytes
 * @ret:        0 on success, -errno on failure
 *
 * The string is NUL-terminated.
 */
static int qed_read_string(BlockDriverState *file, uint64_t offset, size_t n,
                           char *buf, size_t buflen)
{
    int ret;
    if (n >= buflen) {
        return -EINVAL;
    }
    ret = bdrv_pread(file, offset, buf, n);
    if (ret < 0) {
        return ret;
    }
    buf[n] = '\0';
    return 0;
}

/**
 * Allocate new clusters
 *
 * @s:          QED state
 * @n:          Number of contiguous clusters to allocate
 * @ret:        Offset of first allocated cluster
 *
 * This function only produces the offset where the new clusters should be
 * written.  It updates BDRVQEDState but does not make any changes to the image
 * file.
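 *
 * For example, allocating n = 2 clusters with a 64 KiB cluster size simply
 * advances s->file_size by 128 KiB and returns the previous end-of-file
 * offset; the clusters materialize once data is actually written there.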
 */
static uint64_t qed_alloc_clusters(BDRVQEDState *s, unsigned int n)
{
    uint64_t offset = s->file_size;
    s->file_size += n * s->header.cluster_size;
    return offset;
}

QEDTable *qed_alloc_table(BDRVQEDState *s)
{
    /* Honor O_DIRECT memory alignment requirements */
    return qemu_blockalign(s->bs,
                           s->header.cluster_size * s->header.table_size);
}

/**
 * Allocate a new zeroed L2 table
 */
static CachedL2Table *qed_new_l2_table(BDRVQEDState *s)
{
    CachedL2Table *l2_table = qed_alloc_l2_cache_entry(&s->l2_cache);

    l2_table->table = qed_alloc_table(s);
    l2_table->offset = qed_alloc_clusters(s, s->header.table_size);

    memset(l2_table->table->offsets, 0,
           s->header.cluster_size * s->header.table_size);
    return l2_table;
}

static void qed_aio_next_io(void *opaque, int ret);

static void qed_plug_allocating_write_reqs(BDRVQEDState *s)
{
    assert(!s->allocating_write_reqs_plugged);

    s->allocating_write_reqs_plugged = true;
}

static void qed_unplug_allocating_write_reqs(BDRVQEDState *s)
{
    QEDAIOCB *acb;

    assert(s->allocating_write_reqs_plugged);

    s->allocating_write_reqs_plugged = false;

    acb = QSIMPLEQ_FIRST(&s->allocating_write_reqs);
    if (acb) {
        qed_aio_next_io(acb, 0);
    }
}

static void qed_finish_clear_need_check(void *opaque, int ret)
{
    /* Do nothing */
}

static void qed_flush_after_clear_need_check(void *opaque, int ret)
{
    BDRVQEDState *s = opaque;

    bdrv_aio_flush(s->bs, qed_finish_clear_need_check, s);

    /* No need to wait until flush completes */
    qed_unplug_allocating_write_reqs(s);
}

static void qed_clear_need_check(void *opaque, int ret)
{
    BDRVQEDState *s = opaque;

    if (ret) {
        qed_unplug_allocating_write_reqs(s);
        return;
    }

    s->header.features &= ~QED_F_NEED_CHECK;
    qed_write_header(s, qed_flush_after_clear_need_check, s);
}

static void qed_need_check_timer_cb(void *opaque)
{
    BDRVQEDState *s = opaque;

    /* The timer should only fire when allocating writes have drained */
    assert(!QSIMPLEQ_FIRST(&s->allocating_write_reqs));

    trace_qed_need_check_timer_cb(s);

    qed_plug_allocating_write_reqs(s);

    /* Ensure writes are on disk before clearing flag */
    bdrv_aio_flush(s->bs, qed_clear_need_check, s);
}

static void qed_start_need_check_timer(BDRVQEDState *s)
{
    trace_qed_start_need_check_timer(s);

    /* Use QEMU_CLOCK_VIRTUAL so we don't alter the image file while suspended
     * for migration.
     */
    timer_mod(s->need_check_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                   NANOSECONDS_PER_SECOND * QED_NEED_CHECK_TIMEOUT);
}

/* It's okay to call this multiple times or when no timer is started */
static void qed_cancel_need_check_timer(BDRVQEDState *s)
{
    trace_qed_cancel_need_check_timer(s);
    timer_del(s->need_check_timer);
}

static void bdrv_qed_detach_aio_context(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    qed_cancel_need_check_timer(s);
    timer_free(s->need_check_timer);
}

static void bdrv_qed_attach_aio_context(BlockDriverState *bs,
                                        AioContext *new_context)
{
    BDRVQEDState *s = bs->opaque;

    s->need_check_timer = aio_timer_new(new_context,
                                        QEMU_CLOCK_VIRTUAL, SCALE_NS,
                                        qed_need_check_timer_cb, s);
    if (s->header.features & QED_F_NEED_CHECK) {
        qed_start_need_check_timer(s);
    }
}

static int bdrv_qed_open(BlockDriverState *bs, QDict *options, int flags,
                         Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader le_header;
    int64_t file_size;
    int ret;

    s->bs = bs;
    QSIMPLEQ_INIT(&s->allocating_write_reqs);

    ret = bdrv_pread(bs->file->bs, 0, &le_header, sizeof(le_header));
    if (ret < 0) {
        return ret;
    }
    qed_header_le_to_cpu(&le_header, &s->header);

    if (s->header.magic != QED_MAGIC) {
        error_setg(errp, "Image not in QED format");
        return -EINVAL;
    }
    if (s->header.features & ~QED_FEATURE_MASK) {
        /* image uses unsupported feature bits */
        error_setg(errp, "Unsupported QED features: %" PRIx64,
                   s->header.features & ~QED_FEATURE_MASK);
        return -ENOTSUP;
    }
    if (!qed_is_cluster_size_valid(s->header.cluster_size)) {
        return -EINVAL;
    }

    /* Round down file size to the last cluster */
    file_size = bdrv_getlength(bs->file->bs);
    if (file_size < 0) {
        return file_size;
    }
    s->file_size = qed_start_of_cluster(s, file_size);

    if (!qed_is_table_size_valid(s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_is_image_size_valid(s->header.image_size,
                                 s->header.cluster_size,
                                 s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_check_table_offset(s, s->header.l1_table_offset)) {
        return -EINVAL;
    }

    s->table_nelems = (s->header.cluster_size * s->header.table_size) /
                      sizeof(uint64_t);
    s->l2_shift = ctz32(s->header.cluster_size);
    s->l2_mask = s->table_nelems - 1;
    s->l1_shift = s->l2_shift + ctz32(s->table_nelems);
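    /* With the default 64 KiB clusters and table_size of 4 this yields
     * table_nelems = 32768, l2_shift = 16, l2_mask = 0x7fff and
     * l1_shift = 31, i.e. each L1 entry covers 2 GiB of image data.
     */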

    /* Header size calculation must not overflow uint32_t */
    if (s->header.header_size > UINT32_MAX / s->header.cluster_size) {
        return -EINVAL;
    }

    if ((s->header.features & QED_F_BACKING_FILE)) {
        if ((uint64_t)s->header.backing_filename_offset +
            s->header.backing_filename_size >
            s->header.cluster_size * s->header.header_size) {
            return -EINVAL;
        }

        ret = qed_read_string(bs->file->bs, s->header.backing_filename_offset,
                              s->header.backing_filename_size, bs->backing_file,
                              sizeof(bs->backing_file));
        if (ret < 0) {
            return ret;
        }

        if (s->header.features & QED_F_BACKING_FORMAT_NO_PROBE) {
            pstrcpy(bs->backing_format, sizeof(bs->backing_format), "raw");
        }
    }

    /* Reset unknown autoclear feature bits.  This is a backwards
     * compatibility mechanism that allows images to be opened by older
     * programs, which "knock out" unknown feature bits.  When an image is
     * opened by a newer program again it can detect that the autoclear
     * feature is no longer valid.
     */
    if ((s->header.autoclear_features & ~QED_AUTOCLEAR_FEATURE_MASK) != 0 &&
        !bdrv_is_read_only(bs->file->bs) && !(flags & BDRV_O_INACTIVE)) {
        s->header.autoclear_features &= QED_AUTOCLEAR_FEATURE_MASK;

        ret = qed_write_header_sync(s);
        if (ret) {
            return ret;
        }

        /* From here on only known autoclear feature bits are valid */
        bdrv_flush(bs->file->bs);
    }

    s->l1_table = qed_alloc_table(s);
    qed_init_l2_cache(&s->l2_cache);

    ret = qed_read_l1_table_sync(s);
    if (ret) {
        goto out;
    }

    /* If image was not closed cleanly, check consistency */
    if (!(flags & BDRV_O_CHECK) && (s->header.features & QED_F_NEED_CHECK)) {
        /* Read-only images cannot be fixed.  There is no risk of corruption
         * since write operations are not possible.  Therefore, allow
         * potentially inconsistent images to be opened read-only.  This can
         * aid data recovery from an otherwise inconsistent image.
         */
        if (!bdrv_is_read_only(bs->file->bs) &&
            !(flags & BDRV_O_INACTIVE)) {
            BdrvCheckResult result = {0};

            ret = qed_check(s, &result, true);
            if (ret) {
                goto out;
            }
        }
    }

    bdrv_qed_attach_aio_context(bs, bdrv_get_aio_context(bs));

out:
    if (ret) {
        qed_free_l2_cache(&s->l2_cache);
        qemu_vfree(s->l1_table);
    }
    return ret;
}

static void bdrv_qed_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BDRVQEDState *s = bs->opaque;

    bs->bl.write_zeroes_alignment = s->header.cluster_size >> BDRV_SECTOR_BITS;
}

/* We have nothing to do for QED reopen; the stub just returns
 * success */
static int bdrv_qed_reopen_prepare(BDRVReopenState *state,
                                   BlockReopenQueue *queue, Error **errp)
{
    return 0;
}

static void bdrv_qed_close(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    bdrv_qed_detach_aio_context(bs);

    /* Ensure writes reach stable storage */
    bdrv_flush(bs->file->bs);

    /* Clean shutdown, no check required on next open */
    if (s->header.features & QED_F_NEED_CHECK) {
        s->header.features &= ~QED_F_NEED_CHECK;
        qed_write_header_sync(s);
    }

    qed_free_l2_cache(&s->l2_cache);
    qemu_vfree(s->l1_table);
}

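/**
 * Create a new image file
 *
 * The resulting layout, given the header_size of 1 cluster set below: the
 * QED header at offset 0, the backing filename string (if any) immediately
 * after the header within cluster 0, and the zeroed L1 table starting at
 * offset cluster_size.
 */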
static int qed_create(const char *filename, uint32_t cluster_size,
                      uint64_t image_size, uint32_t table_size,
                      const char *backing_file, const char *backing_fmt,
                      QemuOpts *opts, Error **errp)
{
    QEDHeader header = {
        .magic = QED_MAGIC,
        .cluster_size = cluster_size,
        .table_size = table_size,
        .header_size = 1,
        .features = 0,
        .compat_features = 0,
        .l1_table_offset = cluster_size,
        .image_size = image_size,
    };
    QEDHeader le_header;
    uint8_t *l1_table = NULL;
    size_t l1_size = header.cluster_size * header.table_size;
    Error *local_err = NULL;
    int ret = 0;
    BlockBackend *blk;

    ret = bdrv_create_file(filename, opts, &local_err);
    if (ret < 0) {
        error_propagate(errp, local_err);
        return ret;
    }

    blk = blk_new_open(filename, NULL, NULL,
                       BDRV_O_RDWR | BDRV_O_CACHE_WB | BDRV_O_PROTOCOL,
                       &local_err);
    if (blk == NULL) {
        error_propagate(errp, local_err);
        return -EIO;
    }

    blk_set_allow_write_beyond_eof(blk, true);

    /* File must start empty and grow; check that truncate is supported */
    ret = blk_truncate(blk, 0);
    if (ret < 0) {
        goto out;
    }

    if (backing_file) {
        header.features |= QED_F_BACKING_FILE;
        header.backing_filename_offset = sizeof(le_header);
        header.backing_filename_size = strlen(backing_file);

        if (qed_fmt_is_raw(backing_fmt)) {
            header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    qed_header_cpu_to_le(&header, &le_header);
    ret = blk_pwrite(blk, 0, &le_header, sizeof(le_header));
    if (ret < 0) {
        goto out;
    }
    ret = blk_pwrite(blk, sizeof(le_header), backing_file,
                     header.backing_filename_size);
    if (ret < 0) {
        goto out;
    }

    l1_table = g_malloc0(l1_size);
    ret = blk_pwrite(blk, header.l1_table_offset, l1_table, l1_size);
    if (ret < 0) {
        goto out;
    }

    ret = 0; /* success */
out:
    g_free(l1_table);
    blk_unref(blk);
    return ret;
}

static int bdrv_qed_create(const char *filename, QemuOpts *opts, Error **errp)
{
    uint64_t image_size = 0;
    uint32_t cluster_size = QED_DEFAULT_CLUSTER_SIZE;
    uint32_t table_size = QED_DEFAULT_TABLE_SIZE;
    char *backing_file = NULL;
    char *backing_fmt = NULL;
    int ret;

    image_size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0),
                          BDRV_SECTOR_SIZE);
    backing_file = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FILE);
    backing_fmt = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FMT);
    cluster_size = qemu_opt_get_size_del(opts,
                                         BLOCK_OPT_CLUSTER_SIZE,
                                         QED_DEFAULT_CLUSTER_SIZE);
    table_size = qemu_opt_get_size_del(opts, BLOCK_OPT_TABLE_SIZE,
                                       QED_DEFAULT_TABLE_SIZE);

    if (!qed_is_cluster_size_valid(cluster_size)) {
        error_setg(errp, "QED cluster size must be within range [%u, %u] "
                         "and power of 2",
                   QED_MIN_CLUSTER_SIZE, QED_MAX_CLUSTER_SIZE);
        ret = -EINVAL;
        goto finish;
    }
    if (!qed_is_table_size_valid(table_size)) {
        error_setg(errp, "QED table size must be within range [%u, %u] "
                         "and power of 2",
                   QED_MIN_TABLE_SIZE, QED_MAX_TABLE_SIZE);
        ret = -EINVAL;
        goto finish;
    }
    if (!qed_is_image_size_valid(image_size, cluster_size, table_size)) {
        error_setg(errp, "QED image size must be a non-zero multiple of "
                         "cluster size and less than %" PRIu64 " bytes",
                   qed_max_image_size(cluster_size, table_size));
        ret = -EINVAL;
        goto finish;
    }

    ret = qed_create(filename, cluster_size, image_size, table_size,
                     backing_file, backing_fmt, opts, errp);

finish:
    g_free(backing_file);
    g_free(backing_fmt);
    return ret;
}

typedef struct {
    BlockDriverState *bs;
    Coroutine *co;
    uint64_t pos;
    int64_t status;
    int *pnum;
    BlockDriverState **file;
} QEDIsAllocatedCB;

static void qed_is_allocated_cb(void *opaque, int ret, uint64_t offset, size_t len)
{
    QEDIsAllocatedCB *cb = opaque;
    BDRVQEDState *s = cb->bs->opaque;
    *cb->pnum = len / BDRV_SECTOR_SIZE;
    switch (ret) {
    case QED_CLUSTER_FOUND:
        offset |= qed_offset_into_cluster(s, cb->pos);
        cb->status = BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID | offset;
        *cb->file = cb->bs->file->bs;
        break;
    case QED_CLUSTER_ZERO:
        cb->status = BDRV_BLOCK_ZERO;
        break;
    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
        cb->status = 0;
        break;
    default:
        assert(ret < 0);
        cb->status = ret;
        break;
    }

    if (cb->co) {
        qemu_coroutine_enter(cb->co, NULL);
    }
}

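/**
 * The callback-based qed_find_cluster() is bridged into this coroutine with
 * a sentinel: cb.status starts as BDRV_BLOCK_OFFSET_MASK, so if the callback
 * has already run by the time qed_find_cluster() returns the yield loop is
 * skipped; otherwise the coroutine yields until qed_is_allocated_cb()
 * re-enters it.
 */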
static int64_t coroutine_fn bdrv_qed_co_get_block_status(BlockDriverState *bs,
                                                 int64_t sector_num,
                                                 int nb_sectors, int *pnum,
                                                 BlockDriverState **file)
{
    BDRVQEDState *s = bs->opaque;
    size_t len = (size_t)nb_sectors * BDRV_SECTOR_SIZE;
    QEDIsAllocatedCB cb = {
        .bs = bs,
        .pos = (uint64_t)sector_num * BDRV_SECTOR_SIZE,
        .status = BDRV_BLOCK_OFFSET_MASK,
        .pnum = pnum,
        .file = file,
    };
    QEDRequest request = { .l2_table = NULL };

    qed_find_cluster(s, &request, cb.pos, len, qed_is_allocated_cb, &cb);

    /* Now sleep if the callback wasn't invoked immediately */
    while (cb.status == BDRV_BLOCK_OFFSET_MASK) {
        cb.co = qemu_coroutine_self();
        qemu_coroutine_yield();
    }

    qed_unref_l2_cache_entry(request.l2_table);

    return cb.status;
}

static BDRVQEDState *acb_to_s(QEDAIOCB *acb)
{
    return acb->common.bs->opaque;
}

/**
 * Read from the backing file or zero-fill if no backing file
 *
 * @s:              QED state
 * @pos:            Byte position in device
 * @qiov:           Destination I/O vector
 * @backing_qiov:   Possibly shortened copy of qiov, to be allocated here
 * @cb:             Completion function
 * @opaque:         User data for completion function
 *
 * This function reads qiov->size bytes starting at pos from the backing file.
 * If there is no backing file then zeroes are read.
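 *
 * For example, with pos = 1 MiB, qiov->size = 64 KiB and a 1 MiB + 4 KiB
 * backing file, the qiov is first zeroed in full, then a shortened 4 KiB
 * backing_qiov is read from the backing file, leaving the trailing 60 KiB
 * zeroed.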
 */
static void qed_read_backing_file(BDRVQEDState *s, uint64_t pos,
                                  QEMUIOVector *qiov,
                                  QEMUIOVector **backing_qiov,
                                  BlockCompletionFunc *cb, void *opaque)
{
    uint64_t backing_length = 0;
    size_t size;

    /* If there is a backing file, get its length.  Treat the absence of a
     * backing file like a zero length backing file.
     */
    if (s->bs->backing) {
        int64_t l = bdrv_getlength(s->bs->backing->bs);
        if (l < 0) {
            cb(opaque, l);
            return;
        }
        backing_length = l;
    }

    /* Zero all sectors if reading beyond the end of the backing file */
    if (pos >= backing_length ||
        pos + qiov->size > backing_length) {
        qemu_iovec_memset(qiov, 0, 0, qiov->size);
    }

    /* Complete now if there are no backing file sectors to read */
    if (pos >= backing_length) {
        cb(opaque, 0);
        return;
    }

    /* If the read straddles the end of the backing file, shorten it */
    size = MIN((uint64_t)backing_length - pos, qiov->size);

    assert(*backing_qiov == NULL);
    *backing_qiov = g_new(QEMUIOVector, 1);
    qemu_iovec_init(*backing_qiov, qiov->niov);
    qemu_iovec_concat(*backing_qiov, qiov, 0, size);

    BLKDBG_EVENT(s->bs->file, BLKDBG_READ_BACKING_AIO);
    bdrv_aio_readv(s->bs->backing->bs, pos / BDRV_SECTOR_SIZE,
                   *backing_qiov, size / BDRV_SECTOR_SIZE, cb, opaque);
}

typedef struct {
    GenericCB gencb;
    BDRVQEDState *s;
    QEMUIOVector qiov;
    QEMUIOVector *backing_qiov;
    struct iovec iov;
    uint64_t offset;
} CopyFromBackingFileCB;

static void qed_copy_from_backing_file_cb(void *opaque, int ret)
{
    CopyFromBackingFileCB *copy_cb = opaque;
    qemu_vfree(copy_cb->iov.iov_base);
    gencb_complete(&copy_cb->gencb, ret);
}

static void qed_copy_from_backing_file_write(void *opaque, int ret)
{
    CopyFromBackingFileCB *copy_cb = opaque;
    BDRVQEDState *s = copy_cb->s;

    if (copy_cb->backing_qiov) {
        qemu_iovec_destroy(copy_cb->backing_qiov);
        g_free(copy_cb->backing_qiov);
        copy_cb->backing_qiov = NULL;
    }

    if (ret) {
        qed_copy_from_backing_file_cb(copy_cb, ret);
        return;
    }

    BLKDBG_EVENT(s->bs->file, BLKDBG_COW_WRITE);
    bdrv_aio_writev(s->bs->file->bs, copy_cb->offset / BDRV_SECTOR_SIZE,
                    &copy_cb->qiov, copy_cb->qiov.size / BDRV_SECTOR_SIZE,
                    qed_copy_from_backing_file_cb, copy_cb);
}

/**
 * Copy data from backing file into the image
 *
 * @s:          QED state
 * @pos:        Byte position in device
 * @len:        Number of bytes
 * @offset:     Byte offset in image file
 * @cb:         Completion function
 * @opaque:     User data for completion function
 */
static void qed_copy_from_backing_file(BDRVQEDState *s, uint64_t pos,
                                       uint64_t len, uint64_t offset,
                                       BlockCompletionFunc *cb,
                                       void *opaque)
{
    CopyFromBackingFileCB *copy_cb;

    /* Skip copy entirely if there is no work to do */
    if (len == 0) {
        cb(opaque, 0);
        return;
    }

    copy_cb = gencb_alloc(sizeof(*copy_cb), cb, opaque);
    copy_cb->s = s;
    copy_cb->offset = offset;
    copy_cb->backing_qiov = NULL;
    copy_cb->iov.iov_base = qemu_blockalign(s->bs, len);
    copy_cb->iov.iov_len = len;
    qemu_iovec_init_external(&copy_cb->qiov, &copy_cb->iov, 1);

    qed_read_backing_file(s, pos, &copy_cb->qiov, &copy_cb->backing_qiov,
                          qed_copy_from_backing_file_write, copy_cb);
}

/**
 * Link one or more contiguous clusters into a table
 *
 * @s:              QED state
 * @table:          L2 table
 * @index:          First cluster index
 * @n:              Number of contiguous clusters
 * @cluster:        First cluster offset
 *
 * The cluster offset may be an allocated byte offset in the image file, the
 * zero cluster marker, or the unallocated cluster marker.
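 *
 * For example (assuming 64 KiB clusters), linking n = 3 allocated clusters
 * starting at byte offset 0x100000 stores 0x100000, 0x110000 and 0x120000
 * in consecutive entries, whereas a zero or unallocated marker is repeated
 * unchanged because only real offsets advance by cluster_size.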
 */
static void qed_update_l2_table(BDRVQEDState *s, QEDTable *table, int index,
                                unsigned int n, uint64_t cluster)
{
    int i;
    for (i = index; i < index + n; i++) {
        table->offsets[i] = cluster;
        if (!qed_offset_is_unalloc_cluster(cluster) &&
            !qed_offset_is_zero_cluster(cluster)) {
            cluster += s->header.cluster_size;
        }
    }
}

static void qed_aio_complete_bh(void *opaque)
{
    QEDAIOCB *acb = opaque;
    BlockCompletionFunc *cb = acb->common.cb;
    void *user_opaque = acb->common.opaque;
    int ret = acb->bh_ret;

    qemu_bh_delete(acb->bh);
    qemu_aio_unref(acb);

    /* Invoke callback */
    cb(user_opaque, ret);
}

static void qed_aio_complete(QEDAIOCB *acb, int ret)
{
    BDRVQEDState *s = acb_to_s(acb);

    trace_qed_aio_complete(s, acb, ret);

    /* Free resources */
    qemu_iovec_destroy(&acb->cur_qiov);
    qed_unref_l2_cache_entry(acb->request.l2_table);

    /* Free the buffer we may have allocated for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        qemu_vfree(acb->qiov->iov[0].iov_base);
        acb->qiov->iov[0].iov_base = NULL;
    }

    /* Arrange for a bh to invoke the completion function */
    acb->bh_ret = ret;
    acb->bh = aio_bh_new(bdrv_get_aio_context(acb->common.bs),
                         qed_aio_complete_bh, acb);
    qemu_bh_schedule(acb->bh);

    /* Start next allocating write request waiting behind this one.  Note that
     * requests enqueue themselves when they first hit an unallocated cluster
     * but they wait until the entire request is finished before waking up the
     * next request in the queue.  This ensures that we don't cycle through
     * requests multiple times but rather finish one at a time completely.
     */
    if (acb == QSIMPLEQ_FIRST(&s->allocating_write_reqs)) {
        QSIMPLEQ_REMOVE_HEAD(&s->allocating_write_reqs, next);
        acb = QSIMPLEQ_FIRST(&s->allocating_write_reqs);
        if (acb) {
            qed_aio_next_io(acb, 0);
        } else if (s->header.features & QED_F_NEED_CHECK) {
            qed_start_need_check_timer(s);
        }
    }
}

/**
 * Commit the current L2 table to the cache
 */
static void qed_commit_l2_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    CachedL2Table *l2_table = acb->request.l2_table;
    uint64_t l2_offset = l2_table->offset;

    qed_commit_l2_cache_entry(&s->l2_cache, l2_table);

    /* This is guaranteed to succeed because we just committed the entry to the
     * cache.
     */
    acb->request.l2_table = qed_find_l2_cache_entry(&s->l2_cache, l2_offset);
    assert(acb->request.l2_table != NULL);

    qed_aio_next_io(opaque, ret);
}

/**
 * Update L1 table with new L2 table offset and write it out
 */
static void qed_aio_write_l1_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    int index;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    index = qed_l1_index(s, acb->cur_pos);
    s->l1_table->offsets[index] = acb->request.l2_table->offset;

    qed_write_l1_table(s, index, 1, qed_commit_l2_update, acb);
}

/**
 * Update L2 table with new cluster offsets and write them out
 */
static void qed_aio_write_l2_update(QEDAIOCB *acb, int ret, uint64_t offset)
{
    BDRVQEDState *s = acb_to_s(acb);
    bool need_alloc = acb->find_cluster_ret == QED_CLUSTER_L1;
    int index;

    if (ret) {
        goto err;
    }

    if (need_alloc) {
        qed_unref_l2_cache_entry(acb->request.l2_table);
        acb->request.l2_table = qed_new_l2_table(s);
    }

    index = qed_l2_index(s, acb->cur_pos);
    qed_update_l2_table(s, acb->request.l2_table->table, index,
                        acb->cur_nclusters, offset);

    if (need_alloc) {
        /* Write out the whole new L2 table */
        qed_write_l2_table(s, &acb->request, 0, s->table_nelems, true,
                           qed_aio_write_l1_update, acb);
    } else {
        /* Write out only the updated part of the L2 table */
        qed_write_l2_table(s, &acb->request, index, acb->cur_nclusters, false,
                           qed_aio_next_io, acb);
    }
    return;

err:
    qed_aio_complete(acb, ret);
}

static void qed_aio_write_l2_update_cb(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    qed_aio_write_l2_update(acb, ret, acb->cur_cluster);
}

/**
 * Flush new data clusters before updating the L2 table
 *
 * This flush is necessary when a backing file is in use.  A crash during an
 * allocating write could result in empty clusters in the image.  If the write
 * only touched a subregion of the cluster, then backing image sectors have
 * been lost in the untouched region.  The solution is to flush after writing a
 * new data cluster and before updating the L2 table.
 */
static void qed_aio_write_flush_before_l2_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);

    if (!bdrv_aio_flush(s->bs->file->bs, qed_aio_write_l2_update_cb, opaque)) {
        qed_aio_complete(acb, -EIO);
    }
}

/**
 * Write data to the image file
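 *
 * For an in-place write (QED_CLUSTER_FOUND) the request simply proceeds to
 * the next cluster afterwards; for an allocating write the L2 table is
 * updated next, with an intervening flush when a backing file is present
 * (see qed_aio_write_flush_before_l2_update()).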
 */
static void qed_aio_write_main(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t offset = acb->cur_cluster +
                      qed_offset_into_cluster(s, acb->cur_pos);
    BlockCompletionFunc *next_fn;

    trace_qed_aio_write_main(s, acb, ret, offset, acb->cur_qiov.size);

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    if (acb->find_cluster_ret == QED_CLUSTER_FOUND) {
        next_fn = qed_aio_next_io;
    } else {
        if (s->bs->backing) {
            next_fn = qed_aio_write_flush_before_l2_update;
        } else {
            next_fn = qed_aio_write_l2_update_cb;
        }
    }

    BLKDBG_EVENT(s->bs->file, BLKDBG_WRITE_AIO);
    bdrv_aio_writev(s->bs->file->bs, offset / BDRV_SECTOR_SIZE,
                    &acb->cur_qiov, acb->cur_qiov.size / BDRV_SECTOR_SIZE,
                    next_fn, acb);
}

/**
 * Populate the untouched region at the back of a new data cluster
 */
static void qed_aio_write_postfill(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t start = acb->cur_pos + acb->cur_qiov.size;
    uint64_t len =
        qed_start_of_cluster(s, start + s->header.cluster_size - 1) - start;
    uint64_t offset = acb->cur_cluster +
                      qed_offset_into_cluster(s, acb->cur_pos) +
                      acb->cur_qiov.size;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    trace_qed_aio_write_postfill(s, acb, start, len, offset);
    qed_copy_from_backing_file(s, start, len, offset,
                               qed_aio_write_main, acb);
}

/**
 * Populate the untouched region at the front of a new data cluster
 */
static void qed_aio_write_prefill(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t start = qed_start_of_cluster(s, acb->cur_pos);
    uint64_t len = qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_write_prefill(s, acb, start, len, acb->cur_cluster);
    qed_copy_from_backing_file(s, start, len, acb->cur_cluster,
                               qed_aio_write_postfill, acb);
}

/**
 * Check if the QED_F_NEED_CHECK bit should be set during allocating write
 */
static bool qed_should_set_need_check(BDRVQEDState *s)
{
    /* The flush before L2 update path ensures consistency */
    if (s->bs->backing) {
        return false;
    }

    return !(s->header.features & QED_F_NEED_CHECK);
}

static void qed_aio_write_zero_cluster(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    qed_aio_write_l2_update(acb, 0, 1);
}

/**
 * Write new data cluster
 *
 * @acb:        Write request
 * @len:        Length in bytes
 *
 * This path is taken when writing to previously unallocated clusters.
 */
static void qed_aio_write_alloc(QEDAIOCB *acb, size_t len)
{
    BDRVQEDState *s = acb_to_s(acb);
    BlockCompletionFunc *cb;

    /* Cancel timer when the first allocating request comes in */
    if (QSIMPLEQ_EMPTY(&s->allocating_write_reqs)) {
        qed_cancel_need_check_timer(s);
    }

    /* Freeze this request if another allocating write is in progress */
    if (acb != QSIMPLEQ_FIRST(&s->allocating_write_reqs)) {
        QSIMPLEQ_INSERT_TAIL(&s->allocating_write_reqs, acb, next);
    }
    if (acb != QSIMPLEQ_FIRST(&s->allocating_write_reqs) ||
        s->allocating_write_reqs_plugged) {
        return; /* wait for existing request to finish */
    }

    acb->cur_nclusters = qed_bytes_to_clusters(s,
            qed_offset_into_cluster(s, acb->cur_pos) + len);
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    if (acb->flags & QED_AIOCB_ZERO) {
        /* Skip ahead if the clusters are already zero */
        if (acb->find_cluster_ret == QED_CLUSTER_ZERO) {
            qed_aio_next_io(acb, 0);
            return;
        }

        cb = qed_aio_write_zero_cluster;
    } else {
        cb = qed_aio_write_prefill;
        acb->cur_cluster = qed_alloc_clusters(s, acb->cur_nclusters);
    }

    if (qed_should_set_need_check(s)) {
        s->header.features |= QED_F_NEED_CHECK;
        qed_write_header(s, cb, acb);
    } else {
        cb(acb, 0);
    }
}

/**
 * Write data cluster in place
 *
 * @acb:        Write request
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * This path is taken when writing to already allocated clusters.
 */
static void qed_aio_write_inplace(QEDAIOCB *acb, uint64_t offset, size_t len)
{
    /* Allocate buffer for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        struct iovec *iov = acb->qiov->iov;

        if (!iov->iov_base) {
            iov->iov_base = qemu_try_blockalign(acb->common.bs, iov->iov_len);
            if (iov->iov_base == NULL) {
                qed_aio_complete(acb, -ENOMEM);
                return;
            }
            memset(iov->iov_base, 0, iov->iov_len);
        }
    }

    /* Calculate the I/O vector */
    acb->cur_cluster = offset;
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Do the actual write */
    qed_aio_write_main(acb, 0);
}

/**
 * Write data cluster
 *
 * @opaque:     Write request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_L2, QED_CLUSTER_L1,
 *              or -errno
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * Callback from qed_find_cluster().
 */
static void qed_aio_write_data(void *opaque, int ret,
                               uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;

    trace_qed_aio_write_data(acb_to_s(acb), acb, ret, offset, len);

    acb->find_cluster_ret = ret;

    switch (ret) {
    case QED_CLUSTER_FOUND:
        qed_aio_write_inplace(acb, offset, len);
        break;

    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
    case QED_CLUSTER_ZERO:
        qed_aio_write_alloc(acb, len);
        break;

    default:
        qed_aio_complete(acb, ret);
        break;
    }
}

/**
 * Read data cluster
 *
 * @opaque:     Read request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_L2, QED_CLUSTER_L1,
 *              or -errno
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * Callback from qed_find_cluster().
 */
static void qed_aio_read_data(void *opaque, int ret,
                              uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    BlockDriverState *bs = acb->common.bs;

    /* Adjust offset into cluster */
    offset += qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_read_data(s, acb, ret, offset, len);

    if (ret < 0) {
        goto err;
    }

    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Handle zero cluster and backing file reads */
    if (ret == QED_CLUSTER_ZERO) {
        qemu_iovec_memset(&acb->cur_qiov, 0, 0, acb->cur_qiov.size);
        qed_aio_next_io(acb, 0);
        return;
    } else if (ret != QED_CLUSTER_FOUND) {
        qed_read_backing_file(s, acb->cur_pos, &acb->cur_qiov,
                              &acb->backing_qiov, qed_aio_next_io, acb);
        return;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
    bdrv_aio_readv(bs->file->bs, offset / BDRV_SECTOR_SIZE,
                   &acb->cur_qiov, acb->cur_qiov.size / BDRV_SECTOR_SIZE,
                   qed_aio_next_io, acb);
    return;

err:
    qed_aio_complete(acb, ret);
}

/**
 * Begin next I/O or complete the request
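 *
 * Each pass handles one run of contiguous clusters: cur_pos advances by the
 * bytes just processed, cur_qiov is reset, and qed_find_cluster() is called
 * again until cur_pos reaches end_pos.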
 */
static void qed_aio_next_io(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    QEDFindClusterFunc *io_fn = (acb->flags & QED_AIOCB_WRITE) ?
                                qed_aio_write_data : qed_aio_read_data;

    trace_qed_aio_next_io(s, acb, ret, acb->cur_pos + acb->cur_qiov.size);

    if (acb->backing_qiov) {
        qemu_iovec_destroy(acb->backing_qiov);
        g_free(acb->backing_qiov);
        acb->backing_qiov = NULL;
    }

    /* Handle I/O error */
    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    acb->qiov_offset += acb->cur_qiov.size;
    acb->cur_pos += acb->cur_qiov.size;
    qemu_iovec_reset(&acb->cur_qiov);

    /* Complete request */
    if (acb->cur_pos >= acb->end_pos) {
        qed_aio_complete(acb, 0);
        return;
    }

    /* Find next cluster and start I/O */
    qed_find_cluster(s, &acb->request,
                     acb->cur_pos, acb->end_pos - acb->cur_pos,
                     io_fn, acb);
}

static BlockAIOCB *qed_aio_setup(BlockDriverState *bs,
                                 int64_t sector_num,
                                 QEMUIOVector *qiov, int nb_sectors,
                                 BlockCompletionFunc *cb,
                                 void *opaque, int flags)
{
    QEDAIOCB *acb = qemu_aio_get(&qed_aiocb_info, bs, cb, opaque);

    trace_qed_aio_setup(bs->opaque, acb, sector_num, nb_sectors,
                        opaque, flags);

    acb->flags = flags;
    acb->qiov = qiov;
    acb->qiov_offset = 0;
    acb->cur_pos = (uint64_t)sector_num * BDRV_SECTOR_SIZE;
    acb->end_pos = acb->cur_pos + nb_sectors * BDRV_SECTOR_SIZE;
    acb->backing_qiov = NULL;
    acb->request.l2_table = NULL;
    qemu_iovec_init(&acb->cur_qiov, qiov->niov);

    /* Start request */
    qed_aio_next_io(acb, 0);
    return &acb->common;
}

static BlockAIOCB *bdrv_qed_aio_readv(BlockDriverState *bs,
                                      int64_t sector_num,
                                      QEMUIOVector *qiov, int nb_sectors,
                                      BlockCompletionFunc *cb,
                                      void *opaque)
{
    return qed_aio_setup(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
}

static BlockAIOCB *bdrv_qed_aio_writev(BlockDriverState *bs,
                                       int64_t sector_num,
                                       QEMUIOVector *qiov, int nb_sectors,
                                       BlockCompletionFunc *cb,
                                       void *opaque)
{
    return qed_aio_setup(bs, sector_num, qiov, nb_sectors, cb,
                         opaque, QED_AIOCB_WRITE);
}

typedef struct {
    Coroutine *co;
    int ret;
    bool done;
} QEDWriteZeroesCB;

static void coroutine_fn qed_co_write_zeroes_cb(void *opaque, int ret)
{
    QEDWriteZeroesCB *cb = opaque;

    cb->done = true;
    cb->ret = ret;
    if (cb->co) {
        qemu_coroutine_enter(cb->co, NULL);
    }
}

static int coroutine_fn bdrv_qed_co_write_zeroes(BlockDriverState *bs,
                                                 int64_t sector_num,
                                                 int nb_sectors,
                                                 BdrvRequestFlags flags)
{
    BlockAIOCB *blockacb;
    BDRVQEDState *s = bs->opaque;
    QEDWriteZeroesCB cb = { .done = false };
    QEMUIOVector qiov;
    struct iovec iov;

    /* Refuse if there are untouched backing file sectors */
    if (bs->backing) {
        if (qed_offset_into_cluster(s, sector_num * BDRV_SECTOR_SIZE) != 0) {
            return -ENOTSUP;
        }
        if (qed_offset_into_cluster(s, nb_sectors * BDRV_SECTOR_SIZE) != 0) {
            return -ENOTSUP;
        }
    }

    /* Zero writes start without an I/O buffer.  If a buffer becomes necessary
     * then it will be allocated during request processing.
     */
    iov.iov_base = NULL;
    iov.iov_len  = nb_sectors * BDRV_SECTOR_SIZE;

    qemu_iovec_init_external(&qiov, &iov, 1);
    blockacb = qed_aio_setup(bs, sector_num, &qiov, nb_sectors,
                             qed_co_write_zeroes_cb, &cb,
                             QED_AIOCB_WRITE | QED_AIOCB_ZERO);
    if (!blockacb) {
        return -EIO;
    }
    if (!cb.done) {
        cb.co = qemu_coroutine_self();
        qemu_coroutine_yield();
    }
    assert(cb.done);
    return cb.ret;
}

static int bdrv_qed_truncate(BlockDriverState *bs, int64_t offset)
{
    BDRVQEDState *s = bs->opaque;
    uint64_t old_image_size;
    int ret;

    if (!qed_is_image_size_valid(offset, s->header.cluster_size,
                                 s->header.table_size)) {
        return -EINVAL;
    }

    /* Shrinking is currently not supported */
    if ((uint64_t)offset < s->header.image_size) {
        return -ENOTSUP;
    }

    old_image_size = s->header.image_size;
    s->header.image_size = offset;
    ret = qed_write_header_sync(s);
    if (ret < 0) {
        s->header.image_size = old_image_size;
    }
    return ret;
}

static int64_t bdrv_qed_getlength(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;
    return s->header.image_size;
}

static int bdrv_qed_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    BDRVQEDState *s = bs->opaque;

    memset(bdi, 0, sizeof(*bdi));
    bdi->cluster_size = s->header.cluster_size;
    bdi->is_dirty = s->header.features & QED_F_NEED_CHECK;
    bdi->unallocated_blocks_are_zero = true;
    bdi->can_write_zeroes_with_unmap = true;
    return 0;
}

static int bdrv_qed_change_backing_file(BlockDriverState *bs,
                                        const char *backing_file,
                                        const char *backing_fmt)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader new_header, le_header;
    void *buffer;
    size_t buffer_len, backing_file_len;
    int ret;

    /* Refuse to set backing filename if unknown compat feature bits are
     * active.  If the image uses an unknown compat feature then we may not
     * know the layout of data following the header structure and cannot safely
     * add a new string.
     */
    if (backing_file && (s->header.compat_features &
                         ~QED_COMPAT_FEATURE_MASK)) {
        return -ENOTSUP;
    }

    memcpy(&new_header, &s->header, sizeof(new_header));

    new_header.features &= ~(QED_F_BACKING_FILE |
                             QED_F_BACKING_FORMAT_NO_PROBE);

    /* Adjust feature flags */
    if (backing_file) {
        new_header.features |= QED_F_BACKING_FILE;

        if (qed_fmt_is_raw(backing_fmt)) {
            new_header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    /* Calculate new header size */
    backing_file_len = 0;

    if (backing_file) {
        backing_file_len = strlen(backing_file);
    }

    buffer_len = sizeof(new_header);
    new_header.backing_filename_offset = buffer_len;
    new_header.backing_filename_size = backing_file_len;
    buffer_len += backing_file_len;

    /* Make sure we can rewrite header without failing */
    if (buffer_len > new_header.header_size * new_header.cluster_size) {
        return -ENOSPC;
    }

    /* Prepare new header */
    buffer = g_malloc(buffer_len);

    qed_header_cpu_to_le(&new_header, &le_header);
    memcpy(buffer, &le_header, sizeof(le_header));
    buffer_len = sizeof(le_header);

    if (backing_file) {
        memcpy(buffer + buffer_len, backing_file, backing_file_len);
        buffer_len += backing_file_len;
    }

    /* Write new header */
    ret = bdrv_pwrite_sync(bs->file->bs, 0, buffer, buffer_len);
    g_free(buffer);
    if (ret == 0) {
        memcpy(&s->header, &new_header, sizeof(new_header));
    }
    return ret;
}

static void bdrv_qed_invalidate_cache(BlockDriverState *bs, Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    Error *local_err = NULL;
    int ret;

    bdrv_qed_close(bs);

    bdrv_invalidate_cache(bs->file->bs, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    memset(s, 0, sizeof(BDRVQEDState));
    ret = bdrv_qed_open(bs, NULL, bs->open_flags, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        error_prepend(errp, "Could not reopen qed layer: ");
        return;
    } else if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not reopen qed layer");
        return;
    }
}

static int bdrv_qed_check(BlockDriverState *bs, BdrvCheckResult *result,
                          BdrvCheckMode fix)
{
    BDRVQEDState *s = bs->opaque;

    return qed_check(s, result, !!fix);
}

static QemuOptsList qed_create_opts = {
    .name = "qed-create-opts",
    .head = QTAILQ_HEAD_INITIALIZER(qed_create_opts.head),
    .desc = {
        {
            .name = BLOCK_OPT_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Virtual disk size"
        },
        {
            .name = BLOCK_OPT_BACKING_FILE,
            .type = QEMU_OPT_STRING,
            .help = "File name of a base image"
        },
        {
            .name = BLOCK_OPT_BACKING_FMT,
            .type = QEMU_OPT_STRING,
            .help = "Image format of the base image"
        },
        {
            .name = BLOCK_OPT_CLUSTER_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Cluster size (in bytes)",
            .def_value_str = stringify(QED_DEFAULT_CLUSTER_SIZE)
        },
        {
            .name = BLOCK_OPT_TABLE_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "L1/L2 table size (in clusters)"
        },
        { /* end of list */ }
    }
};

static BlockDriver bdrv_qed = {
    .format_name              = "qed",
    .instance_size            = sizeof(BDRVQEDState),
    .create_opts              = &qed_create_opts,
    .supports_backing         = true,

    .bdrv_probe               = bdrv_qed_probe,
    .bdrv_open                = bdrv_qed_open,
    .bdrv_close               = bdrv_qed_close,
    .bdrv_reopen_prepare      = bdrv_qed_reopen_prepare,
    .bdrv_create              = bdrv_qed_create,
    .bdrv_has_zero_init       = bdrv_has_zero_init_1,
    .bdrv_co_get_block_status = bdrv_qed_co_get_block_status,
    .bdrv_aio_readv           = bdrv_qed_aio_readv,
    .bdrv_aio_writev          = bdrv_qed_aio_writev,
    .bdrv_co_write_zeroes     = bdrv_qed_co_write_zeroes,
    .bdrv_truncate            = bdrv_qed_truncate,
    .bdrv_getlength           = bdrv_qed_getlength,
    .bdrv_get_info            = bdrv_qed_get_info,
    .bdrv_refresh_limits      = bdrv_qed_refresh_limits,
    .bdrv_change_backing_file = bdrv_qed_change_backing_file,
    .bdrv_invalidate_cache    = bdrv_qed_invalidate_cache,
    .bdrv_check               = bdrv_qed_check,
    .bdrv_detach_aio_context  = bdrv_qed_detach_aio_context,
    .bdrv_attach_aio_context  = bdrv_qed_attach_aio_context,
};

static void bdrv_qed_init(void)
{
    bdrv_register(&bdrv_qed);
}

block_init(bdrv_qed_init);