/*
 * QEMU Enhanced Disk Format
 *
 * Copyright IBM, Corp. 2010
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/timer.h"
#include "trace.h"
#include "qed.h"
#include "qapi/qmp/qerror.h"
#include "migration/migration.h"
#include "sysemu/block-backend.h"

static const AIOCBInfo qed_aiocb_info = {
    .aiocb_size         = sizeof(QEDAIOCB),
};

static int bdrv_qed_probe(const uint8_t *buf, int buf_size,
                          const char *filename)
{
    const QEDHeader *header = (const QEDHeader *)buf;

    if (buf_size < sizeof(*header)) {
        return 0;
    }
    if (le32_to_cpu(header->magic) != QED_MAGIC) {
        return 0;
    }
    return 100;
}

/**
 * Check whether an image format is raw
 *
 * @fmt:    Backing file format, may be NULL
 */
static bool qed_fmt_is_raw(const char *fmt)
{
    return fmt && strcmp(fmt, "raw") == 0;
}

static void qed_header_le_to_cpu(const QEDHeader *le, QEDHeader *cpu)
{
    cpu->magic = le32_to_cpu(le->magic);
    cpu->cluster_size = le32_to_cpu(le->cluster_size);
    cpu->table_size = le32_to_cpu(le->table_size);
    cpu->header_size = le32_to_cpu(le->header_size);
    cpu->features = le64_to_cpu(le->features);
    cpu->compat_features = le64_to_cpu(le->compat_features);
    cpu->autoclear_features = le64_to_cpu(le->autoclear_features);
    cpu->l1_table_offset = le64_to_cpu(le->l1_table_offset);
    cpu->image_size = le64_to_cpu(le->image_size);
    cpu->backing_filename_offset = le32_to_cpu(le->backing_filename_offset);
    cpu->backing_filename_size = le32_to_cpu(le->backing_filename_size);
}

static void qed_header_cpu_to_le(const QEDHeader *cpu, QEDHeader *le)
{
    le->magic = cpu_to_le32(cpu->magic);
    le->cluster_size = cpu_to_le32(cpu->cluster_size);
    le->table_size = cpu_to_le32(cpu->table_size);
    le->header_size = cpu_to_le32(cpu->header_size);
    le->features = cpu_to_le64(cpu->features);
    le->compat_features = cpu_to_le64(cpu->compat_features);
    le->autoclear_features = cpu_to_le64(cpu->autoclear_features);
    le->l1_table_offset = cpu_to_le64(cpu->l1_table_offset);
    le->image_size = cpu_to_le64(cpu->image_size);
    le->backing_filename_offset = cpu_to_le32(cpu->backing_filename_offset);
    le->backing_filename_size = cpu_to_le32(cpu->backing_filename_size);
}

int qed_write_header_sync(BDRVQEDState *s)
{
    QEDHeader le;
    int ret;

    qed_header_cpu_to_le(&s->header, &le);
    ret = bdrv_pwrite(s->bs->file->bs, 0, &le, sizeof(le));
    if (ret != sizeof(le)) {
        return ret;
    }
    return 0;
}

typedef struct {
    GenericCB gencb;
    BDRVQEDState *s;
    struct iovec iov;
    QEMUIOVector qiov;
    int nsectors;
    uint8_t *buf;
} QEDWriteHeaderCB;

static void qed_write_header_cb(void *opaque, int ret)
{
    QEDWriteHeaderCB *write_header_cb = opaque;

    qemu_vfree(write_header_cb->buf);
    gencb_complete(write_header_cb, ret);
}

static void qed_write_header_read_cb(void *opaque, int ret)
{
    QEDWriteHeaderCB *write_header_cb = opaque;
    BDRVQEDState *s = write_header_cb->s;

    if (ret) {
        qed_write_header_cb(write_header_cb, ret);
        return;
    }

    /* Update header */
    qed_header_cpu_to_le(&s->header, (QEDHeader *)write_header_cb->buf);

    bdrv_aio_writev(s->bs->file->bs, 0, &write_header_cb->qiov,
                    write_header_cb->nsectors, qed_write_header_cb,
                    write_header_cb);
}

/**
 * Update header in-place (does not rewrite backing filename or other strings)
 *
 * This function only updates known header fields in-place and does not affect
 * extra data after the QED header.
 */
static void qed_write_header(BDRVQEDState *s, BlockCompletionFunc cb,
                             void *opaque)
{
    /* We must write full sectors for O_DIRECT but cannot necessarily generate
     * the data following the header if an unrecognized compat feature is
     * active.  Therefore, first read the sectors containing the header, update
     * them, and write back.
     */

    int nsectors = (sizeof(QEDHeader) + BDRV_SECTOR_SIZE - 1) /
                   BDRV_SECTOR_SIZE;
    size_t len = nsectors * BDRV_SECTOR_SIZE;
    QEDWriteHeaderCB *write_header_cb = gencb_alloc(sizeof(*write_header_cb),
                                                    cb, opaque);

    write_header_cb->s = s;
    write_header_cb->nsectors = nsectors;
    write_header_cb->buf = qemu_blockalign(s->bs, len);
    write_header_cb->iov.iov_base = write_header_cb->buf;
    write_header_cb->iov.iov_len = len;
    qemu_iovec_init_external(&write_header_cb->qiov, &write_header_cb->iov, 1);

    bdrv_aio_readv(s->bs->file->bs, 0, &write_header_cb->qiov, nsectors,
                   qed_write_header_read_cb, write_header_cb);
}
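
/*
 * Worked example of the read-modify-write above (illustrative only; it
 * assumes the on-disk QEDHeader is 64 bytes and BDRV_SECTOR_SIZE is 512):
 *
 *   nsectors = (64 + 512 - 1) / 512 = 1
 *   len      = 1 * 512              = 512
 *
 * One 512-byte sector is read, the first 64 bytes are overwritten with the
 * serialized header, and the sector is written back, preserving whatever
 * unknown data follows the header fields.
 */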

static uint64_t qed_max_image_size(uint32_t cluster_size, uint32_t table_size)
{
    uint64_t table_entries;
    uint64_t l2_size;

    table_entries = (table_size * cluster_size) / sizeof(uint64_t);
    l2_size = table_entries * cluster_size;

    return l2_size * table_entries;
}
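
/*
 * Worked example (illustrative; assumes the defaults are
 * QED_DEFAULT_CLUSTER_SIZE = 64 KiB and QED_DEFAULT_TABLE_SIZE = 4):
 *
 *   table_entries = (4 * 65536) / 8 = 32768
 *   l2_size       = 32768 * 65536   = 2 GiB addressed per L2 table
 *   max image     = 2 GiB * 32768   = 64 TiB
 */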

static bool qed_is_cluster_size_valid(uint32_t cluster_size)
{
    if (cluster_size < QED_MIN_CLUSTER_SIZE ||
        cluster_size > QED_MAX_CLUSTER_SIZE) {
        return false;
    }
    if (cluster_size & (cluster_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}
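
/*
 * Note on the test above: x & (x - 1) clears the lowest set bit, so it is
 * zero exactly when x is a power of two.  For example,
 * 0x10000 & 0x0ffff == 0 (accepted) while 0x18000 & 0x17fff == 0x10000
 * (rejected).  The same test is used for the table size below.
 */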

static bool qed_is_table_size_valid(uint32_t table_size)
{
    if (table_size < QED_MIN_TABLE_SIZE ||
        table_size > QED_MAX_TABLE_SIZE) {
        return false;
    }
    if (table_size & (table_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}

static bool qed_is_image_size_valid(uint64_t image_size, uint32_t cluster_size,
                                    uint32_t table_size)
{
    if (image_size % BDRV_SECTOR_SIZE != 0) {
        return false; /* not multiple of sector size */
    }
    if (image_size > qed_max_image_size(cluster_size, table_size)) {
        return false; /* image is too large */
    }
    return true;
}

/**
 * Read a string of known length from the image file
 *
 * @file:       Image file
 * @offset:     File offset to start of string, in bytes
 * @n:          String length in bytes
 * @buf:        Destination buffer
 * @buflen:     Destination buffer length in bytes
 * @ret:        0 on success, -errno on failure
 *
 * The string is NUL-terminated.
 */
static int qed_read_string(BlockDriverState *file, uint64_t offset, size_t n,
                           char *buf, size_t buflen)
{
    int ret;
    if (n >= buflen) {
        return -EINVAL;
    }
    ret = bdrv_pread(file, offset, buf, n);
    if (ret < 0) {
        return ret;
    }
    buf[n] = '\0';
    return 0;
}

/**
 * Allocate new clusters
 *
 * @s:          QED state
 * @n:          Number of contiguous clusters to allocate
 * @ret:        Offset of first allocated cluster
 *
 * This function only produces the offset where the new clusters should be
 * written.  It updates BDRVQEDState but does not make any changes to the image
 * file.
 */
static uint64_t qed_alloc_clusters(BDRVQEDState *s, unsigned int n)
{
    uint64_t offset = s->file_size;
    s->file_size += n * s->header.cluster_size;
    return offset;
}

QEDTable *qed_alloc_table(BDRVQEDState *s)
{
    /* Honor O_DIRECT memory alignment requirements */
    return qemu_blockalign(s->bs,
                           s->header.cluster_size * s->header.table_size);
}

/**
 * Allocate a new zeroed L2 table
 */
static CachedL2Table *qed_new_l2_table(BDRVQEDState *s)
{
    CachedL2Table *l2_table = qed_alloc_l2_cache_entry(&s->l2_cache);

    l2_table->table = qed_alloc_table(s);
    l2_table->offset = qed_alloc_clusters(s, s->header.table_size);

    memset(l2_table->table->offsets, 0,
           s->header.cluster_size * s->header.table_size);
    return l2_table;
}

static void qed_aio_next_io(void *opaque, int ret);

static void qed_plug_allocating_write_reqs(BDRVQEDState *s)
{
    assert(!s->allocating_write_reqs_plugged);

    s->allocating_write_reqs_plugged = true;
}

static void qed_unplug_allocating_write_reqs(BDRVQEDState *s)
{
    QEDAIOCB *acb;

    assert(s->allocating_write_reqs_plugged);

    s->allocating_write_reqs_plugged = false;

    acb = QSIMPLEQ_FIRST(&s->allocating_write_reqs);
    if (acb) {
        qed_aio_next_io(acb, 0);
    }
}

static void qed_finish_clear_need_check(void *opaque, int ret)
{
    /* Do nothing */
}

static void qed_flush_after_clear_need_check(void *opaque, int ret)
{
    BDRVQEDState *s = opaque;

    bdrv_aio_flush(s->bs, qed_finish_clear_need_check, s);

    /* No need to wait until flush completes */
    qed_unplug_allocating_write_reqs(s);
}

static void qed_clear_need_check(void *opaque, int ret)
{
    BDRVQEDState *s = opaque;

    if (ret) {
        qed_unplug_allocating_write_reqs(s);
        return;
    }

    s->header.features &= ~QED_F_NEED_CHECK;
    qed_write_header(s, qed_flush_after_clear_need_check, s);
}

static void qed_need_check_timer_cb(void *opaque)
{
    BDRVQEDState *s = opaque;

    /* The timer should only fire when allocating writes have drained */
    assert(!QSIMPLEQ_FIRST(&s->allocating_write_reqs));

    trace_qed_need_check_timer_cb(s);

    qed_plug_allocating_write_reqs(s);

    /* Ensure writes are on disk before clearing flag */
    bdrv_aio_flush(s->bs, qed_clear_need_check, s);
}

static void qed_start_need_check_timer(BDRVQEDState *s)
{
    trace_qed_start_need_check_timer(s);

    /* Use QEMU_CLOCK_VIRTUAL so we don't alter the image file while
     * suspended for migration.
     */
    timer_mod(s->need_check_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                   get_ticks_per_sec() * QED_NEED_CHECK_TIMEOUT);
}
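
/*
 * Illustrative timing (assumptions: get_ticks_per_sec() returns 10^9, i.e.
 * nanoseconds, and QED_NEED_CHECK_TIMEOUT is 5): the timer fires at
 * now + 5 * 10^9 ns, so the need-check flag is cleared roughly five seconds
 * after the last allocating write drains rather than on every request.
 */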

/* It's okay to call this multiple times or when no timer is started */
static void qed_cancel_need_check_timer(BDRVQEDState *s)
{
    trace_qed_cancel_need_check_timer(s);
    timer_del(s->need_check_timer);
}

static void bdrv_qed_detach_aio_context(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    qed_cancel_need_check_timer(s);
    timer_free(s->need_check_timer);
}

static void bdrv_qed_attach_aio_context(BlockDriverState *bs,
                                        AioContext *new_context)
{
    BDRVQEDState *s = bs->opaque;

    s->need_check_timer = aio_timer_new(new_context,
                                        QEMU_CLOCK_VIRTUAL, SCALE_NS,
                                        qed_need_check_timer_cb, s);
    if (s->header.features & QED_F_NEED_CHECK) {
        qed_start_need_check_timer(s);
    }
}

static void bdrv_qed_drain(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    /* Cancel the timer and issue the I/O it would have performed, so that
     * bdrv_drain() takes care of the in-flight requests correctly. */
    qed_cancel_need_check_timer(s);
    qed_plug_allocating_write_reqs(s);
    bdrv_aio_flush(s->bs, qed_clear_need_check, s);
}

static int bdrv_qed_open(BlockDriverState *bs, QDict *options, int flags,
                         Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader le_header;
    int64_t file_size;
    int ret;

    s->bs = bs;
    QSIMPLEQ_INIT(&s->allocating_write_reqs);

    ret = bdrv_pread(bs->file->bs, 0, &le_header, sizeof(le_header));
    if (ret < 0) {
        return ret;
    }
    qed_header_le_to_cpu(&le_header, &s->header);

    if (s->header.magic != QED_MAGIC) {
        error_setg(errp, "Image not in QED format");
        return -EINVAL;
    }
    if (s->header.features & ~QED_FEATURE_MASK) {
        /* image uses unsupported feature bits */
        char buf[64];
        snprintf(buf, sizeof(buf), "%" PRIx64,
            s->header.features & ~QED_FEATURE_MASK);
        error_setg(errp, QERR_UNKNOWN_BLOCK_FORMAT_FEATURE,
                   bdrv_get_device_or_node_name(bs), "QED", buf);
        return -ENOTSUP;
    }
    if (!qed_is_cluster_size_valid(s->header.cluster_size)) {
        return -EINVAL;
    }

    /* Round down file size to the last cluster */
    file_size = bdrv_getlength(bs->file->bs);
    if (file_size < 0) {
        return file_size;
    }
    s->file_size = qed_start_of_cluster(s, file_size);

    if (!qed_is_table_size_valid(s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_is_image_size_valid(s->header.image_size,
                                 s->header.cluster_size,
                                 s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_check_table_offset(s, s->header.l1_table_offset)) {
        return -EINVAL;
    }

    s->table_nelems = (s->header.cluster_size * s->header.table_size) /
                      sizeof(uint64_t);
    s->l2_shift = ctz32(s->header.cluster_size);
    s->l2_mask = s->table_nelems - 1;
    s->l1_shift = s->l2_shift + ctz32(s->table_nelems);
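
    /*
     * Worked example of the shifts above (illustrative; assumes the
     * defaults cluster_size = 64 KiB and table_size = 4, so
     * table_nelems = 32768):
     *
     *   l2_shift = ctz32(65536)      = 16
     *   l2_mask  = 32768 - 1         = 0x7fff
     *   l1_shift = 16 + ctz32(32768) = 31
     *
     * A byte position in the image then decomposes as:
     *
     *   L1 index          = pos >> 31
     *   L2 index          = (pos >> 16) & 0x7fff
     *   offset in cluster = pos & 0xffff
     */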

    /* Header size calculation must not overflow uint32_t */
    if (s->header.header_size > UINT32_MAX / s->header.cluster_size) {
        return -EINVAL;
    }

    if ((s->header.features & QED_F_BACKING_FILE)) {
        if ((uint64_t)s->header.backing_filename_offset +
            s->header.backing_filename_size >
            s->header.cluster_size * s->header.header_size) {
            return -EINVAL;
        }

        ret = qed_read_string(bs->file->bs, s->header.backing_filename_offset,
                              s->header.backing_filename_size, bs->backing_file,
                              sizeof(bs->backing_file));
        if (ret < 0) {
            return ret;
        }

        if (s->header.features & QED_F_BACKING_FORMAT_NO_PROBE) {
            pstrcpy(bs->backing_format, sizeof(bs->backing_format), "raw");
        }
    }

    /* Reset unknown autoclear feature bits.  This is a backwards
     * compatibility mechanism that allows images to be opened by older
     * programs, which "knock out" unknown feature bits.  When an image is
     * opened by a newer program again it can detect that the autoclear
     * feature is no longer valid.
     */
    if ((s->header.autoclear_features & ~QED_AUTOCLEAR_FEATURE_MASK) != 0 &&
        !bdrv_is_read_only(bs->file->bs) && !(flags & BDRV_O_INACTIVE)) {
        s->header.autoclear_features &= QED_AUTOCLEAR_FEATURE_MASK;

        ret = qed_write_header_sync(s);
        if (ret) {
            return ret;
        }

        /* From here on only known autoclear feature bits are valid */
        bdrv_flush(bs->file->bs);
    }
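
    /*
     * Illustrative example of the knockout above (hypothetical bit values;
     * no specific autoclear features are assumed to be defined): if this
     * binary only knows autoclear bit 0 and the image carries
     * autoclear_features == 0x5, the unknown bit 2 is knocked out and 0x1
     * is written back, so a newer program can detect that the feature's
     * data may be stale.
     */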

    s->l1_table = qed_alloc_table(s);
    qed_init_l2_cache(&s->l2_cache);

    ret = qed_read_l1_table_sync(s);
    if (ret) {
        goto out;
    }

    /* If image was not closed cleanly, check consistency */
    if (!(flags & BDRV_O_CHECK) && (s->header.features & QED_F_NEED_CHECK)) {
        /* Read-only images cannot be fixed.  There is no risk of corruption
         * since write operations are not possible.  Therefore, allow
         * potentially inconsistent images to be opened read-only.  This can
         * aid data recovery from an otherwise inconsistent image.
         */
        if (!bdrv_is_read_only(bs->file->bs) &&
            !(flags & BDRV_O_INACTIVE)) {
            BdrvCheckResult result = {0};

            ret = qed_check(s, &result, true);
            if (ret) {
                goto out;
            }
        }
    }

    bdrv_qed_attach_aio_context(bs, bdrv_get_aio_context(bs));

out:
    if (ret) {
        qed_free_l2_cache(&s->l2_cache);
        qemu_vfree(s->l1_table);
    }
    return ret;
}

static void bdrv_qed_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BDRVQEDState *s = bs->opaque;

    bs->bl.write_zeroes_alignment = s->header.cluster_size >> BDRV_SECTOR_BITS;
}

/* We have nothing to do for QED reopen; the stubs just return success */
static int bdrv_qed_reopen_prepare(BDRVReopenState *state,
                                   BlockReopenQueue *queue, Error **errp)
{
    return 0;
}

static void bdrv_qed_close(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    bdrv_qed_detach_aio_context(bs);

    /* Ensure writes reach stable storage */
    bdrv_flush(bs->file->bs);

    /* Clean shutdown, no check required on next open */
    if (s->header.features & QED_F_NEED_CHECK) {
        s->header.features &= ~QED_F_NEED_CHECK;
        qed_write_header_sync(s);
    }

    qed_free_l2_cache(&s->l2_cache);
    qemu_vfree(s->l1_table);
}

static int qed_create(const char *filename, uint32_t cluster_size,
                      uint64_t image_size, uint32_t table_size,
                      const char *backing_file, const char *backing_fmt,
                      QemuOpts *opts, Error **errp)
{
    QEDHeader header = {
        .magic = QED_MAGIC,
        .cluster_size = cluster_size,
        .table_size = table_size,
        .header_size = 1,
        .features = 0,
        .compat_features = 0,
        .l1_table_offset = cluster_size,
        .image_size = image_size,
    };
    QEDHeader le_header;
    uint8_t *l1_table = NULL;
    size_t l1_size = header.cluster_size * header.table_size;
    Error *local_err = NULL;
    int ret = 0;
    BlockBackend *blk;

    ret = bdrv_create_file(filename, opts, &local_err);
    if (ret < 0) {
        error_propagate(errp, local_err);
        return ret;
    }

    blk = blk_new_open("image", filename, NULL, NULL,
                       BDRV_O_RDWR | BDRV_O_CACHE_WB | BDRV_O_PROTOCOL,
                       &local_err);
    if (blk == NULL) {
        error_propagate(errp, local_err);
        return -EIO;
    }

    blk_set_allow_write_beyond_eof(blk, true);

    /* The file must start empty and grow; check that truncate is supported */
    ret = blk_truncate(blk, 0);
    if (ret < 0) {
        goto out;
    }

    if (backing_file) {
        header.features |= QED_F_BACKING_FILE;
        header.backing_filename_offset = sizeof(le_header);
        header.backing_filename_size = strlen(backing_file);

        if (qed_fmt_is_raw(backing_fmt)) {
            header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    qed_header_cpu_to_le(&header, &le_header);
    ret = blk_pwrite(blk, 0, &le_header, sizeof(le_header));
    if (ret < 0) {
        goto out;
    }
    ret = blk_pwrite(blk, sizeof(le_header), backing_file,
                     header.backing_filename_size);
    if (ret < 0) {
        goto out;
    }

    l1_table = g_malloc0(l1_size);
    ret = blk_pwrite(blk, header.l1_table_offset, l1_table, l1_size);
    if (ret < 0) {
        goto out;
    }

    ret = 0; /* success */
out:
    g_free(l1_table);
    blk_unref(blk);
    return ret;
}

static int bdrv_qed_create(const char *filename, QemuOpts *opts, Error **errp)
{
    uint64_t image_size = 0;
    uint32_t cluster_size = QED_DEFAULT_CLUSTER_SIZE;
    uint32_t table_size = QED_DEFAULT_TABLE_SIZE;
    char *backing_file = NULL;
    char *backing_fmt = NULL;
    int ret;

    image_size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0),
                          BDRV_SECTOR_SIZE);
    backing_file = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FILE);
    backing_fmt = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FMT);
    cluster_size = qemu_opt_get_size_del(opts,
                                         BLOCK_OPT_CLUSTER_SIZE,
                                         QED_DEFAULT_CLUSTER_SIZE);
    table_size = qemu_opt_get_size_del(opts, BLOCK_OPT_TABLE_SIZE,
                                       QED_DEFAULT_TABLE_SIZE);

    if (!qed_is_cluster_size_valid(cluster_size)) {
        error_setg(errp, "QED cluster size must be within range [%u, %u] "
                         "and power of 2",
                   QED_MIN_CLUSTER_SIZE, QED_MAX_CLUSTER_SIZE);
        ret = -EINVAL;
        goto finish;
    }
    if (!qed_is_table_size_valid(table_size)) {
        error_setg(errp, "QED table size must be within range [%u, %u] "
                         "and power of 2",
                   QED_MIN_TABLE_SIZE, QED_MAX_TABLE_SIZE);
        ret = -EINVAL;
        goto finish;
    }
    if (!qed_is_image_size_valid(image_size, cluster_size, table_size)) {
        error_setg(errp, "QED image size must be a non-zero multiple of "
                         "cluster size and less than %" PRIu64 " bytes",
                   qed_max_image_size(cluster_size, table_size));
        ret = -EINVAL;
        goto finish;
    }

    ret = qed_create(filename, cluster_size, image_size, table_size,
                     backing_file, backing_fmt, opts, errp);

finish:
    g_free(backing_file);
    g_free(backing_fmt);
    return ret;
}

typedef struct {
    BlockDriverState *bs;
    Coroutine *co;
    uint64_t pos;
    int64_t status;
    int *pnum;
    BlockDriverState **file;
} QEDIsAllocatedCB;

static void qed_is_allocated_cb(void *opaque, int ret, uint64_t offset, size_t len)
{
    QEDIsAllocatedCB *cb = opaque;
    BDRVQEDState *s = cb->bs->opaque;
    *cb->pnum = len / BDRV_SECTOR_SIZE;
    switch (ret) {
    case QED_CLUSTER_FOUND:
        offset |= qed_offset_into_cluster(s, cb->pos);
        cb->status = BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID | offset;
        *cb->file = cb->bs->file->bs;
        break;
    case QED_CLUSTER_ZERO:
        cb->status = BDRV_BLOCK_ZERO;
        break;
    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
        cb->status = 0;
        break;
    default:
        assert(ret < 0);
        cb->status = ret;
        break;
    }

    if (cb->co) {
        qemu_coroutine_enter(cb->co, NULL);
    }
}

static int64_t coroutine_fn bdrv_qed_co_get_block_status(BlockDriverState *bs,
                                                 int64_t sector_num,
                                                 int nb_sectors, int *pnum,
                                                 BlockDriverState **file)
{
    BDRVQEDState *s = bs->opaque;
    size_t len = (size_t)nb_sectors * BDRV_SECTOR_SIZE;
    QEDIsAllocatedCB cb = {
        .bs = bs,
        .pos = (uint64_t)sector_num * BDRV_SECTOR_SIZE,
        .status = BDRV_BLOCK_OFFSET_MASK,
        .pnum = pnum,
        .file = file,
    };
    QEDRequest request = { .l2_table = NULL };

    qed_find_cluster(s, &request, cb.pos, len, qed_is_allocated_cb, &cb);

    /* Now yield if the callback wasn't invoked immediately */
    while (cb.status == BDRV_BLOCK_OFFSET_MASK) {
        cb.co = qemu_coroutine_self();
        qemu_coroutine_yield();
    }

    qed_unref_l2_cache_entry(request.l2_table);

    return cb.status;
}

static BDRVQEDState *acb_to_s(QEDAIOCB *acb)
{
    return acb->common.bs->opaque;
}

/**
 * Read from the backing file or zero-fill if no backing file
 *
 * @s:              QED state
 * @pos:            Byte position in device
 * @qiov:           Destination I/O vector
 * @backing_qiov:   Possibly shortened copy of qiov, to be allocated here
 * @cb:             Completion function
 * @opaque:         User data for completion function
 *
 * This function reads qiov->size bytes starting at pos from the backing file.
 * If there is no backing file then zeroes are read.
 */
static void qed_read_backing_file(BDRVQEDState *s, uint64_t pos,
                                  QEMUIOVector *qiov,
                                  QEMUIOVector **backing_qiov,
                                  BlockCompletionFunc *cb, void *opaque)
{
    uint64_t backing_length = 0;
    size_t size;

    /* If there is a backing file, get its length.  Treat the absence of a
     * backing file like a zero length backing file.
     */
    if (s->bs->backing) {
        int64_t l = bdrv_getlength(s->bs->backing->bs);
        if (l < 0) {
            cb(opaque, l);
            return;
        }
        backing_length = l;
    }

    /* Zero all sectors if reading beyond the end of the backing file */
    if (pos >= backing_length ||
        pos + qiov->size > backing_length) {
        qemu_iovec_memset(qiov, 0, 0, qiov->size);
    }

    /* Complete now if there are no backing file sectors to read */
    if (pos >= backing_length) {
        cb(opaque, 0);
        return;
    }

    /* If the read straddles the end of the backing file, shorten it */
    size = MIN((uint64_t)backing_length - pos, qiov->size);

    assert(*backing_qiov == NULL);
    *backing_qiov = g_new(QEMUIOVector, 1);
    qemu_iovec_init(*backing_qiov, qiov->niov);
    qemu_iovec_concat(*backing_qiov, qiov, 0, size);

    BLKDBG_EVENT(s->bs->file, BLKDBG_READ_BACKING_AIO);
    bdrv_aio_readv(s->bs->backing->bs, pos / BDRV_SECTOR_SIZE,
                   *backing_qiov, size / BDRV_SECTOR_SIZE, cb, opaque);
}
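
/*
 * Worked example of the straddle case above (illustrative): with
 * backing_length = 1 MiB, pos = 1 MiB - 512 and qiov->size = 4096, the
 * whole qiov is zero-filled first, size = MIN(512, 4096) = 512, and only
 * the first 512 bytes are read from the backing file; the remaining 3584
 * bytes keep their zero fill.
 */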

typedef struct {
    GenericCB gencb;
    BDRVQEDState *s;
    QEMUIOVector qiov;
    QEMUIOVector *backing_qiov;
    struct iovec iov;
    uint64_t offset;
} CopyFromBackingFileCB;

static void qed_copy_from_backing_file_cb(void *opaque, int ret)
{
    CopyFromBackingFileCB *copy_cb = opaque;
    qemu_vfree(copy_cb->iov.iov_base);
    gencb_complete(&copy_cb->gencb, ret);
}

static void qed_copy_from_backing_file_write(void *opaque, int ret)
{
    CopyFromBackingFileCB *copy_cb = opaque;
    BDRVQEDState *s = copy_cb->s;

    if (copy_cb->backing_qiov) {
        qemu_iovec_destroy(copy_cb->backing_qiov);
        g_free(copy_cb->backing_qiov);
        copy_cb->backing_qiov = NULL;
    }

    if (ret) {
        qed_copy_from_backing_file_cb(copy_cb, ret);
        return;
    }

    BLKDBG_EVENT(s->bs->file, BLKDBG_COW_WRITE);
    bdrv_aio_writev(s->bs->file->bs, copy_cb->offset / BDRV_SECTOR_SIZE,
                    &copy_cb->qiov, copy_cb->qiov.size / BDRV_SECTOR_SIZE,
                    qed_copy_from_backing_file_cb, copy_cb);
}

/**
 * Copy data from backing file into the image
 *
 * @s:          QED state
 * @pos:        Byte position in device
 * @len:        Number of bytes
 * @offset:     Byte offset in image file
 * @cb:         Completion function
 * @opaque:     User data for completion function
 */
static void qed_copy_from_backing_file(BDRVQEDState *s, uint64_t pos,
                                       uint64_t len, uint64_t offset,
                                       BlockCompletionFunc *cb,
                                       void *opaque)
{
    CopyFromBackingFileCB *copy_cb;

    /* Skip copy entirely if there is no work to do */
    if (len == 0) {
        cb(opaque, 0);
        return;
    }

    copy_cb = gencb_alloc(sizeof(*copy_cb), cb, opaque);
    copy_cb->s = s;
    copy_cb->offset = offset;
    copy_cb->backing_qiov = NULL;
    copy_cb->iov.iov_base = qemu_blockalign(s->bs, len);
    copy_cb->iov.iov_len = len;
    qemu_iovec_init_external(&copy_cb->qiov, &copy_cb->iov, 1);

    qed_read_backing_file(s, pos, &copy_cb->qiov, &copy_cb->backing_qiov,
                          qed_copy_from_backing_file_write, copy_cb);
}

/**
 * Link one or more contiguous clusters into a table
 *
 * @s:              QED state
 * @table:          L2 table
 * @index:          First cluster index
 * @n:              Number of contiguous clusters
 * @cluster:        First cluster offset
 *
 * The cluster offset may be an allocated byte offset in the image file, the
 * zero cluster marker, or the unallocated cluster marker.
 */
static void qed_update_l2_table(BDRVQEDState *s, QEDTable *table, int index,
                                unsigned int n, uint64_t cluster)
{
    int i;
    for (i = index; i < index + n; i++) {
        table->offsets[i] = cluster;
        if (!qed_offset_is_unalloc_cluster(cluster) &&
            !qed_offset_is_zero_cluster(cluster)) {
            cluster += s->header.cluster_size;
        }
    }
}
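
/*
 * Worked example (illustrative; cluster_size = 64 KiB): linking n = 3
 * clusters at index = 8 with cluster = 0x100000 stores
 *
 *   table->offsets[8]  = 0x100000
 *   table->offsets[9]  = 0x110000
 *   table->offsets[10] = 0x120000
 *
 * whereas a zero or unallocated cluster marker is stored unchanged in all
 * three entries.
 */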

static void qed_aio_complete_bh(void *opaque)
{
    QEDAIOCB *acb = opaque;
    BlockCompletionFunc *cb = acb->common.cb;
    void *user_opaque = acb->common.opaque;
    int ret = acb->bh_ret;

    qemu_bh_delete(acb->bh);
    qemu_aio_unref(acb);

    /* Invoke callback */
    cb(user_opaque, ret);
}

static void qed_aio_complete(QEDAIOCB *acb, int ret)
{
    BDRVQEDState *s = acb_to_s(acb);

    trace_qed_aio_complete(s, acb, ret);

    /* Free resources */
    qemu_iovec_destroy(&acb->cur_qiov);
    qed_unref_l2_cache_entry(acb->request.l2_table);

    /* Free the buffer we may have allocated for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        qemu_vfree(acb->qiov->iov[0].iov_base);
        acb->qiov->iov[0].iov_base = NULL;
    }

    /* Arrange for a bh to invoke the completion function */
    acb->bh_ret = ret;
    acb->bh = aio_bh_new(bdrv_get_aio_context(acb->common.bs),
                         qed_aio_complete_bh, acb);
    qemu_bh_schedule(acb->bh);

    /* Start next allocating write request waiting behind this one.  Note that
     * requests enqueue themselves when they first hit an unallocated cluster
     * but they wait until the entire request is finished before waking up the
     * next request in the queue.  This ensures that we don't cycle through
     * requests multiple times but rather finish one at a time completely.
     */
    if (acb == QSIMPLEQ_FIRST(&s->allocating_write_reqs)) {
        QSIMPLEQ_REMOVE_HEAD(&s->allocating_write_reqs, next);
        acb = QSIMPLEQ_FIRST(&s->allocating_write_reqs);
        if (acb) {
            qed_aio_next_io(acb, 0);
        } else if (s->header.features & QED_F_NEED_CHECK) {
            qed_start_need_check_timer(s);
        }
    }
}

/**
 * Commit the current L2 table to the cache
 */
static void qed_commit_l2_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    CachedL2Table *l2_table = acb->request.l2_table;
    uint64_t l2_offset = l2_table->offset;

    qed_commit_l2_cache_entry(&s->l2_cache, l2_table);

    /* This is guaranteed to succeed because we just committed the entry to the
     * cache.
     */
    acb->request.l2_table = qed_find_l2_cache_entry(&s->l2_cache, l2_offset);
    assert(acb->request.l2_table != NULL);

    qed_aio_next_io(opaque, ret);
}

/**
 * Update L1 table with new L2 table offset and write it out
 */
static void qed_aio_write_l1_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    int index;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    index = qed_l1_index(s, acb->cur_pos);
    s->l1_table->offsets[index] = acb->request.l2_table->offset;

    qed_write_l1_table(s, index, 1, qed_commit_l2_update, acb);
}

/**
 * Update L2 table with new cluster offsets and write them out
 */
static void qed_aio_write_l2_update(QEDAIOCB *acb, int ret, uint64_t offset)
{
    BDRVQEDState *s = acb_to_s(acb);
    bool need_alloc = acb->find_cluster_ret == QED_CLUSTER_L1;
    int index;

    if (ret) {
        goto err;
    }

    if (need_alloc) {
        qed_unref_l2_cache_entry(acb->request.l2_table);
        acb->request.l2_table = qed_new_l2_table(s);
    }

    index = qed_l2_index(s, acb->cur_pos);
    qed_update_l2_table(s, acb->request.l2_table->table, index, acb->cur_nclusters,
                         offset);

    if (need_alloc) {
        /* Write out the whole new L2 table */
        qed_write_l2_table(s, &acb->request, 0, s->table_nelems, true,
                            qed_aio_write_l1_update, acb);
    } else {
        /* Write out only the updated part of the L2 table */
        qed_write_l2_table(s, &acb->request, index, acb->cur_nclusters, false,
                            qed_aio_next_io, acb);
    }
    return;

err:
    qed_aio_complete(acb, ret);
}

static void qed_aio_write_l2_update_cb(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    qed_aio_write_l2_update(acb, ret, acb->cur_cluster);
}

/**
 * Flush new data clusters before updating the L2 table
 *
 * This flush is necessary when a backing file is in use.  A crash during an
 * allocating write could result in empty clusters in the image.  If the write
 * only touched a subregion of the cluster, then backing image sectors have
 * been lost in the untouched region.  The solution is to flush after writing a
 * new data cluster and before updating the L2 table.
 */
static void qed_aio_write_flush_before_l2_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);

    if (!bdrv_aio_flush(s->bs->file->bs, qed_aio_write_l2_update_cb, opaque)) {
        qed_aio_complete(acb, -EIO);
    }
}

/**
 * Write data to the image file
 */
static void qed_aio_write_main(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t offset = acb->cur_cluster +
                      qed_offset_into_cluster(s, acb->cur_pos);
    BlockCompletionFunc *next_fn;

    trace_qed_aio_write_main(s, acb, ret, offset, acb->cur_qiov.size);

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    if (acb->find_cluster_ret == QED_CLUSTER_FOUND) {
        next_fn = qed_aio_next_io;
    } else {
        if (s->bs->backing) {
            next_fn = qed_aio_write_flush_before_l2_update;
        } else {
            next_fn = qed_aio_write_l2_update_cb;
        }
    }

    BLKDBG_EVENT(s->bs->file, BLKDBG_WRITE_AIO);
    bdrv_aio_writev(s->bs->file->bs, offset / BDRV_SECTOR_SIZE,
                    &acb->cur_qiov, acb->cur_qiov.size / BDRV_SECTOR_SIZE,
                    next_fn, acb);
}

/**
 * Populate the untouched region at the back of a new data cluster
 */
static void qed_aio_write_postfill(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t start = acb->cur_pos + acb->cur_qiov.size;
    uint64_t len =
        qed_start_of_cluster(s, start + s->header.cluster_size - 1) - start;
    uint64_t offset = acb->cur_cluster +
                      qed_offset_into_cluster(s, acb->cur_pos) +
                      acb->cur_qiov.size;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    trace_qed_aio_write_postfill(s, acb, start, len, offset);
    qed_copy_from_backing_file(s, start, len, offset,
                                qed_aio_write_main, acb);
}

/**
 * Populate the untouched region at the front of a new data cluster
 */
static void qed_aio_write_prefill(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t start = qed_start_of_cluster(s, acb->cur_pos);
    uint64_t len = qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_write_prefill(s, acb, start, len, acb->cur_cluster);
    qed_copy_from_backing_file(s, start, len, acb->cur_cluster,
                                qed_aio_write_postfill, acb);
}
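
/*
 * Sketch of an allocating write into one new cluster (the prefill and
 * postfill regions above are copied from the backing file, or zero-filled
 * if there is none):
 *
 *   cluster start                                          cluster end
 *   |<-- prefill -->|<----- guest write data ----->|<-- postfill -->|
 */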

/**
 * Check if the QED_F_NEED_CHECK bit should be set during allocating write
 */
static bool qed_should_set_need_check(BDRVQEDState *s)
{
    /* The flush before L2 update path ensures consistency */
    if (s->bs->backing) {
        return false;
    }

    return !(s->header.features & QED_F_NEED_CHECK);
}

static void qed_aio_write_zero_cluster(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    qed_aio_write_l2_update(acb, 0, 1);
}

/**
 * Write new data cluster
 *
 * @acb:        Write request
 * @len:        Length in bytes
 *
 * This path is taken when writing to previously unallocated clusters.
 */
static void qed_aio_write_alloc(QEDAIOCB *acb, size_t len)
{
    BDRVQEDState *s = acb_to_s(acb);
    BlockCompletionFunc *cb;

    /* Cancel timer when the first allocating request comes in */
    if (QSIMPLEQ_EMPTY(&s->allocating_write_reqs)) {
        qed_cancel_need_check_timer(s);
    }

    /* Freeze this request if another allocating write is in progress */
    if (acb != QSIMPLEQ_FIRST(&s->allocating_write_reqs)) {
        QSIMPLEQ_INSERT_TAIL(&s->allocating_write_reqs, acb, next);
    }
    if (acb != QSIMPLEQ_FIRST(&s->allocating_write_reqs) ||
        s->allocating_write_reqs_plugged) {
        return; /* wait for existing request to finish */
    }

    acb->cur_nclusters = qed_bytes_to_clusters(s,
            qed_offset_into_cluster(s, acb->cur_pos) + len);
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    if (acb->flags & QED_AIOCB_ZERO) {
        /* Skip ahead if the clusters are already zero */
        if (acb->find_cluster_ret == QED_CLUSTER_ZERO) {
            qed_aio_next_io(acb, 0);
            return;
        }

        cb = qed_aio_write_zero_cluster;
    } else {
        cb = qed_aio_write_prefill;
        acb->cur_cluster = qed_alloc_clusters(s, acb->cur_nclusters);
    }

    if (qed_should_set_need_check(s)) {
        s->header.features |= QED_F_NEED_CHECK;
        qed_write_header(s, cb, acb);
    } else {
        cb(acb, 0);
    }
}

/**
 * Write data cluster in place
 *
 * @acb:        Write request
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * This path is taken when writing to already allocated clusters.
 */
static void qed_aio_write_inplace(QEDAIOCB *acb, uint64_t offset, size_t len)
{
    /* Allocate buffer for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        struct iovec *iov = acb->qiov->iov;

        if (!iov->iov_base) {
            iov->iov_base = qemu_try_blockalign(acb->common.bs, iov->iov_len);
            if (iov->iov_base == NULL) {
                qed_aio_complete(acb, -ENOMEM);
                return;
            }
            memset(iov->iov_base, 0, iov->iov_len);
        }
    }

    /* Calculate the I/O vector */
    acb->cur_cluster = offset;
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Do the actual write */
    qed_aio_write_main(acb, 0);
}

/**
 * Write data cluster
 *
 * @opaque:     Write request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_L2, QED_CLUSTER_L1,
 *              or -errno
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * Callback from qed_find_cluster().
 */
static void qed_aio_write_data(void *opaque, int ret,
                               uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;

    trace_qed_aio_write_data(acb_to_s(acb), acb, ret, offset, len);

    acb->find_cluster_ret = ret;

    switch (ret) {
    case QED_CLUSTER_FOUND:
        qed_aio_write_inplace(acb, offset, len);
        break;

    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
    case QED_CLUSTER_ZERO:
        qed_aio_write_alloc(acb, len);
        break;

    default:
        qed_aio_complete(acb, ret);
        break;
    }
}

/**
 * Read data cluster
 *
 * @opaque:     Read request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_L2, QED_CLUSTER_L1,
 *              or -errno
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * Callback from qed_find_cluster().
 */
static void qed_aio_read_data(void *opaque, int ret,
                              uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    BlockDriverState *bs = acb->common.bs;

    /* Adjust offset into cluster */
    offset += qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_read_data(s, acb, ret, offset, len);

    if (ret < 0) {
        goto err;
    }

    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Handle zero cluster and backing file reads */
    if (ret == QED_CLUSTER_ZERO) {
        qemu_iovec_memset(&acb->cur_qiov, 0, 0, acb->cur_qiov.size);
        qed_aio_next_io(acb, 0);
        return;
    } else if (ret != QED_CLUSTER_FOUND) {
        qed_read_backing_file(s, acb->cur_pos, &acb->cur_qiov,
                              &acb->backing_qiov, qed_aio_next_io, acb);
        return;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
    bdrv_aio_readv(bs->file->bs, offset / BDRV_SECTOR_SIZE,
                   &acb->cur_qiov, acb->cur_qiov.size / BDRV_SECTOR_SIZE,
                   qed_aio_next_io, acb);
    return;

err:
    qed_aio_complete(acb, ret);
}

/**
 * Begin next I/O or complete the request
 */
static void qed_aio_next_io(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    QEDFindClusterFunc *io_fn = (acb->flags & QED_AIOCB_WRITE) ?
                                qed_aio_write_data : qed_aio_read_data;

    trace_qed_aio_next_io(s, acb, ret, acb->cur_pos + acb->cur_qiov.size);

    if (acb->backing_qiov) {
        qemu_iovec_destroy(acb->backing_qiov);
        g_free(acb->backing_qiov);
        acb->backing_qiov = NULL;
    }

    /* Handle I/O error */
    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    acb->qiov_offset += acb->cur_qiov.size;
    acb->cur_pos += acb->cur_qiov.size;
    qemu_iovec_reset(&acb->cur_qiov);

    /* Complete request */
    if (acb->cur_pos >= acb->end_pos) {
        qed_aio_complete(acb, 0);
        return;
    }

    /* Find next cluster and start I/O */
    qed_find_cluster(s, &acb->request,
                      acb->cur_pos, acb->end_pos - acb->cur_pos,
                      io_fn, acb);
}

static BlockAIOCB *qed_aio_setup(BlockDriverState *bs,
                                 int64_t sector_num,
                                 QEMUIOVector *qiov, int nb_sectors,
                                 BlockCompletionFunc *cb,
                                 void *opaque, int flags)
{
    QEDAIOCB *acb = qemu_aio_get(&qed_aiocb_info, bs, cb, opaque);

    trace_qed_aio_setup(bs->opaque, acb, sector_num, nb_sectors,
                        opaque, flags);

    acb->flags = flags;
    acb->qiov = qiov;
    acb->qiov_offset = 0;
    acb->cur_pos = (uint64_t)sector_num * BDRV_SECTOR_SIZE;
    acb->end_pos = acb->cur_pos + nb_sectors * BDRV_SECTOR_SIZE;
    acb->backing_qiov = NULL;
    acb->request.l2_table = NULL;
    qemu_iovec_init(&acb->cur_qiov, qiov->niov);

    /* Start request */
    qed_aio_next_io(acb, 0);
    return &acb->common;
}

static BlockAIOCB *bdrv_qed_aio_readv(BlockDriverState *bs,
                                      int64_t sector_num,
                                      QEMUIOVector *qiov, int nb_sectors,
                                      BlockCompletionFunc *cb,
                                      void *opaque)
{
    return qed_aio_setup(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
}

static BlockAIOCB *bdrv_qed_aio_writev(BlockDriverState *bs,
                                       int64_t sector_num,
                                       QEMUIOVector *qiov, int nb_sectors,
                                       BlockCompletionFunc *cb,
                                       void *opaque)
{
    return qed_aio_setup(bs, sector_num, qiov, nb_sectors, cb,
                         opaque, QED_AIOCB_WRITE);
}

typedef struct {
    Coroutine *co;
    int ret;
    bool done;
} QEDWriteZeroesCB;

static void coroutine_fn qed_co_write_zeroes_cb(void *opaque, int ret)
{
    QEDWriteZeroesCB *cb = opaque;

    cb->done = true;
    cb->ret = ret;
    if (cb->co) {
        qemu_coroutine_enter(cb->co, NULL);
    }
}

static int coroutine_fn bdrv_qed_co_write_zeroes(BlockDriverState *bs,
                                                 int64_t sector_num,
                                                 int nb_sectors,
                                                 BdrvRequestFlags flags)
{
    BlockAIOCB *blockacb;
    BDRVQEDState *s = bs->opaque;
    QEDWriteZeroesCB cb = { .done = false };
    QEMUIOVector qiov;
    struct iovec iov;

    /* Refuse if there are untouched backing file sectors */
    if (bs->backing) {
        if (qed_offset_into_cluster(s, sector_num * BDRV_SECTOR_SIZE) != 0) {
            return -ENOTSUP;
        }
        if (qed_offset_into_cluster(s, nb_sectors * BDRV_SECTOR_SIZE) != 0) {
            return -ENOTSUP;
        }
    }

    /* Zero writes start without an I/O buffer.  If a buffer becomes necessary
     * then it will be allocated during request processing.
     */
    iov.iov_base = NULL;
    iov.iov_len  = nb_sectors * BDRV_SECTOR_SIZE;

    qemu_iovec_init_external(&qiov, &iov, 1);
    blockacb = qed_aio_setup(bs, sector_num, &qiov, nb_sectors,
                             qed_co_write_zeroes_cb, &cb,
                             QED_AIOCB_WRITE | QED_AIOCB_ZERO);
    if (!blockacb) {
        return -EIO;
    }
    if (!cb.done) {
        cb.co = qemu_coroutine_self();
        qemu_coroutine_yield();
    }
    assert(cb.done);
    return cb.ret;
}

static int bdrv_qed_truncate(BlockDriverState *bs, int64_t offset)
{
    BDRVQEDState *s = bs->opaque;
    uint64_t old_image_size;
    int ret;

    if (!qed_is_image_size_valid(offset, s->header.cluster_size,
                                 s->header.table_size)) {
        return -EINVAL;
    }

    /* Shrinking is currently not supported */
    if ((uint64_t)offset < s->header.image_size) {
        return -ENOTSUP;
    }

    old_image_size = s->header.image_size;
    s->header.image_size = offset;
    ret = qed_write_header_sync(s);
    if (ret < 0) {
        s->header.image_size = old_image_size;
    }
    return ret;
}

static int64_t bdrv_qed_getlength(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;
    return s->header.image_size;
}

static int bdrv_qed_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    BDRVQEDState *s = bs->opaque;

    memset(bdi, 0, sizeof(*bdi));
    bdi->cluster_size = s->header.cluster_size;
    bdi->is_dirty = s->header.features & QED_F_NEED_CHECK;
    bdi->unallocated_blocks_are_zero = true;
    bdi->can_write_zeroes_with_unmap = true;
    return 0;
}

static int bdrv_qed_change_backing_file(BlockDriverState *bs,
                                        const char *backing_file,
                                        const char *backing_fmt)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader new_header, le_header;
    void *buffer;
    size_t buffer_len, backing_file_len;
    int ret;

    /* Refuse to set backing filename if unknown compat feature bits are
     * active.  If the image uses an unknown compat feature then we may not
     * know the layout of data following the header structure and cannot safely
     * add a new string.
     */
    if (backing_file && (s->header.compat_features &
                         ~QED_COMPAT_FEATURE_MASK)) {
        return -ENOTSUP;
    }

    memcpy(&new_header, &s->header, sizeof(new_header));

    new_header.features &= ~(QED_F_BACKING_FILE |
                             QED_F_BACKING_FORMAT_NO_PROBE);

    /* Adjust feature flags */
    if (backing_file) {
        new_header.features |= QED_F_BACKING_FILE;

        if (qed_fmt_is_raw(backing_fmt)) {
            new_header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    /* Calculate new header size */
    backing_file_len = 0;

    if (backing_file) {
        backing_file_len = strlen(backing_file);
    }

    buffer_len = sizeof(new_header);
    new_header.backing_filename_offset = buffer_len;
    new_header.backing_filename_size = backing_file_len;
    buffer_len += backing_file_len;

    /* Make sure we can rewrite header without failing */
    if (buffer_len > new_header.header_size * new_header.cluster_size) {
        return -ENOSPC;
    }

    /* Prepare new header */
    buffer = g_malloc(buffer_len);

    qed_header_cpu_to_le(&new_header, &le_header);
    memcpy(buffer, &le_header, sizeof(le_header));
    buffer_len = sizeof(le_header);

    if (backing_file) {
        memcpy(buffer + buffer_len, backing_file, backing_file_len);
        buffer_len += backing_file_len;
    }

    /* Write new header */
    ret = bdrv_pwrite_sync(bs->file->bs, 0, buffer, buffer_len);
    g_free(buffer);
    if (ret == 0) {
        memcpy(&s->header, &new_header, sizeof(new_header));
    }
    return ret;
}

static void bdrv_qed_invalidate_cache(BlockDriverState *bs, Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    Error *local_err = NULL;
    int ret;

    bdrv_qed_close(bs);

    bdrv_invalidate_cache(bs->file->bs, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    memset(s, 0, sizeof(BDRVQEDState));
    ret = bdrv_qed_open(bs, NULL, bs->open_flags, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        error_prepend(errp, "Could not reopen qed layer: ");
        return;
    } else if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not reopen qed layer");
        return;
    }
}

static int bdrv_qed_check(BlockDriverState *bs, BdrvCheckResult *result,
                          BdrvCheckMode fix)
{
    BDRVQEDState *s = bs->opaque;

    return qed_check(s, result, !!fix);
}

static QemuOptsList qed_create_opts = {
    .name = "qed-create-opts",
    .head = QTAILQ_HEAD_INITIALIZER(qed_create_opts.head),
    .desc = {
        {
            .name = BLOCK_OPT_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Virtual disk size"
        },
        {
            .name = BLOCK_OPT_BACKING_FILE,
            .type = QEMU_OPT_STRING,
            .help = "File name of a base image"
        },
        {
            .name = BLOCK_OPT_BACKING_FMT,
            .type = QEMU_OPT_STRING,
            .help = "Image format of the base image"
        },
        {
            .name = BLOCK_OPT_CLUSTER_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Cluster size (in bytes)",
            .def_value_str = stringify(QED_DEFAULT_CLUSTER_SIZE)
        },
        {
            .name = BLOCK_OPT_TABLE_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "L1/L2 table size (in clusters)"
        },
        { /* end of list */ }
    }
};

static BlockDriver bdrv_qed = {
    .format_name              = "qed",
    .instance_size            = sizeof(BDRVQEDState),
    .create_opts              = &qed_create_opts,
    .supports_backing         = true,

    .bdrv_probe               = bdrv_qed_probe,
    .bdrv_open                = bdrv_qed_open,
    .bdrv_close               = bdrv_qed_close,
    .bdrv_reopen_prepare      = bdrv_qed_reopen_prepare,
    .bdrv_create              = bdrv_qed_create,
    .bdrv_has_zero_init       = bdrv_has_zero_init_1,
    .bdrv_co_get_block_status = bdrv_qed_co_get_block_status,
    .bdrv_aio_readv           = bdrv_qed_aio_readv,
    .bdrv_aio_writev          = bdrv_qed_aio_writev,
    .bdrv_co_write_zeroes     = bdrv_qed_co_write_zeroes,
    .bdrv_truncate            = bdrv_qed_truncate,
    .bdrv_getlength           = bdrv_qed_getlength,
    .bdrv_get_info            = bdrv_qed_get_info,
    .bdrv_refresh_limits      = bdrv_qed_refresh_limits,
    .bdrv_change_backing_file = bdrv_qed_change_backing_file,
    .bdrv_invalidate_cache    = bdrv_qed_invalidate_cache,
    .bdrv_check               = bdrv_qed_check,
    .bdrv_detach_aio_context  = bdrv_qed_detach_aio_context,
    .bdrv_attach_aio_context  = bdrv_qed_attach_aio_context,
    .bdrv_drain               = bdrv_qed_drain,
};

static void bdrv_qed_init(void)
{
    bdrv_register(&bdrv_qed);
}

block_init(bdrv_qed_init);