/*
 * QEMU Enhanced Disk Format
 *
 * Copyright IBM, Corp. 2010
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/timer.h"
#include "trace.h"
#include "qed.h"
#include "qapi/qmp/qerror.h"
#include "migration/migration.h"

static const AIOCBInfo qed_aiocb_info = {
    .aiocb_size         = sizeof(QEDAIOCB),
};

static int bdrv_qed_probe(const uint8_t *buf, int buf_size,
                          const char *filename)
{
    const QEDHeader *header = (const QEDHeader *)buf;

    if (buf_size < sizeof(*header)) {
        return 0;
    }
    if (le32_to_cpu(header->magic) != QED_MAGIC) {
        return 0;
    }
    return 100;
}

/**
 * Check whether an image format is raw
 *
 * @fmt:    Backing file format, may be NULL
 */
static bool qed_fmt_is_raw(const char *fmt)
{
    return fmt && strcmp(fmt, "raw") == 0;
}

static void qed_header_le_to_cpu(const QEDHeader *le, QEDHeader *cpu)
{
    cpu->magic = le32_to_cpu(le->magic);
    cpu->cluster_size = le32_to_cpu(le->cluster_size);
    cpu->table_size = le32_to_cpu(le->table_size);
    cpu->header_size = le32_to_cpu(le->header_size);
    cpu->features = le64_to_cpu(le->features);
    cpu->compat_features = le64_to_cpu(le->compat_features);
    cpu->autoclear_features = le64_to_cpu(le->autoclear_features);
    cpu->l1_table_offset = le64_to_cpu(le->l1_table_offset);
    cpu->image_size = le64_to_cpu(le->image_size);
    cpu->backing_filename_offset = le32_to_cpu(le->backing_filename_offset);
    cpu->backing_filename_size = le32_to_cpu(le->backing_filename_size);
}

static void qed_header_cpu_to_le(const QEDHeader *cpu, QEDHeader *le)
{
    le->magic = cpu_to_le32(cpu->magic);
    le->cluster_size = cpu_to_le32(cpu->cluster_size);
    le->table_size = cpu_to_le32(cpu->table_size);
    le->header_size = cpu_to_le32(cpu->header_size);
    le->features = cpu_to_le64(cpu->features);
    le->compat_features = cpu_to_le64(cpu->compat_features);
    le->autoclear_features = cpu_to_le64(cpu->autoclear_features);
    le->l1_table_offset = cpu_to_le64(cpu->l1_table_offset);
    le->image_size = cpu_to_le64(cpu->image_size);
    le->backing_filename_offset = cpu_to_le32(cpu->backing_filename_offset);
    le->backing_filename_size = cpu_to_le32(cpu->backing_filename_size);
}

int qed_write_header_sync(BDRVQEDState *s)
{
    QEDHeader le;
    int ret;

    qed_header_cpu_to_le(&s->header, &le);
    ret = bdrv_pwrite(s->bs->file->bs, 0, &le, sizeof(le));
    if (ret != sizeof(le)) {
        return ret;
    }
    return 0;
}

typedef struct {
    GenericCB gencb;
    BDRVQEDState *s;
    struct iovec iov;
    QEMUIOVector qiov;
    int nsectors;
    uint8_t *buf;
} QEDWriteHeaderCB;

static void qed_write_header_cb(void *opaque, int ret)
{
    QEDWriteHeaderCB *write_header_cb = opaque;

    qemu_vfree(write_header_cb->buf);
    gencb_complete(write_header_cb, ret);
}

static void qed_write_header_read_cb(void *opaque, int ret)
{
    QEDWriteHeaderCB *write_header_cb = opaque;
    BDRVQEDState *s = write_header_cb->s;

    if (ret) {
        qed_write_header_cb(write_header_cb, ret);
        return;
    }

    /* Update header */
    qed_header_cpu_to_le(&s->header, (QEDHeader *)write_header_cb->buf);

    bdrv_aio_writev(s->bs->file->bs, 0, &write_header_cb->qiov,
                    write_header_cb->nsectors, qed_write_header_cb,
                    write_header_cb);
}

/**
 * Update header in-place (does not rewrite backing filename or other strings)
 *
 * This function only updates known header fields in-place and does not affect
 * extra data after the QED header.
 */
static void qed_write_header(BDRVQEDState *s, BlockCompletionFunc cb,
                             void *opaque)
{
    /* We must write full sectors for O_DIRECT but cannot necessarily generate
     * the data following the header if an unrecognized compat feature is
     * active.  Therefore, first read the sectors containing the header, update
     * them, and write back.
     */

    int nsectors = (sizeof(QEDHeader) + BDRV_SECTOR_SIZE - 1) /
                   BDRV_SECTOR_SIZE;
    size_t len = nsectors * BDRV_SECTOR_SIZE;
    QEDWriteHeaderCB *write_header_cb = gencb_alloc(sizeof(*write_header_cb),
                                                    cb, opaque);

    write_header_cb->s = s;
    write_header_cb->nsectors = nsectors;
    write_header_cb->buf = qemu_blockalign(s->bs, len);
    write_header_cb->iov.iov_base = write_header_cb->buf;
    write_header_cb->iov.iov_len = len;
    qemu_iovec_init_external(&write_header_cb->qiov, &write_header_cb->iov, 1);

    bdrv_aio_readv(s->bs->file->bs, 0, &write_header_cb->qiov, nsectors,
                   qed_write_header_read_cb, write_header_cb);
}

static uint64_t qed_max_image_size(uint32_t cluster_size, uint32_t table_size)
{
    uint64_t table_entries;
    uint64_t l2_size;

    table_entries = (table_size * cluster_size) / sizeof(uint64_t);
    l2_size = table_entries * cluster_size;

    return l2_size * table_entries;
}
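
/* Worked example (illustrative only, not part of the driver): with the QED
 * defaults of 64 KiB clusters and 4-cluster tables, each table holds
 * (4 * 65536) / 8 = 32768 entries, one L2 table maps 32768 * 64 KiB = 2 GiB,
 * and a full L1 table of 32768 L2 tables maps 2 GiB * 32768 = 64 TiB.
 */
#if 0
static void qed_max_image_size_example(void)
{
    uint64_t table_entries = (4 * 65536) / sizeof(uint64_t); /* 32768 */
    uint64_t l2_size = table_entries * 65536;                /* 2 GiB */
    uint64_t max_size = l2_size * table_entries;             /* 64 TiB */

    assert(max_size == qed_max_image_size(65536, 4));
}
#endif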

static bool qed_is_cluster_size_valid(uint32_t cluster_size)
{
    if (cluster_size < QED_MIN_CLUSTER_SIZE ||
        cluster_size > QED_MAX_CLUSTER_SIZE) {
        return false;
    }
    if (cluster_size & (cluster_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}
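
/* The (x & (x - 1)) test above clears the lowest set bit, so it evaluates to
 * zero only when at most one bit is set.  A minimal sketch of the idiom in
 * isolation (the zero case is already excluded by the range check above):
 */
#if 0
static bool is_power_of_2(uint32_t x)
{
    return x != 0 && (x & (x - 1)) == 0; /* 65536 -> true, 48 -> false */
}
#endif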

static bool qed_is_table_size_valid(uint32_t table_size)
{
    if (table_size < QED_MIN_TABLE_SIZE ||
        table_size > QED_MAX_TABLE_SIZE) {
        return false;
    }
    if (table_size & (table_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}

static bool qed_is_image_size_valid(uint64_t image_size, uint32_t cluster_size,
                                    uint32_t table_size)
{
    if (image_size % BDRV_SECTOR_SIZE != 0) {
        return false; /* not multiple of sector size */
    }
    if (image_size > qed_max_image_size(cluster_size, table_size)) {
        return false; /* image is too large */
    }
    return true;
}

/**
 * Read a string of known length from the image file
 *
 * @file:       Image file
 * @offset:     File offset to start of string, in bytes
 * @n:          String length in bytes
 * @buf:        Destination buffer
 * @buflen:     Destination buffer length in bytes
 * @ret:        0 on success, -errno on failure
 *
 * The string is NUL-terminated.
 */
static int qed_read_string(BlockDriverState *file, uint64_t offset, size_t n,
                           char *buf, size_t buflen)
{
    int ret;
    if (n >= buflen) {
        return -EINVAL;
    }
    ret = bdrv_pread(file, offset, buf, n);
    if (ret < 0) {
        return ret;
    }
    buf[n] = '\0';
    return 0;
}
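
/* Usage sketch (illustrative, hypothetical helper): reading the backing
 * filename the way bdrv_qed_open() does below.  The destination buffer must
 * be at least one byte larger than the on-disk string so the NUL terminator
 * fits; otherwise qed_read_string() fails with -EINVAL.
 */
#if 0
static int qed_read_backing_filename_example(BlockDriverState *file,
                                             const QEDHeader *h,
                                             char *buf, size_t buflen)
{
    return qed_read_string(file, h->backing_filename_offset,
                           h->backing_filename_size, buf, buflen);
}
#endif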

/**
 * Allocate new clusters
 *
 * @s:          QED state
 * @n:          Number of contiguous clusters to allocate
 * @ret:        Offset of first allocated cluster
 *
 * This function only produces the offset where the new clusters should be
 * written.  It updates BDRVQEDState but does not make any changes to the image
 * file.
 */
static uint64_t qed_alloc_clusters(BDRVQEDState *s, unsigned int n)
{
    uint64_t offset = s->file_size;
    s->file_size += n * s->header.cluster_size;
    return offset;
}
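
/* Illustrative sequence (not part of the driver): allocation is just a bump
 * of the in-memory end-of-file offset.  Assuming a 64 KiB cluster size and
 * s->file_size == 0x100000:
 */
#if 0
static void qed_alloc_clusters_example(BDRVQEDState *s)
{
    uint64_t a = qed_alloc_clusters(s, 2); /* a == 0x100000, file_size -> 0x120000 */
    uint64_t b = qed_alloc_clusters(s, 1); /* b == 0x120000, file_size -> 0x130000 */

    /* Nothing has been written to the image file yet at offsets a and b */
}
#endif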

QEDTable *qed_alloc_table(BDRVQEDState *s)
{
    /* Honor O_DIRECT memory alignment requirements */
    return qemu_blockalign(s->bs,
                           s->header.cluster_size * s->header.table_size);
}

/**
 * Allocate a new zeroed L2 table
 */
static CachedL2Table *qed_new_l2_table(BDRVQEDState *s)
{
    CachedL2Table *l2_table = qed_alloc_l2_cache_entry(&s->l2_cache);

    l2_table->table = qed_alloc_table(s);
    l2_table->offset = qed_alloc_clusters(s, s->header.table_size);

    memset(l2_table->table->offsets, 0,
           s->header.cluster_size * s->header.table_size);
    return l2_table;
}

static void qed_aio_next_io(void *opaque, int ret);

static void qed_plug_allocating_write_reqs(BDRVQEDState *s)
{
    assert(!s->allocating_write_reqs_plugged);

    s->allocating_write_reqs_plugged = true;
}

static void qed_unplug_allocating_write_reqs(BDRVQEDState *s)
{
    QEDAIOCB *acb;

    assert(s->allocating_write_reqs_plugged);

    s->allocating_write_reqs_plugged = false;

    acb = QSIMPLEQ_FIRST(&s->allocating_write_reqs);
    if (acb) {
        qed_aio_next_io(acb, 0);
    }
}

static void qed_finish_clear_need_check(void *opaque, int ret)
{
    /* Do nothing */
}

static void qed_flush_after_clear_need_check(void *opaque, int ret)
{
    BDRVQEDState *s = opaque;

    bdrv_aio_flush(s->bs, qed_finish_clear_need_check, s);

    /* No need to wait until flush completes */
    qed_unplug_allocating_write_reqs(s);
}

static void qed_clear_need_check(void *opaque, int ret)
{
    BDRVQEDState *s = opaque;

    if (ret) {
        qed_unplug_allocating_write_reqs(s);
        return;
    }

    s->header.features &= ~QED_F_NEED_CHECK;
    qed_write_header(s, qed_flush_after_clear_need_check, s);
}

static void qed_need_check_timer_cb(void *opaque)
{
    BDRVQEDState *s = opaque;

    /* The timer should only fire when allocating writes have drained */
    assert(!QSIMPLEQ_FIRST(&s->allocating_write_reqs));

    trace_qed_need_check_timer_cb(s);

    qed_plug_allocating_write_reqs(s);

    /* Ensure writes are on disk before clearing flag */
    bdrv_aio_flush(s->bs, qed_clear_need_check, s);
}
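
/* Illustrative outline of the full sequence run by this timer, mirroring the
 * callbacks above:
 *
 *   qed_need_check_timer_cb()
 *     -> qed_plug_allocating_write_reqs()   no new allocations meanwhile
 *     -> bdrv_aio_flush()                   data clusters reach the disk
 *     -> qed_clear_need_check()             clear QED_F_NEED_CHECK and
 *                                           rewrite the header
 *     -> qed_flush_after_clear_need_check()
 *          -> bdrv_aio_flush()              header reaches the disk
 *          -> qed_unplug_allocating_write_reqs()  without waiting for that
 *                                                 flush to complete
 */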

static void qed_start_need_check_timer(BDRVQEDState *s)
{
    trace_qed_start_need_check_timer(s);

    /* Use QEMU_CLOCK_VIRTUAL so we don't alter the image file while suspended
     * for migration.
     */
    timer_mod(s->need_check_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                   get_ticks_per_sec() * QED_NEED_CHECK_TIMEOUT);
}

/* It's okay to call this multiple times or when no timer is started */
static void qed_cancel_need_check_timer(BDRVQEDState *s)
{
    trace_qed_cancel_need_check_timer(s);
    timer_del(s->need_check_timer);
}

static void bdrv_qed_detach_aio_context(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    qed_cancel_need_check_timer(s);
    timer_free(s->need_check_timer);
}

static void bdrv_qed_attach_aio_context(BlockDriverState *bs,
                                        AioContext *new_context)
{
    BDRVQEDState *s = bs->opaque;

    s->need_check_timer = aio_timer_new(new_context,
                                        QEMU_CLOCK_VIRTUAL, SCALE_NS,
                                        qed_need_check_timer_cb, s);
    if (s->header.features & QED_F_NEED_CHECK) {
        qed_start_need_check_timer(s);
    }
}

static int bdrv_qed_open(BlockDriverState *bs, QDict *options, int flags,
                         Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader le_header;
    int64_t file_size;
    int ret;

    s->bs = bs;
    QSIMPLEQ_INIT(&s->allocating_write_reqs);

    ret = bdrv_pread(bs->file->bs, 0, &le_header, sizeof(le_header));
    if (ret < 0) {
        return ret;
    }
    qed_header_le_to_cpu(&le_header, &s->header);

    if (s->header.magic != QED_MAGIC) {
        error_setg(errp, "Image not in QED format");
        return -EINVAL;
    }
    if (s->header.features & ~QED_FEATURE_MASK) {
        /* image uses unsupported feature bits */
        char buf[64];
        snprintf(buf, sizeof(buf), "%" PRIx64,
            s->header.features & ~QED_FEATURE_MASK);
        error_setg(errp, QERR_UNKNOWN_BLOCK_FORMAT_FEATURE,
                   bdrv_get_device_or_node_name(bs), "QED", buf);
        return -ENOTSUP;
    }
    if (!qed_is_cluster_size_valid(s->header.cluster_size)) {
        return -EINVAL;
    }

    /* Round down file size to the last cluster */
    file_size = bdrv_getlength(bs->file->bs);
    if (file_size < 0) {
        return file_size;
    }
    s->file_size = qed_start_of_cluster(s, file_size);

    if (!qed_is_table_size_valid(s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_is_image_size_valid(s->header.image_size,
                                 s->header.cluster_size,
                                 s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_check_table_offset(s, s->header.l1_table_offset)) {
        return -EINVAL;
    }

    s->table_nelems = (s->header.cluster_size * s->header.table_size) /
                      sizeof(uint64_t);
    s->l2_shift = ctz32(s->header.cluster_size);
    s->l2_mask = s->table_nelems - 1;
    s->l1_shift = s->l2_shift + ctz32(s->table_nelems);

    /* Header size calculation must not overflow uint32_t */
    if (s->header.header_size > UINT32_MAX / s->header.cluster_size) {
        return -EINVAL;
    }

    if ((s->header.features & QED_F_BACKING_FILE)) {
        if ((uint64_t)s->header.backing_filename_offset +
            s->header.backing_filename_size >
            s->header.cluster_size * s->header.header_size) {
            return -EINVAL;
        }

        ret = qed_read_string(bs->file->bs, s->header.backing_filename_offset,
                              s->header.backing_filename_size, bs->backing_file,
                              sizeof(bs->backing_file));
        if (ret < 0) {
            return ret;
        }

        if (s->header.features & QED_F_BACKING_FORMAT_NO_PROBE) {
            pstrcpy(bs->backing_format, sizeof(bs->backing_format), "raw");
        }
    }

    /* Reset unknown autoclear feature bits.  This is a backwards
     * compatibility mechanism that allows images to be opened by older
     * programs, which "knock out" unknown feature bits.  When an image is
     * opened by a newer program again it can detect that the autoclear
     * feature is no longer valid.
     */
    if ((s->header.autoclear_features & ~QED_AUTOCLEAR_FEATURE_MASK) != 0 &&
        !bdrv_is_read_only(bs->file->bs) && !(flags & BDRV_O_INCOMING)) {
        s->header.autoclear_features &= QED_AUTOCLEAR_FEATURE_MASK;

        ret = qed_write_header_sync(s);
        if (ret) {
            return ret;
        }

        /* From here on only known autoclear feature bits are valid */
        bdrv_flush(bs->file->bs);
    }

    s->l1_table = qed_alloc_table(s);
    qed_init_l2_cache(&s->l2_cache);

    ret = qed_read_l1_table_sync(s);
    if (ret) {
        goto out;
    }

    /* If image was not closed cleanly, check consistency */
    if (!(flags & BDRV_O_CHECK) && (s->header.features & QED_F_NEED_CHECK)) {
        /* Read-only images cannot be fixed.  There is no risk of corruption
         * since write operations are not possible.  Therefore, allow
         * potentially inconsistent images to be opened read-only.  This can
         * aid data recovery from an otherwise inconsistent image.
         */
        if (!bdrv_is_read_only(bs->file->bs) &&
            !(flags & BDRV_O_INCOMING)) {
            BdrvCheckResult result = {0};

            ret = qed_check(s, &result, true);
            if (ret) {
                goto out;
            }
        }
    }

    bdrv_qed_attach_aio_context(bs, bdrv_get_aio_context(bs));

out:
    if (ret) {
        qed_free_l2_cache(&s->l2_cache);
        qemu_vfree(s->l1_table);
    }
    return ret;
}

static void bdrv_qed_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BDRVQEDState *s = bs->opaque;

    bs->bl.write_zeroes_alignment = s->header.cluster_size >> BDRV_SECTOR_BITS;
}

/* We have nothing to do for QED reopen; the stubs just return
 * success */
static int bdrv_qed_reopen_prepare(BDRVReopenState *state,
                                   BlockReopenQueue *queue, Error **errp)
{
    return 0;
}

static void bdrv_qed_close(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    bdrv_qed_detach_aio_context(bs);

    /* Ensure writes reach stable storage */
    bdrv_flush(bs->file->bs);

    /* Clean shutdown, no check required on next open */
    if (s->header.features & QED_F_NEED_CHECK) {
        s->header.features &= ~QED_F_NEED_CHECK;
        qed_write_header_sync(s);
    }

    qed_free_l2_cache(&s->l2_cache);
    qemu_vfree(s->l1_table);
}

static int qed_create(const char *filename, uint32_t cluster_size,
                      uint64_t image_size, uint32_t table_size,
                      const char *backing_file, const char *backing_fmt,
                      QemuOpts *opts, Error **errp)
{
    QEDHeader header = {
        .magic = QED_MAGIC,
        .cluster_size = cluster_size,
        .table_size = table_size,
        .header_size = 1,
        .features = 0,
        .compat_features = 0,
        .l1_table_offset = cluster_size,
        .image_size = image_size,
    };
    QEDHeader le_header;
    uint8_t *l1_table = NULL;
    size_t l1_size = header.cluster_size * header.table_size;
    Error *local_err = NULL;
    int ret = 0;
    BlockDriverState *bs;

    ret = bdrv_create_file(filename, opts, &local_err);
    if (ret < 0) {
        error_propagate(errp, local_err);
        return ret;
    }

    bs = NULL;
    ret = bdrv_open(&bs, filename, NULL, NULL,
                    BDRV_O_RDWR | BDRV_O_CACHE_WB | BDRV_O_PROTOCOL,
                    &local_err);
    if (ret < 0) {
        error_propagate(errp, local_err);
        return ret;
    }

    /* File must start empty and grow; check that truncate is supported */
    ret = bdrv_truncate(bs, 0);
    if (ret < 0) {
        goto out;
    }

    if (backing_file) {
        header.features |= QED_F_BACKING_FILE;
        header.backing_filename_offset = sizeof(le_header);
        header.backing_filename_size = strlen(backing_file);

        if (qed_fmt_is_raw(backing_fmt)) {
            header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    qed_header_cpu_to_le(&header, &le_header);
    ret = bdrv_pwrite(bs, 0, &le_header, sizeof(le_header));
    if (ret < 0) {
        goto out;
    }
    ret = bdrv_pwrite(bs, sizeof(le_header), backing_file,
                      header.backing_filename_size);
    if (ret < 0) {
        goto out;
    }

    l1_table = g_malloc0(l1_size);
    ret = bdrv_pwrite(bs, header.l1_table_offset, l1_table, l1_size);
    if (ret < 0) {
        goto out;
    }

    ret = 0; /* success */
out:
    g_free(l1_table);
    bdrv_unref(bs);
    return ret;
}
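
/* Resulting file layout for a fresh image (illustrative, assuming the
 * defaults of cluster_size = 64 KiB and table_size = 4):
 *
 *   offset 0:                  QEDHeader (header_size = 1 reserves cluster 0)
 *   offset sizeof(le_header):  backing filename string, if any
 *   offset 64 KiB:             L1 table, 4 clusters (256 KiB) of zero entries
 *
 * L2 tables and data clusters are allocated on demand after the L1 table.
 */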

static int bdrv_qed_create(const char *filename, QemuOpts *opts, Error **errp)
{
    uint64_t image_size = 0;
    uint32_t cluster_size = QED_DEFAULT_CLUSTER_SIZE;
    uint32_t table_size = QED_DEFAULT_TABLE_SIZE;
    char *backing_file = NULL;
    char *backing_fmt = NULL;
    int ret;

    image_size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0),
                          BDRV_SECTOR_SIZE);
    backing_file = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FILE);
    backing_fmt = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FMT);
    cluster_size = qemu_opt_get_size_del(opts,
                                         BLOCK_OPT_CLUSTER_SIZE,
                                         QED_DEFAULT_CLUSTER_SIZE);
    table_size = qemu_opt_get_size_del(opts, BLOCK_OPT_TABLE_SIZE,
                                       QED_DEFAULT_TABLE_SIZE);

    if (!qed_is_cluster_size_valid(cluster_size)) {
        error_setg(errp, "QED cluster size must be within range [%u, %u] "
                         "and power of 2",
                   QED_MIN_CLUSTER_SIZE, QED_MAX_CLUSTER_SIZE);
        ret = -EINVAL;
        goto finish;
    }
    if (!qed_is_table_size_valid(table_size)) {
        error_setg(errp, "QED table size must be within range [%u, %u] "
                         "and power of 2",
                   QED_MIN_TABLE_SIZE, QED_MAX_TABLE_SIZE);
        ret = -EINVAL;
        goto finish;
    }
    if (!qed_is_image_size_valid(image_size, cluster_size, table_size)) {
        error_setg(errp, "QED image size must be a non-zero multiple of "
                         "cluster size and less than %" PRIu64 " bytes",
                   qed_max_image_size(cluster_size, table_size));
        ret = -EINVAL;
        goto finish;
    }

    ret = qed_create(filename, cluster_size, image_size, table_size,
                     backing_file, backing_fmt, opts, errp);

finish:
    g_free(backing_file);
    g_free(backing_fmt);
    return ret;
}

typedef struct {
    BlockDriverState *bs;
    Coroutine *co;
    uint64_t pos;
    int64_t status;
    int *pnum;
} QEDIsAllocatedCB;

static void qed_is_allocated_cb(void *opaque, int ret, uint64_t offset, size_t len)
{
    QEDIsAllocatedCB *cb = opaque;
    BDRVQEDState *s = cb->bs->opaque;
    *cb->pnum = len / BDRV_SECTOR_SIZE;
    switch (ret) {
    case QED_CLUSTER_FOUND:
        offset |= qed_offset_into_cluster(s, cb->pos);
        cb->status = BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID | offset;
        break;
    case QED_CLUSTER_ZERO:
        cb->status = BDRV_BLOCK_ZERO;
        break;
    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
        cb->status = 0;
        break;
    default:
        assert(ret < 0);
        cb->status = ret;
        break;
    }

    if (cb->co) {
        qemu_coroutine_enter(cb->co, NULL);
    }
}

static int64_t coroutine_fn bdrv_qed_co_get_block_status(BlockDriverState *bs,
                                                 int64_t sector_num,
                                                 int nb_sectors, int *pnum)
{
    BDRVQEDState *s = bs->opaque;
    size_t len = (size_t)nb_sectors * BDRV_SECTOR_SIZE;
    QEDIsAllocatedCB cb = {
        .bs = bs,
        .pos = (uint64_t)sector_num * BDRV_SECTOR_SIZE,
        .status = BDRV_BLOCK_OFFSET_MASK,
        .pnum = pnum,
    };
    QEDRequest request = { .l2_table = NULL };

    qed_find_cluster(s, &request, cb.pos, len, qed_is_allocated_cb, &cb);

    /* Now yield if the callback wasn't invoked immediately */
    while (cb.status == BDRV_BLOCK_OFFSET_MASK) {
        cb.co = qemu_coroutine_self();
        qemu_coroutine_yield();
    }

    qed_unref_l2_cache_entry(request.l2_table);

    return cb.status;
}
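
/* The function above bridges a callback-based API into a coroutine:
 * cb.status starts at an impossible sentinel (BDRV_BLOCK_OFFSET_MASK) and the
 * coroutine yields until qed_is_allocated_cb() stores a real status and
 * re-enters it.  A minimal sketch of the same idiom, with hypothetical names:
 */
#if 0
typedef struct {
    Coroutine *co;  /* set only if the caller had to yield */
    int ret;        /* sentinel until the callback fires */
} ExampleCB;

static void example_cb(void *opaque, int ret)
{
    ExampleCB *cb = opaque;

    cb->ret = ret;
    if (cb->co) {
        qemu_coroutine_enter(cb->co, NULL); /* resume the waiting coroutine */
    }
}
#endif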

static BDRVQEDState *acb_to_s(QEDAIOCB *acb)
{
    return acb->common.bs->opaque;
}

/**
 * Read from the backing file or zero-fill if no backing file
 *
 * @s:              QED state
 * @pos:            Byte position in device
 * @qiov:           Destination I/O vector
 * @backing_qiov:   Possibly shortened copy of qiov, to be allocated here
 * @cb:             Completion function
 * @opaque:         User data for completion function
 *
 * This function reads qiov->size bytes starting at pos from the backing file.
 * If there is no backing file then zeroes are read.
 */
static void qed_read_backing_file(BDRVQEDState *s, uint64_t pos,
                                  QEMUIOVector *qiov,
                                  QEMUIOVector **backing_qiov,
                                  BlockCompletionFunc *cb, void *opaque)
{
    uint64_t backing_length = 0;
    size_t size;

    /* If there is a backing file, get its length.  Treat the absence of a
     * backing file like a zero length backing file.
     */
    if (s->bs->backing) {
        int64_t l = bdrv_getlength(s->bs->backing->bs);
        if (l < 0) {
            cb(opaque, l);
            return;
        }
        backing_length = l;
    }

    /* Zero all sectors if reading beyond the end of the backing file */
    if (pos >= backing_length ||
        pos + qiov->size > backing_length) {
        qemu_iovec_memset(qiov, 0, 0, qiov->size);
    }

    /* Complete now if there are no backing file sectors to read */
    if (pos >= backing_length) {
        cb(opaque, 0);
        return;
    }

    /* If the read straddles the end of the backing file, shorten it */
    size = MIN((uint64_t)backing_length - pos, qiov->size);

    assert(*backing_qiov == NULL);
    *backing_qiov = g_new(QEMUIOVector, 1);
    qemu_iovec_init(*backing_qiov, qiov->niov);
    qemu_iovec_concat(*backing_qiov, qiov, 0, size);

    BLKDBG_EVENT(s->bs->file, BLKDBG_READ_BACKING_AIO);
    bdrv_aio_readv(s->bs->backing->bs, pos / BDRV_SECTOR_SIZE,
                   *backing_qiov, size / BDRV_SECTOR_SIZE, cb, opaque);
}
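
/* Example of the straddling case (illustrative): with backing_length = 1 MiB,
 * pos = 1 MiB - 4 KiB and qiov->size = 64 KiB, the whole qiov is zeroed
 * first, then *backing_qiov is built as a 4 KiB prefix of qiov and only those
 * sectors are read from the backing file; the remaining 60 KiB keep their
 * zero fill.
 */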

typedef struct {
    GenericCB gencb;
    BDRVQEDState *s;
    QEMUIOVector qiov;
    QEMUIOVector *backing_qiov;
    struct iovec iov;
    uint64_t offset;
} CopyFromBackingFileCB;

static void qed_copy_from_backing_file_cb(void *opaque, int ret)
{
    CopyFromBackingFileCB *copy_cb = opaque;
    qemu_vfree(copy_cb->iov.iov_base);
    gencb_complete(&copy_cb->gencb, ret);
}

static void qed_copy_from_backing_file_write(void *opaque, int ret)
{
    CopyFromBackingFileCB *copy_cb = opaque;
    BDRVQEDState *s = copy_cb->s;

    if (copy_cb->backing_qiov) {
        qemu_iovec_destroy(copy_cb->backing_qiov);
        g_free(copy_cb->backing_qiov);
        copy_cb->backing_qiov = NULL;
    }

    if (ret) {
        qed_copy_from_backing_file_cb(copy_cb, ret);
        return;
    }

    BLKDBG_EVENT(s->bs->file, BLKDBG_COW_WRITE);
    bdrv_aio_writev(s->bs->file->bs, copy_cb->offset / BDRV_SECTOR_SIZE,
                    &copy_cb->qiov, copy_cb->qiov.size / BDRV_SECTOR_SIZE,
                    qed_copy_from_backing_file_cb, copy_cb);
}

/**
 * Copy data from backing file into the image
 *
 * @s:          QED state
 * @pos:        Byte position in device
 * @len:        Number of bytes
 * @offset:     Byte offset in image file
 * @cb:         Completion function
 * @opaque:     User data for completion function
 */
static void qed_copy_from_backing_file(BDRVQEDState *s, uint64_t pos,
                                       uint64_t len, uint64_t offset,
                                       BlockCompletionFunc *cb,
                                       void *opaque)
{
    CopyFromBackingFileCB *copy_cb;

    /* Skip copy entirely if there is no work to do */
    if (len == 0) {
        cb(opaque, 0);
        return;
    }

    copy_cb = gencb_alloc(sizeof(*copy_cb), cb, opaque);
    copy_cb->s = s;
    copy_cb->offset = offset;
    copy_cb->backing_qiov = NULL;
    copy_cb->iov.iov_base = qemu_blockalign(s->bs, len);
    copy_cb->iov.iov_len = len;
    qemu_iovec_init_external(&copy_cb->qiov, &copy_cb->iov, 1);

    qed_read_backing_file(s, pos, &copy_cb->qiov, &copy_cb->backing_qiov,
                          qed_copy_from_backing_file_write, copy_cb);
}

/**
 * Link one or more contiguous clusters into a table
 *
 * @s:              QED state
 * @table:          L2 table
 * @index:          First cluster index
 * @n:              Number of contiguous clusters
 * @cluster:        First cluster offset
 *
 * The cluster offset may be an allocated byte offset in the image file, the
 * zero cluster marker, or the unallocated cluster marker.
 */
static void qed_update_l2_table(BDRVQEDState *s, QEDTable *table, int index,
                                unsigned int n, uint64_t cluster)
{
    int i;
    for (i = index; i < index + n; i++) {
        table->offsets[i] = cluster;
        if (!qed_offset_is_unalloc_cluster(cluster) &&
            !qed_offset_is_zero_cluster(cluster)) {
            cluster += s->header.cluster_size;
        }
    }
}
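
/* Example (illustrative): with 64 KiB clusters, linking n = 3 clusters at
 * cluster = 0x200000 into index 5 yields:
 *
 *   table->offsets[5] = 0x200000
 *   table->offsets[6] = 0x210000
 *   table->offsets[7] = 0x220000
 *
 * whereas the unallocated and zero markers are stored unchanged into every
 * entry, since they are not byte offsets and must not be incremented.
 */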

static void qed_aio_complete_bh(void *opaque)
{
    QEDAIOCB *acb = opaque;
    BlockCompletionFunc *cb = acb->common.cb;
    void *user_opaque = acb->common.opaque;
    int ret = acb->bh_ret;

    qemu_bh_delete(acb->bh);
    qemu_aio_unref(acb);

    /* Invoke callback */
    cb(user_opaque, ret);
}

static void qed_aio_complete(QEDAIOCB *acb, int ret)
{
    BDRVQEDState *s = acb_to_s(acb);

    trace_qed_aio_complete(s, acb, ret);

    /* Free resources */
    qemu_iovec_destroy(&acb->cur_qiov);
    qed_unref_l2_cache_entry(acb->request.l2_table);

    /* Free the buffer we may have allocated for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        qemu_vfree(acb->qiov->iov[0].iov_base);
        acb->qiov->iov[0].iov_base = NULL;
    }

    /* Arrange for a bh to invoke the completion function */
    acb->bh_ret = ret;
    acb->bh = aio_bh_new(bdrv_get_aio_context(acb->common.bs),
                         qed_aio_complete_bh, acb);
    qemu_bh_schedule(acb->bh);

    /* Start next allocating write request waiting behind this one.  Note that
     * requests enqueue themselves when they first hit an unallocated cluster
     * but they wait until the entire request is finished before waking up the
     * next request in the queue.  This ensures that we don't cycle through
     * requests multiple times but rather finish one at a time completely.
     */
    if (acb == QSIMPLEQ_FIRST(&s->allocating_write_reqs)) {
        QSIMPLEQ_REMOVE_HEAD(&s->allocating_write_reqs, next);
        acb = QSIMPLEQ_FIRST(&s->allocating_write_reqs);
        if (acb) {
            qed_aio_next_io(acb, 0);
        } else if (s->header.features & QED_F_NEED_CHECK) {
            qed_start_need_check_timer(s);
        }
    }
}

/**
 * Commit the current L2 table to the cache
 */
static void qed_commit_l2_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    CachedL2Table *l2_table = acb->request.l2_table;
    uint64_t l2_offset = l2_table->offset;

    qed_commit_l2_cache_entry(&s->l2_cache, l2_table);

    /* This is guaranteed to succeed because we just committed the entry to the
     * cache.
     */
    acb->request.l2_table = qed_find_l2_cache_entry(&s->l2_cache, l2_offset);
    assert(acb->request.l2_table != NULL);

    qed_aio_next_io(opaque, ret);
}

/**
 * Update L1 table with new L2 table offset and write it out
 */
static void qed_aio_write_l1_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    int index;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    index = qed_l1_index(s, acb->cur_pos);
    s->l1_table->offsets[index] = acb->request.l2_table->offset;

    qed_write_l1_table(s, index, 1, qed_commit_l2_update, acb);
}

/**
 * Update L2 table with new cluster offsets and write them out
 */
static void qed_aio_write_l2_update(QEDAIOCB *acb, int ret, uint64_t offset)
{
    BDRVQEDState *s = acb_to_s(acb);
    bool need_alloc = acb->find_cluster_ret == QED_CLUSTER_L1;
    int index;

    if (ret) {
        goto err;
    }

    if (need_alloc) {
        qed_unref_l2_cache_entry(acb->request.l2_table);
        acb->request.l2_table = qed_new_l2_table(s);
    }

    index = qed_l2_index(s, acb->cur_pos);
    qed_update_l2_table(s, acb->request.l2_table->table, index,
                        acb->cur_nclusters, offset);

    if (need_alloc) {
        /* Write out the whole new L2 table */
        qed_write_l2_table(s, &acb->request, 0, s->table_nelems, true,
                           qed_aio_write_l1_update, acb);
    } else {
        /* Write out only the updated part of the L2 table */
        qed_write_l2_table(s, &acb->request, index, acb->cur_nclusters, false,
                           qed_aio_next_io, acb);
    }
    return;

err:
    qed_aio_complete(acb, ret);
}

static void qed_aio_write_l2_update_cb(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    qed_aio_write_l2_update(acb, ret, acb->cur_cluster);
}

/**
 * Flush new data clusters before updating the L2 table
 *
 * This flush is necessary when a backing file is in use.  A crash during an
 * allocating write could result in empty clusters in the image.  If the write
 * only touched a subregion of the cluster, then backing image sectors have
 * been lost in the untouched region.  The solution is to flush after writing a
 * new data cluster and before updating the L2 table.
 */
static void qed_aio_write_flush_before_l2_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);

    if (!bdrv_aio_flush(s->bs->file->bs, qed_aio_write_l2_update_cb, opaque)) {
        qed_aio_complete(acb, -EIO);
    }
}
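
/* Failure ordering example (illustrative): a guest write allocates a new
 * cluster, the surrounding backing-file data is copied in around it, and the
 * host crashes.  Had the L2 table entry reached the disk before the data
 * cluster, a later read would return stale or zero data instead of the
 * backing file contents.  Flushing the data first guarantees that an L2
 * update can only ever point at fully written clusters.
 */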

/**
 * Write data to the image file
 */
static void qed_aio_write_main(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t offset = acb->cur_cluster +
                      qed_offset_into_cluster(s, acb->cur_pos);
    BlockCompletionFunc *next_fn;

    trace_qed_aio_write_main(s, acb, ret, offset, acb->cur_qiov.size);

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    if (acb->find_cluster_ret == QED_CLUSTER_FOUND) {
        next_fn = qed_aio_next_io;
    } else {
        if (s->bs->backing) {
            next_fn = qed_aio_write_flush_before_l2_update;
        } else {
            next_fn = qed_aio_write_l2_update_cb;
        }
    }

    BLKDBG_EVENT(s->bs->file, BLKDBG_WRITE_AIO);
    bdrv_aio_writev(s->bs->file->bs, offset / BDRV_SECTOR_SIZE,
                    &acb->cur_qiov, acb->cur_qiov.size / BDRV_SECTOR_SIZE,
                    next_fn, acb);
}

/**
 * Populate the untouched region at the back of a new data cluster
 */
static void qed_aio_write_postfill(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t start = acb->cur_pos + acb->cur_qiov.size;
    uint64_t len =
        qed_start_of_cluster(s, start + s->header.cluster_size - 1) - start;
    uint64_t offset = acb->cur_cluster +
                      qed_offset_into_cluster(s, acb->cur_pos) +
                      acb->cur_qiov.size;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    trace_qed_aio_write_postfill(s, acb, start, len, offset);
    qed_copy_from_backing_file(s, start, len, offset,
                               qed_aio_write_main, acb);
}

/**
 * Populate the untouched region at the front of a new data cluster
 */
static void qed_aio_write_prefill(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t start = qed_start_of_cluster(s, acb->cur_pos);
    uint64_t len = qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_write_prefill(s, acb, start, len, acb->cur_cluster);
    qed_copy_from_backing_file(s, start, len, acb->cur_cluster,
                               qed_aio_write_postfill, acb);
}

/**
 * Check if the QED_F_NEED_CHECK bit should be set during allocating write
 */
static bool qed_should_set_need_check(BDRVQEDState *s)
{
    /* The flush before L2 update path ensures consistency */
    if (s->bs->backing) {
        return false;
    }

    return !(s->header.features & QED_F_NEED_CHECK);
}

static void qed_aio_write_zero_cluster(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    qed_aio_write_l2_update(acb, 0, 1);
}

/**
 * Write new data cluster
 *
 * @acb:        Write request
 * @len:        Length in bytes
 *
 * This path is taken when writing to previously unallocated clusters.
 */
static void qed_aio_write_alloc(QEDAIOCB *acb, size_t len)
{
    BDRVQEDState *s = acb_to_s(acb);
    BlockCompletionFunc *cb;

    /* Cancel timer when the first allocating request comes in */
    if (QSIMPLEQ_EMPTY(&s->allocating_write_reqs)) {
        qed_cancel_need_check_timer(s);
    }

    /* Freeze this request if another allocating write is in progress */
    if (acb != QSIMPLEQ_FIRST(&s->allocating_write_reqs)) {
        QSIMPLEQ_INSERT_TAIL(&s->allocating_write_reqs, acb, next);
    }
    if (acb != QSIMPLEQ_FIRST(&s->allocating_write_reqs) ||
        s->allocating_write_reqs_plugged) {
        return; /* wait for existing request to finish */
    }

    acb->cur_nclusters = qed_bytes_to_clusters(s,
            qed_offset_into_cluster(s, acb->cur_pos) + len);
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    if (acb->flags & QED_AIOCB_ZERO) {
        /* Skip ahead if the clusters are already zero */
        if (acb->find_cluster_ret == QED_CLUSTER_ZERO) {
            qed_aio_next_io(acb, 0);
            return;
        }

        cb = qed_aio_write_zero_cluster;
    } else {
        cb = qed_aio_write_prefill;
        acb->cur_cluster = qed_alloc_clusters(s, acb->cur_nclusters);
    }

    if (qed_should_set_need_check(s)) {
        s->header.features |= QED_F_NEED_CHECK;
        qed_write_header(s, cb, acb);
    } else {
        cb(acb, 0);
    }
}

/**
 * Write data cluster in place
 *
 * @acb:        Write request
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * This path is taken when writing to already allocated clusters.
 */
static void qed_aio_write_inplace(QEDAIOCB *acb, uint64_t offset, size_t len)
{
    /* Allocate buffer for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        struct iovec *iov = acb->qiov->iov;

        if (!iov->iov_base) {
            iov->iov_base = qemu_try_blockalign(acb->common.bs, iov->iov_len);
            if (iov->iov_base == NULL) {
                qed_aio_complete(acb, -ENOMEM);
                return;
            }
            memset(iov->iov_base, 0, iov->iov_len);
        }
    }

    /* Calculate the I/O vector */
    acb->cur_cluster = offset;
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Do the actual write */
    qed_aio_write_main(acb, 0);
}

/**
 * Write data cluster
 *
 * @opaque:     Write request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_L2, QED_CLUSTER_L1,
 *              or -errno
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * Callback from qed_find_cluster().
 */
static void qed_aio_write_data(void *opaque, int ret,
                               uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;

    trace_qed_aio_write_data(acb_to_s(acb), acb, ret, offset, len);

    acb->find_cluster_ret = ret;

    switch (ret) {
    case QED_CLUSTER_FOUND:
        qed_aio_write_inplace(acb, offset, len);
        break;

    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
    case QED_CLUSTER_ZERO:
        qed_aio_write_alloc(acb, len);
        break;

    default:
        qed_aio_complete(acb, ret);
        break;
    }
}

/**
 * Read data cluster
 *
 * @opaque:     Read request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_L2, QED_CLUSTER_L1,
 *              or -errno
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * Callback from qed_find_cluster().
 */
static void qed_aio_read_data(void *opaque, int ret,
                              uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    BlockDriverState *bs = acb->common.bs;

    /* Adjust offset into cluster */
    offset += qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_read_data(s, acb, ret, offset, len);

    if (ret < 0) {
        goto err;
    }

    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Handle zero cluster and backing file reads */
    if (ret == QED_CLUSTER_ZERO) {
        qemu_iovec_memset(&acb->cur_qiov, 0, 0, acb->cur_qiov.size);
        qed_aio_next_io(acb, 0);
        return;
    } else if (ret != QED_CLUSTER_FOUND) {
        qed_read_backing_file(s, acb->cur_pos, &acb->cur_qiov,
                              &acb->backing_qiov, qed_aio_next_io, acb);
        return;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
    bdrv_aio_readv(bs->file->bs, offset / BDRV_SECTOR_SIZE,
                   &acb->cur_qiov, acb->cur_qiov.size / BDRV_SECTOR_SIZE,
                   qed_aio_next_io, acb);
    return;

err:
    qed_aio_complete(acb, ret);
}

/**
 * Begin next I/O or complete the request
 */
static void qed_aio_next_io(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    QEDFindClusterFunc *io_fn = (acb->flags & QED_AIOCB_WRITE) ?
                                qed_aio_write_data : qed_aio_read_data;

    trace_qed_aio_next_io(s, acb, ret, acb->cur_pos + acb->cur_qiov.size);

    if (acb->backing_qiov) {
        qemu_iovec_destroy(acb->backing_qiov);
        g_free(acb->backing_qiov);
        acb->backing_qiov = NULL;
    }

    /* Handle I/O error */
    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    acb->qiov_offset += acb->cur_qiov.size;
    acb->cur_pos += acb->cur_qiov.size;
    qemu_iovec_reset(&acb->cur_qiov);

    /* Complete request */
    if (acb->cur_pos >= acb->end_pos) {
        qed_aio_complete(acb, 0);
        return;
    }

    /* Find next cluster and start I/O */
    qed_find_cluster(s, &acb->request,
                     acb->cur_pos, acb->end_pos - acb->cur_pos,
                     io_fn, acb);
}
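
/* Request walkthrough (illustrative): a 192 KiB read at a 64 KiB cluster
 * size makes up to three trips through qed_aio_next_io().  On each trip
 * qed_find_cluster() maps the largest contiguous run starting at cur_pos,
 * qed_aio_read_data()/qed_aio_write_data() performs the I/O for that run,
 * and its completion callback re-enters qed_aio_next_io(), which advances
 * cur_pos and qiov_offset until cur_pos reaches end_pos.
 */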

static BlockAIOCB *qed_aio_setup(BlockDriverState *bs,
                                 int64_t sector_num,
                                 QEMUIOVector *qiov, int nb_sectors,
                                 BlockCompletionFunc *cb,
                                 void *opaque, int flags)
{
    QEDAIOCB *acb = qemu_aio_get(&qed_aiocb_info, bs, cb, opaque);

    trace_qed_aio_setup(bs->opaque, acb, sector_num, nb_sectors,
                        opaque, flags);

    acb->flags = flags;
    acb->qiov = qiov;
    acb->qiov_offset = 0;
    acb->cur_pos = (uint64_t)sector_num * BDRV_SECTOR_SIZE;
    acb->end_pos = acb->cur_pos + nb_sectors * BDRV_SECTOR_SIZE;
    acb->backing_qiov = NULL;
    acb->request.l2_table = NULL;
    qemu_iovec_init(&acb->cur_qiov, qiov->niov);

    /* Start request */
    qed_aio_next_io(acb, 0);
    return &acb->common;
}

static BlockAIOCB *bdrv_qed_aio_readv(BlockDriverState *bs,
                                      int64_t sector_num,
                                      QEMUIOVector *qiov, int nb_sectors,
                                      BlockCompletionFunc *cb,
                                      void *opaque)
{
    return qed_aio_setup(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
}

static BlockAIOCB *bdrv_qed_aio_writev(BlockDriverState *bs,
                                       int64_t sector_num,
                                       QEMUIOVector *qiov, int nb_sectors,
                                       BlockCompletionFunc *cb,
                                       void *opaque)
{
    return qed_aio_setup(bs, sector_num, qiov, nb_sectors, cb,
                         opaque, QED_AIOCB_WRITE);
}

typedef struct {
    Coroutine *co;
    int ret;
    bool done;
} QEDWriteZeroesCB;

static void coroutine_fn qed_co_write_zeroes_cb(void *opaque, int ret)
{
    QEDWriteZeroesCB *cb = opaque;

    cb->done = true;
    cb->ret = ret;
    if (cb->co) {
        qemu_coroutine_enter(cb->co, NULL);
    }
}

static int coroutine_fn bdrv_qed_co_write_zeroes(BlockDriverState *bs,
                                                 int64_t sector_num,
                                                 int nb_sectors,
                                                 BdrvRequestFlags flags)
{
    BlockAIOCB *blockacb;
    BDRVQEDState *s = bs->opaque;
    QEDWriteZeroesCB cb = { .done = false };
    QEMUIOVector qiov;
    struct iovec iov;

    /* Refuse if there are untouched backing file sectors */
    if (bs->backing) {
        if (qed_offset_into_cluster(s, sector_num * BDRV_SECTOR_SIZE) != 0) {
            return -ENOTSUP;
        }
        if (qed_offset_into_cluster(s, nb_sectors * BDRV_SECTOR_SIZE) != 0) {
            return -ENOTSUP;
        }
    }

    /* Zero writes start without an I/O buffer.  If a buffer becomes necessary
     * then it will be allocated during request processing.
     */
    iov.iov_base = NULL;
    iov.iov_len  = nb_sectors * BDRV_SECTOR_SIZE;

    qemu_iovec_init_external(&qiov, &iov, 1);
    blockacb = qed_aio_setup(bs, sector_num, &qiov, nb_sectors,
                             qed_co_write_zeroes_cb, &cb,
                             QED_AIOCB_WRITE | QED_AIOCB_ZERO);
    if (!blockacb) {
        return -EIO;
    }
    if (!cb.done) {
        cb.co = qemu_coroutine_self();
        qemu_coroutine_yield();
    }
    assert(cb.done);
    return cb.ret;
}
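
/* Alignment example (illustrative): with 64 KiB clusters and a backing file,
 * a write_zeroes request covering sectors [128, 256) (64 KiB starting on a
 * cluster boundary) passes both checks above, while one starting at sector 64
 * (offset 32 KiB, mid-cluster) is refused with -ENOTSUP; the generic block
 * layer is then expected to fall back to writing explicit zeroes.
 */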

static int bdrv_qed_truncate(BlockDriverState *bs, int64_t offset)
{
    BDRVQEDState *s = bs->opaque;
    uint64_t old_image_size;
    int ret;

    if (!qed_is_image_size_valid(offset, s->header.cluster_size,
                                 s->header.table_size)) {
        return -EINVAL;
    }

    /* Shrinking is currently not supported */
    if ((uint64_t)offset < s->header.image_size) {
        return -ENOTSUP;
    }

    old_image_size = s->header.image_size;
    s->header.image_size = offset;
    ret = qed_write_header_sync(s);
    if (ret < 0) {
        s->header.image_size = old_image_size;
    }
    return ret;
}

static int64_t bdrv_qed_getlength(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;
    return s->header.image_size;
}

static int bdrv_qed_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    BDRVQEDState *s = bs->opaque;

    memset(bdi, 0, sizeof(*bdi));
    bdi->cluster_size = s->header.cluster_size;
    bdi->is_dirty = s->header.features & QED_F_NEED_CHECK;
    bdi->unallocated_blocks_are_zero = true;
    bdi->can_write_zeroes_with_unmap = true;
    return 0;
}

static int bdrv_qed_change_backing_file(BlockDriverState *bs,
                                        const char *backing_file,
                                        const char *backing_fmt)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader new_header, le_header;
    void *buffer;
    size_t buffer_len, backing_file_len;
    int ret;

    /* Refuse to set backing filename if unknown compat feature bits are
     * active.  If the image uses an unknown compat feature then we may not
     * know the layout of data following the header structure and cannot safely
     * add a new string.
     */
    if (backing_file && (s->header.compat_features &
                         ~QED_COMPAT_FEATURE_MASK)) {
        return -ENOTSUP;
    }

    memcpy(&new_header, &s->header, sizeof(new_header));

    new_header.features &= ~(QED_F_BACKING_FILE |
                             QED_F_BACKING_FORMAT_NO_PROBE);

    /* Adjust feature flags */
    if (backing_file) {
        new_header.features |= QED_F_BACKING_FILE;

        if (qed_fmt_is_raw(backing_fmt)) {
            new_header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    /* Calculate new header size */
    backing_file_len = 0;

    if (backing_file) {
        backing_file_len = strlen(backing_file);
    }

    buffer_len = sizeof(new_header);
    new_header.backing_filename_offset = buffer_len;
    new_header.backing_filename_size = backing_file_len;
    buffer_len += backing_file_len;

    /* Make sure we can rewrite header without failing */
    if (buffer_len > new_header.header_size * new_header.cluster_size) {
        return -ENOSPC;
    }

    /* Prepare new header */
    buffer = g_malloc(buffer_len);

    qed_header_cpu_to_le(&new_header, &le_header);
    memcpy(buffer, &le_header, sizeof(le_header));
    buffer_len = sizeof(le_header);

    if (backing_file) {
        memcpy(buffer + buffer_len, backing_file, backing_file_len);
        buffer_len += backing_file_len;
    }

    /* Write new header */
    ret = bdrv_pwrite_sync(bs->file->bs, 0, buffer, buffer_len);
    g_free(buffer);
    if (ret == 0) {
        memcpy(&s->header, &new_header, sizeof(new_header));
    }
    return ret;
}

static void bdrv_qed_invalidate_cache(BlockDriverState *bs, Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    Error *local_err = NULL;
    int ret;

    bdrv_qed_close(bs);

    bdrv_invalidate_cache(bs->file->bs, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    memset(s, 0, sizeof(BDRVQEDState));
    ret = bdrv_qed_open(bs, NULL, bs->open_flags, &local_err);
    if (local_err) {
        error_setg(errp, "Could not reopen qed layer: %s",
                   error_get_pretty(local_err));
        error_free(local_err);
        return;
    } else if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not reopen qed layer");
        return;
    }
}

static int bdrv_qed_check(BlockDriverState *bs, BdrvCheckResult *result,
                          BdrvCheckMode fix)
{
    BDRVQEDState *s = bs->opaque;

    return qed_check(s, result, !!fix);
}

static QemuOptsList qed_create_opts = {
    .name = "qed-create-opts",
    .head = QTAILQ_HEAD_INITIALIZER(qed_create_opts.head),
    .desc = {
        {
            .name = BLOCK_OPT_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Virtual disk size"
        },
        {
            .name = BLOCK_OPT_BACKING_FILE,
            .type = QEMU_OPT_STRING,
            .help = "File name of a base image"
        },
        {
            .name = BLOCK_OPT_BACKING_FMT,
            .type = QEMU_OPT_STRING,
            .help = "Image format of the base image"
        },
        {
            .name = BLOCK_OPT_CLUSTER_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Cluster size (in bytes)",
            .def_value_str = stringify(QED_DEFAULT_CLUSTER_SIZE)
        },
        {
            .name = BLOCK_OPT_TABLE_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "L1/L2 table size (in clusters)"
        },
        { /* end of list */ }
    }
};

static BlockDriver bdrv_qed = {
    .format_name              = "qed",
    .instance_size            = sizeof(BDRVQEDState),
    .create_opts              = &qed_create_opts,
    .supports_backing         = true,

    .bdrv_probe               = bdrv_qed_probe,
    .bdrv_open                = bdrv_qed_open,
    .bdrv_close               = bdrv_qed_close,
    .bdrv_reopen_prepare      = bdrv_qed_reopen_prepare,
    .bdrv_create              = bdrv_qed_create,
    .bdrv_has_zero_init       = bdrv_has_zero_init_1,
    .bdrv_co_get_block_status = bdrv_qed_co_get_block_status,
    .bdrv_aio_readv           = bdrv_qed_aio_readv,
    .bdrv_aio_writev          = bdrv_qed_aio_writev,
    .bdrv_co_write_zeroes     = bdrv_qed_co_write_zeroes,
    .bdrv_truncate            = bdrv_qed_truncate,
    .bdrv_getlength           = bdrv_qed_getlength,
    .bdrv_get_info            = bdrv_qed_get_info,
    .bdrv_refresh_limits      = bdrv_qed_refresh_limits,
    .bdrv_change_backing_file = bdrv_qed_change_backing_file,
    .bdrv_invalidate_cache    = bdrv_qed_invalidate_cache,
    .bdrv_check               = bdrv_qed_check,
    .bdrv_detach_aio_context  = bdrv_qed_detach_aio_context,
    .bdrv_attach_aio_context  = bdrv_qed_attach_aio_context,
};

static void bdrv_qed_init(void)
{
    bdrv_register(&bdrv_qed);
}

block_init(bdrv_qed_init);