/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
 * libblkio BlockDriver
 *
 * Copyright Red Hat, Inc.
 *
 * Author:
 *   Stefan Hajnoczi <stefanha@redhat.com>
 */

#include "qemu/osdep.h"
#include <blkio.h>
#include "block/block_int.h"
#include "exec/memory.h" /* for ram_block_discard_disable() */
#include "exec/cpu-common.h" /* for qemu_ram_get_fd() */
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qapi/qmp/qdict.h"
#include "qemu/module.h"

/*
 * Keep the QEMU BlockDriver names identical to the libblkio driver names.
 * Using macros instead of typing out the string literals avoids typos.
 */
#define DRIVER_IO_URING "io_uring"
#define DRIVER_NVME_IO_URING "nvme-io_uring"
#define DRIVER_VIRTIO_BLK_VHOST_USER "virtio-blk-vhost-user"
#define DRIVER_VIRTIO_BLK_VHOST_VDPA "virtio-blk-vhost-vdpa"

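/*
 * Illustrative command lines (a sketch based on the option parsing below;
 * the authoritative schema is the QAPI definition in qapi/block-core.json):
 *
 *   --blockdev io_uring,node-name=drive0,filename=test.img
 *   --blockdev nvme-io_uring,node-name=drive0,filename=/dev/ng0n1,cache.direct=on
 *   --blockdev virtio-blk-vhost-vdpa,node-name=drive0,path=/dev/vhost-vdpa-0,cache.direct=on
 *
 * The io_uring driver accepts cache.direct=on|off; the other drivers require
 * cache.direct=on (see the open functions below).
 */
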
/*
 * Allocated bounce buffers are kept in a list sorted by buffer address.
 */
typedef struct BlkioBounceBuf {
    QLIST_ENTRY(BlkioBounceBuf) next;

    /* The bounce buffer */
    struct iovec buf;
} BlkioBounceBuf;

typedef struct {
    /*
     * libblkio is not thread-safe so this lock protects ->blkio and
     * ->blkioq.
     */
    QemuMutex blkio_lock;
    struct blkio *blkio;
    struct blkioq *blkioq; /* make this multi-queue in the future... */
    int completion_fd;

    /*
     * Polling fetches the next completion into this field.
     *
     * No lock is necessary since only one thread calls aio_poll() and invokes
     * fd and poll handlers.
     */
    struct blkio_completion poll_completion;

    /*
     * Protects ->bounce_pool, ->bounce_bufs, ->bounce_available.
     *
     * Lock ordering: ->bounce_lock before ->blkio_lock.
     */
    CoMutex bounce_lock;

    /* Bounce buffer pool */
    struct blkio_mem_region bounce_pool;

    /* Sorted list of allocated bounce buffers */
    QLIST_HEAD(, BlkioBounceBuf) bounce_bufs;

    /* Queue for coroutines waiting for bounce buffer space */
    CoQueue bounce_available;

    /* The value of the "mem-region-alignment" property */
    size_t mem_region_alignment;

    /* Do blkio_mem_regions need to be set up for I/O buffers? */
    bool needs_mem_regions;

    /* Are file descriptors necessary for blkio_mem_regions? */
    bool needs_mem_region_fd;

    /*
     * May the driver pin mapped memory regions? If so,
     * madvise(MADV_DONTNEED)-style RAM discard must be disabled.
     */
    bool may_pin_mem_regions;
} BDRVBlkioState;

/* Called with s->bounce_lock held */
static int blkio_resize_bounce_pool(BDRVBlkioState *s, int64_t bytes)
{
    /* There can be no allocated bounce buffers during resize */
    assert(QLIST_EMPTY(&s->bounce_bufs));

    /* Pad size to reduce frequency of resize calls */
    bytes += 128 * 1024;

    WITH_QEMU_LOCK_GUARD(&s->blkio_lock) {
        int ret;

        if (s->bounce_pool.addr) {
            blkio_unmap_mem_region(s->blkio, &s->bounce_pool);
            blkio_free_mem_region(s->blkio, &s->bounce_pool);
            memset(&s->bounce_pool, 0, sizeof(s->bounce_pool));
        }

        /* Automatically freed when s->blkio is destroyed */
        ret = blkio_alloc_mem_region(s->blkio, &s->bounce_pool, bytes);
        if (ret < 0) {
            return ret;
        }

        ret = blkio_map_mem_region(s->blkio, &s->bounce_pool);
        if (ret < 0) {
            blkio_free_mem_region(s->blkio, &s->bounce_pool);
            memset(&s->bounce_pool, 0, sizeof(s->bounce_pool));
            return ret;
        }
    }

    return 0;
}

/* Called with s->bounce_lock held */
static bool
blkio_do_alloc_bounce_buffer(BDRVBlkioState *s, BlkioBounceBuf *bounce,
                             int64_t bytes)
{
    void *addr = s->bounce_pool.addr;
    BlkioBounceBuf *cur = NULL;
    BlkioBounceBuf *prev = NULL;
    ptrdiff_t space;

    /*
     * This is just a linear search over the holes between requests. An
     * efficient allocator would be nice.
     */
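    /*
     * Illustration only: with buffers A and B already allocated,
     *
     *   pool.addr                                     pool.addr + pool.len
     *   |--A--|   hole 1   |--B--|          hole 2 (tail)          |
     *
     * the loop below places a new buffer at the start of the first hole that
     * fits, keeping bounce_bufs sorted by address. If no hole between
     * buffers fits, the tail check after the loop is the last resort.
     */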
    QLIST_FOREACH(cur, &s->bounce_bufs, next) {
        space = cur->buf.iov_base - addr;
        if (bytes <= space) {
            QLIST_INSERT_BEFORE(cur, bounce, next);
            bounce->buf.iov_base = addr;
            bounce->buf.iov_len = bytes;
            return true;
        }

        addr = cur->buf.iov_base + cur->buf.iov_len;
        prev = cur;
    }

    /* Is there space after the last request? */
    space = s->bounce_pool.addr + s->bounce_pool.len - addr;
    if (bytes > space) {
        return false;
    }
    if (prev) {
        QLIST_INSERT_AFTER(prev, bounce, next);
    } else {
        QLIST_INSERT_HEAD(&s->bounce_bufs, bounce, next);
    }
    bounce->buf.iov_base = addr;
    bounce->buf.iov_len = bytes;
    return true;
}

static int coroutine_fn
blkio_alloc_bounce_buffer(BDRVBlkioState *s, BlkioBounceBuf *bounce,
                          int64_t bytes)
{
    /*
     * Ensure fairness: first time around we join the back of the queue,
     * subsequently we join the front so we don't lose our place.
     */
    CoQueueWaitFlags wait_flags = 0;

    QEMU_LOCK_GUARD(&s->bounce_lock);

    /* Ensure fairness: don't even try if other requests are already waiting */
    if (!qemu_co_queue_empty(&s->bounce_available)) {
        qemu_co_queue_wait_flags(&s->bounce_available, &s->bounce_lock,
                                 wait_flags);
        wait_flags = CO_QUEUE_WAIT_FRONT;
    }

    while (true) {
        if (blkio_do_alloc_bounce_buffer(s, bounce, bytes)) {
            /* Kick the next queued request since there may be space */
            qemu_co_queue_next(&s->bounce_available);
            return 0;
        }

        /*
         * If there are no in-flight requests then the pool was simply too
         * small.
         */
        if (QLIST_EMPTY(&s->bounce_bufs)) {
            bool ok;
            int ret;

            ret = blkio_resize_bounce_pool(s, bytes);
            if (ret < 0) {
                /* Kick the next queued request since that may fail too */
                qemu_co_queue_next(&s->bounce_available);
                return ret;
            }

            ok = blkio_do_alloc_bounce_buffer(s, bounce, bytes);
            assert(ok); /* must have space this time */
            return 0;
        }

        qemu_co_queue_wait_flags(&s->bounce_available, &s->bounce_lock,
                                 wait_flags);
        wait_flags = CO_QUEUE_WAIT_FRONT;
    }
}

static void coroutine_fn blkio_free_bounce_buffer(BDRVBlkioState *s,
                                                  BlkioBounceBuf *bounce)
{
    QEMU_LOCK_GUARD(&s->bounce_lock);

    QLIST_REMOVE(bounce, next);

    /* Wake up waiting coroutines since space may now be available */
    qemu_co_queue_next(&s->bounce_available);
}

/* For async to .bdrv_co_*() conversion */
typedef struct {
    Coroutine *coroutine;
    int ret;
} BlkioCoData;

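/*
 * Request lifecycle: a coroutine enqueues a request with a pointer to its
 * BlkioCoData as user_data, submits, and yields. When the completion is
 * reaped (by the fd handler or the poll handler below), cod->ret is filled
 * in and the coroutine is woken with aio_co_wake().
 */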
static void blkio_completion_fd_read(void *opaque)
{
    BlockDriverState *bs = opaque;
    BDRVBlkioState *s = bs->opaque;
    uint64_t val;
    int ret;

    /* Polling may have already fetched a completion */
    if (s->poll_completion.user_data != NULL) {
        BlkioCoData *cod = s->poll_completion.user_data;
        cod->ret = s->poll_completion.ret;

        /* Clear it in case aio_co_wake() enters a nested event loop */
        s->poll_completion.user_data = NULL;

        aio_co_wake(cod->coroutine);
    }

    /* Reset completion fd status */
    ret = read(s->completion_fd, &val, sizeof(val));

    /* Ignore errors, there's nothing we can do */
    (void)ret;

    /*
     * Reading one completion at a time makes nested event loop re-entrancy
     * simple. Change this loop to get multiple completions in one go if it
     * becomes a performance bottleneck.
     */
    while (true) {
        struct blkio_completion completion;

        WITH_QEMU_LOCK_GUARD(&s->blkio_lock) {
            ret = blkioq_do_io(s->blkioq, &completion, 0, 1, NULL);
        }
        if (ret != 1) {
            break;
        }

        BlkioCoData *cod = completion.user_data;
        cod->ret = completion.ret;
        aio_co_wake(cod->coroutine);
    }
}

static bool blkio_completion_fd_poll(void *opaque)
{
    BlockDriverState *bs = opaque;
    BDRVBlkioState *s = bs->opaque;
    int ret;

    /* Just in case we already fetched a completion */
    if (s->poll_completion.user_data != NULL) {
        return true;
    }

    WITH_QEMU_LOCK_GUARD(&s->blkio_lock) {
        ret = blkioq_do_io(s->blkioq, &s->poll_completion, 0, 1, NULL);
    }
    return ret == 1;
}

static void blkio_completion_fd_poll_ready(void *opaque)
{
    blkio_completion_fd_read(opaque);
}

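/*
 * The three handlers above are registered on the queue's completion fd:
 * blkio_completion_fd_read() runs when the eventfd becomes readable,
 * blkio_completion_fd_poll() lets the AioContext busy-poll for a completion
 * without a read() syscall, and blkio_completion_fd_poll_ready() delivers a
 * completion fetched by polling.
 */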
static void blkio_attach_aio_context(BlockDriverState *bs,
                                     AioContext *new_context)
{
    BDRVBlkioState *s = bs->opaque;

    aio_set_fd_handler(new_context,
                       s->completion_fd,
                       false,
                       blkio_completion_fd_read,
                       NULL,
                       blkio_completion_fd_poll,
                       blkio_completion_fd_poll_ready,
                       bs);
}

static void blkio_detach_aio_context(BlockDriverState *bs)
{
    BDRVBlkioState *s = bs->opaque;

    aio_set_fd_handler(bdrv_get_aio_context(bs),
                       s->completion_fd,
                       false, NULL, NULL, NULL, NULL, NULL);
}

/* Call with s->blkio_lock held to submit I/O after enqueuing a new request */
static void blkio_submit_io(BlockDriverState *bs)
{
    if (qatomic_read(&bs->io_plugged) == 0) {
        BDRVBlkioState *s = bs->opaque;

        blkioq_do_io(s->blkioq, NULL, 0, 0, NULL);
    }
}

static int coroutine_fn
blkio_co_pdiscard(BlockDriverState *bs, int64_t offset, int64_t bytes)
{
    BDRVBlkioState *s = bs->opaque;
    BlkioCoData cod = {
        .coroutine = qemu_coroutine_self(),
    };

    WITH_QEMU_LOCK_GUARD(&s->blkio_lock) {
        blkioq_discard(s->blkioq, offset, bytes, &cod, 0);
        blkio_submit_io(bs);
    }

    qemu_coroutine_yield();
    return cod.ret;
}

static int coroutine_fn
blkio_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
                QEMUIOVector *qiov, BdrvRequestFlags flags)
{
    BlkioCoData cod = {
        .coroutine = qemu_coroutine_self(),
    };
    BDRVBlkioState *s = bs->opaque;
    bool use_bounce_buffer =
        s->needs_mem_regions && !(flags & BDRV_REQ_REGISTERED_BUF);
    BlkioBounceBuf bounce;
    struct iovec *iov = qiov->iov;
    int iovcnt = qiov->niov;

    if (use_bounce_buffer) {
        int ret = blkio_alloc_bounce_buffer(s, &bounce, bytes);
        if (ret < 0) {
            return ret;
        }

        iov = &bounce.buf;
        iovcnt = 1;
    }

    WITH_QEMU_LOCK_GUARD(&s->blkio_lock) {
        blkioq_readv(s->blkioq, offset, iov, iovcnt, &cod, 0);
        blkio_submit_io(bs);
    }

    qemu_coroutine_yield();

    if (use_bounce_buffer) {
        if (cod.ret == 0) {
            qemu_iovec_from_buf(qiov, 0,
                                bounce.buf.iov_base,
                                bounce.buf.iov_len);
        }

        blkio_free_bounce_buffer(s, &bounce);
    }

    return cod.ret;
}

static int coroutine_fn blkio_co_pwritev(BlockDriverState *bs, int64_t offset,
        int64_t bytes, QEMUIOVector *qiov, BdrvRequestFlags flags)
{
    uint32_t blkio_flags = (flags & BDRV_REQ_FUA) ? BLKIO_REQ_FUA : 0;
    BlkioCoData cod = {
        .coroutine = qemu_coroutine_self(),
    };
    BDRVBlkioState *s = bs->opaque;
    bool use_bounce_buffer =
        s->needs_mem_regions && !(flags & BDRV_REQ_REGISTERED_BUF);
    BlkioBounceBuf bounce;
    struct iovec *iov = qiov->iov;
    int iovcnt = qiov->niov;

    if (use_bounce_buffer) {
        int ret = blkio_alloc_bounce_buffer(s, &bounce, bytes);
        if (ret < 0) {
            return ret;
        }

        qemu_iovec_to_buf(qiov, 0, bounce.buf.iov_base, bytes);
        iov = &bounce.buf;
        iovcnt = 1;
    }

    WITH_QEMU_LOCK_GUARD(&s->blkio_lock) {
        blkioq_writev(s->blkioq, offset, iov, iovcnt, &cod, blkio_flags);
        blkio_submit_io(bs);
    }

    qemu_coroutine_yield();

    if (use_bounce_buffer) {
        blkio_free_bounce_buffer(s, &bounce);
    }

    return cod.ret;
}
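
/*
 * Note the asymmetry in the bounce-buffer paths above: writes copy data into
 * the bounce buffer before submission, while reads copy data out of it only
 * after the request completes successfully.
 */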

static int coroutine_fn blkio_co_flush(BlockDriverState *bs)
{
    BDRVBlkioState *s = bs->opaque;
    BlkioCoData cod = {
        .coroutine = qemu_coroutine_self(),
    };

    WITH_QEMU_LOCK_GUARD(&s->blkio_lock) {
        blkioq_flush(s->blkioq, &cod, 0);
        blkio_submit_io(bs);
    }

    qemu_coroutine_yield();
    return cod.ret;
}

static int coroutine_fn blkio_co_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int64_t bytes, BdrvRequestFlags flags)
{
    BDRVBlkioState *s = bs->opaque;
    BlkioCoData cod = {
        .coroutine = qemu_coroutine_self(),
    };
    uint32_t blkio_flags = 0;

    if (flags & BDRV_REQ_FUA) {
        blkio_flags |= BLKIO_REQ_FUA;
    }
    if (!(flags & BDRV_REQ_MAY_UNMAP)) {
        blkio_flags |= BLKIO_REQ_NO_UNMAP;
    }
    if (flags & BDRV_REQ_NO_FALLBACK) {
        blkio_flags |= BLKIO_REQ_NO_FALLBACK;
    }

    WITH_QEMU_LOCK_GUARD(&s->blkio_lock) {
        blkioq_write_zeroes(s->blkioq, offset, bytes, &cod, blkio_flags);
        blkio_submit_io(bs);
    }

    qemu_coroutine_yield();
    return cod.ret;
}

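/*
 * While bdrv_io_plug() holds bs->io_plugged above zero, blkio_submit_io()
 * leaves enqueued requests pending; the matching bdrv_io_unplug() ends up
 * here and submits the whole batch with a single blkioq_do_io() call.
 */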
static void blkio_io_unplug(BlockDriverState *bs)
{
    BDRVBlkioState *s = bs->opaque;

    WITH_QEMU_LOCK_GUARD(&s->blkio_lock) {
        blkio_submit_io(bs);
    }
}

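/*
 * Result of blkio_mem_region_from_host(): BMRR_OK means *region was filled
 * in, BMRR_SKIP means this buffer should be silently ignored (see the
 * pc-bios note below), and BMRR_FAIL means a real error (errp is set).
 */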
typedef enum {
    BMRR_OK,
    BMRR_SKIP,
    BMRR_FAIL,
} BlkioMemRegionResult;

/*
 * Produce a struct blkio_mem_region for a given address and size.
 *
 * This function produces identical results when called multiple times with the
 * same arguments. This property is necessary because blkio_unmap_mem_region()
 * must receive the same struct blkio_mem_region field values that were passed
 * to blkio_map_mem_region().
 */
static BlkioMemRegionResult
blkio_mem_region_from_host(BlockDriverState *bs,
                           void *host, size_t size,
                           struct blkio_mem_region *region,
                           Error **errp)
{
    BDRVBlkioState *s = bs->opaque;
    int fd = -1;
    ram_addr_t fd_offset = 0;

    if (((uintptr_t)host | size) % s->mem_region_alignment) {
        error_setg(errp, "unaligned buf %p with size %zu", host, size);
        return BMRR_FAIL;
    }

    /* Attempt to find the fd for the underlying memory */
    if (s->needs_mem_region_fd) {
        RAMBlock *ram_block;
        RAMBlock *end_block;
        ram_addr_t offset;

        /*
         * bdrv_register_buf() is called with the BQL held so mr lives at least
         * until this function returns.
         */
        ram_block = qemu_ram_block_from_host(host, false, &fd_offset);
        if (ram_block) {
            fd = qemu_ram_get_fd(ram_block);
        }
        if (fd == -1) {
            /*
             * Ideally every RAMBlock would have an fd. pc-bios and other
             * things don't. Luckily they are usually not I/O buffers and we
             * can just ignore them.
             */
            return BMRR_SKIP;
        }

        /* Make sure the fd covers the entire range */
        end_block = qemu_ram_block_from_host(host + size - 1, false, &offset);
        if (ram_block != end_block) {
            error_setg(errp, "registered buffer at %p with size %zu extends "
                       "beyond RAMBlock", host, size);
            return BMRR_FAIL;
        }
    }

    *region = (struct blkio_mem_region){
        .addr = host,
        .len = size,
        .fd = fd,
        .fd_offset = fd_offset,
    };
    return BMRR_OK;
}

static bool blkio_register_buf(BlockDriverState *bs, void *host, size_t size,
                               Error **errp)
{
    BDRVBlkioState *s = bs->opaque;
    struct blkio_mem_region region;
    BlkioMemRegionResult region_result;
    int ret;

    /*
     * Mapping memory regions conflicts with RAM discard (virtio-mem) when
     * there is pinning, so only do it when necessary.
     */
    if (!s->needs_mem_regions && s->may_pin_mem_regions) {
        return true;
    }

    region_result = blkio_mem_region_from_host(bs, host, size, &region, errp);
    if (region_result == BMRR_SKIP) {
        return true;
    } else if (region_result != BMRR_OK) {
        return false;
    }

    WITH_QEMU_LOCK_GUARD(&s->blkio_lock) {
        ret = blkio_map_mem_region(s->blkio, &region);
    }

    if (ret < 0) {
        error_setg(errp, "Failed to add blkio mem region %p with size %zu: %s",
                   host, size, blkio_get_error_msg());
        return false;
    }
    return true;
}

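/*
 * No errors are reported here: if blkio_mem_region_from_host() cannot
 * reproduce the exact region that was passed to blkio_map_mem_region() (see
 * its determinism requirement above), the buffer was never mapped in
 * blkio_register_buf() and there is nothing to unmap.
 */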
static void blkio_unregister_buf(BlockDriverState *bs, void *host, size_t size)
{
    BDRVBlkioState *s = bs->opaque;
    struct blkio_mem_region region;

    /* See blkio_register_buf() */
    if (!s->needs_mem_regions && s->may_pin_mem_regions) {
        return;
    }

    if (blkio_mem_region_from_host(bs, host, size, &region, NULL) != BMRR_OK) {
        return;
    }

    WITH_QEMU_LOCK_GUARD(&s->blkio_lock) {
        blkio_unmap_mem_region(s->blkio, &region);
    }
}

static int blkio_io_uring_open(BlockDriverState *bs, QDict *options, int flags,
                               Error **errp)
{
    const char *filename = qdict_get_str(options, "filename");
    BDRVBlkioState *s = bs->opaque;
    int ret;

    ret = blkio_set_str(s->blkio, "path", filename);
    qdict_del(options, "filename");
    if (ret < 0) {
        error_setg_errno(errp, -ret, "failed to set path: %s",
                         blkio_get_error_msg());
        return ret;
    }

    if (flags & BDRV_O_NOCACHE) {
        ret = blkio_set_bool(s->blkio, "direct", true);
        if (ret < 0) {
            error_setg_errno(errp, -ret, "failed to set direct: %s",
                             blkio_get_error_msg());
            return ret;
        }
    }

    return 0;
}

static int blkio_nvme_io_uring(BlockDriverState *bs, QDict *options, int flags,
                               Error **errp)
{
    const char *filename = qdict_get_str(options, "filename");
    BDRVBlkioState *s = bs->opaque;
    int ret;

    ret = blkio_set_str(s->blkio, "path", filename);
    qdict_del(options, "filename");
    if (ret < 0) {
        error_setg_errno(errp, -ret, "failed to set path: %s",
                         blkio_get_error_msg());
        return ret;
    }

    if (!(flags & BDRV_O_NOCACHE)) {
        error_setg(errp, "cache.direct=off is not supported");
        return -EINVAL;
    }

    return 0;
}

static int blkio_virtio_blk_common_open(BlockDriverState *bs,
        QDict *options, int flags, Error **errp)
{
    const char *path = qdict_get_try_str(options, "path");
    BDRVBlkioState *s = bs->opaque;
    int ret;

    if (!path) {
        error_setg(errp, "missing 'path' option");
        return -EINVAL;
    }

    ret = blkio_set_str(s->blkio, "path", path);
    qdict_del(options, "path");
    if (ret < 0) {
        error_setg_errno(errp, -ret, "failed to set path: %s",
                         blkio_get_error_msg());
        return ret;
    }

    if (!(flags & BDRV_O_NOCACHE)) {
        error_setg(errp, "cache.direct=off is not supported");
        return -EINVAL;
    }
    return 0;
}

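/*
 * Common open sequence: create the libblkio instance for the named driver,
 * apply driver-specific options, set read-only if needed, connect, query the
 * memory-region properties that decide whether bounce buffers and fds are
 * required, start the instance, and finally wire queue 0's completion fd
 * into the AioContext.
 */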
static int blkio_file_open(BlockDriverState *bs, QDict *options, int flags,
                           Error **errp)
{
    const char *blkio_driver = bs->drv->protocol_name;
    BDRVBlkioState *s = bs->opaque;
    int ret;

    ret = blkio_create(blkio_driver, &s->blkio);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "blkio_create failed: %s",
                         blkio_get_error_msg());
        return ret;
    }

    if (strcmp(blkio_driver, DRIVER_IO_URING) == 0) {
        ret = blkio_io_uring_open(bs, options, flags, errp);
    } else if (strcmp(blkio_driver, DRIVER_NVME_IO_URING) == 0) {
        ret = blkio_nvme_io_uring(bs, options, flags, errp);
    } else if (strcmp(blkio_driver, DRIVER_VIRTIO_BLK_VHOST_USER) == 0) {
        ret = blkio_virtio_blk_common_open(bs, options, flags, errp);
    } else if (strcmp(blkio_driver, DRIVER_VIRTIO_BLK_VHOST_VDPA) == 0) {
        ret = blkio_virtio_blk_common_open(bs, options, flags, errp);
    } else {
        g_assert_not_reached();
    }
    if (ret < 0) {
        blkio_destroy(&s->blkio);
        return ret;
    }

    if (!(flags & BDRV_O_RDWR)) {
        ret = blkio_set_bool(s->blkio, "read-only", true);
        if (ret < 0) {
            error_setg_errno(errp, -ret, "failed to set read-only: %s",
                             blkio_get_error_msg());
            blkio_destroy(&s->blkio);
            return ret;
        }
    }

    ret = blkio_connect(s->blkio);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "blkio_connect failed: %s",
                         blkio_get_error_msg());
        blkio_destroy(&s->blkio);
        return ret;
    }

    ret = blkio_get_bool(s->blkio,
                         "needs-mem-regions",
                         &s->needs_mem_regions);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "failed to get needs-mem-regions: %s",
                         blkio_get_error_msg());
        blkio_destroy(&s->blkio);
        return ret;
    }

    ret = blkio_get_bool(s->blkio,
                         "needs-mem-region-fd",
                         &s->needs_mem_region_fd);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "failed to get needs-mem-region-fd: %s",
                         blkio_get_error_msg());
        blkio_destroy(&s->blkio);
        return ret;
    }

    ret = blkio_get_uint64(s->blkio,
                           "mem-region-alignment",
                           &s->mem_region_alignment);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "failed to get mem-region-alignment: %s",
                         blkio_get_error_msg());
        blkio_destroy(&s->blkio);
        return ret;
    }

    ret = blkio_get_bool(s->blkio,
                         "may-pin-mem-regions",
                         &s->may_pin_mem_regions);
    if (ret < 0) {
        /* Be conservative (assume pinning) if the property is not supported */
        s->may_pin_mem_regions = s->needs_mem_regions;
    }

    /*
     * Notify if libblkio drivers pin memory and prevent features like
     * virtio-mem from working.
     */
    if (s->may_pin_mem_regions) {
        ret = ram_block_discard_disable(true);
        if (ret < 0) {
            error_setg_errno(errp, -ret, "ram_block_discard_disable() failed");
            blkio_destroy(&s->blkio);
            return ret;
        }
    }

    ret = blkio_start(s->blkio);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "blkio_start failed: %s",
                         blkio_get_error_msg());
        blkio_destroy(&s->blkio);
        if (s->may_pin_mem_regions) {
            ram_block_discard_disable(false);
        }
        return ret;
    }

    bs->supported_write_flags = BDRV_REQ_FUA | BDRV_REQ_REGISTERED_BUF;
    bs->supported_zero_flags = BDRV_REQ_FUA | BDRV_REQ_MAY_UNMAP |
                               BDRV_REQ_NO_FALLBACK;

    qemu_mutex_init(&s->blkio_lock);
    qemu_co_mutex_init(&s->bounce_lock);
    qemu_co_queue_init(&s->bounce_available);
    QLIST_INIT(&s->bounce_bufs);
    s->blkioq = blkio_get_queue(s->blkio, 0);
    s->completion_fd = blkioq_get_completion_fd(s->blkioq);

    blkio_attach_aio_context(bs, bdrv_get_aio_context(bs));
    return 0;
}

static void blkio_close(BlockDriverState *bs)
{
    BDRVBlkioState *s = bs->opaque;

    /* There is no destroy() API for s->bounce_lock */

    qemu_mutex_destroy(&s->blkio_lock);
    blkio_detach_aio_context(bs);
    blkio_destroy(&s->blkio);

    if (s->may_pin_mem_regions) {
        ram_block_discard_disable(false);
    }
}

static int64_t blkio_getlength(BlockDriverState *bs)
{
    BDRVBlkioState *s = bs->opaque;
    uint64_t capacity;
    int ret;

    WITH_QEMU_LOCK_GUARD(&s->blkio_lock) {
        ret = blkio_get_uint64(s->blkio, "capacity", &capacity);
    }
    if (ret < 0) {
        return ret; /* already a negative errno; don't flip its sign */
    }

    return capacity;
}

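/*
 * No driver-specific BlockDriverInfo fields (such as cluster_size) apply
 * here, so report success with everything left at zero.
 */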
static int blkio_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    return 0;
}

static void blkio_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BDRVBlkioState *s = bs->opaque;
    QEMU_LOCK_GUARD(&s->blkio_lock);
    int value;
    int ret;

    ret = blkio_get_int(s->blkio, "request-alignment", &value);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "failed to get \"request-alignment\": %s",
                         blkio_get_error_msg());
        return;
    }
    bs->bl.request_alignment = value;
    if (bs->bl.request_alignment < 1 ||
        bs->bl.request_alignment >= INT_MAX ||
        !is_power_of_2(bs->bl.request_alignment)) {
        error_setg(errp, "invalid \"request-alignment\" value %" PRIu32 ", "
                   "must be a power of 2 less than INT_MAX",
                   bs->bl.request_alignment);
        return;
    }

    ret = blkio_get_int(s->blkio, "optimal-io-size", &value);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "failed to get \"optimal-io-size\": %s",
                         blkio_get_error_msg());
        return;
    }
    bs->bl.opt_transfer = value;
    if (bs->bl.opt_transfer > INT_MAX ||
        (bs->bl.opt_transfer % bs->bl.request_alignment)) {
        error_setg(errp, "invalid \"optimal-io-size\" value %" PRIu32 ", must "
                   "be a multiple of %" PRIu32, bs->bl.opt_transfer,
                   bs->bl.request_alignment);
        return;
    }

    ret = blkio_get_int(s->blkio, "max-transfer", &value);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "failed to get \"max-transfer\": %s",
                         blkio_get_error_msg());
        return;
    }
    bs->bl.max_transfer = value;
    if ((bs->bl.max_transfer % bs->bl.request_alignment) ||
        (bs->bl.opt_transfer && (bs->bl.max_transfer % bs->bl.opt_transfer))) {
        error_setg(errp, "invalid \"max-transfer\" value %" PRIu32 ", must be "
                   "a multiple of %" PRIu32 " and %" PRIu32 " (if non-zero)",
                   bs->bl.max_transfer, bs->bl.request_alignment,
                   bs->bl.opt_transfer);
        return;
    }

    ret = blkio_get_int(s->blkio, "buf-alignment", &value);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "failed to get \"buf-alignment\": %s",
                         blkio_get_error_msg());
        return;
    }
    if (value < 1) {
        error_setg(errp, "invalid \"buf-alignment\" value %d, must be "
                   "positive", value);
        return;
    }
    bs->bl.min_mem_alignment = value;

    ret = blkio_get_int(s->blkio, "optimal-buf-alignment", &value);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "failed to get \"optimal-buf-alignment\": %s",
                         blkio_get_error_msg());
        return;
    }
    if (value < 1) {
        error_setg(errp, "invalid \"optimal-buf-alignment\" value %d, "
                   "must be positive", value);
        return;
    }
    bs->bl.opt_mem_alignment = value;

    ret = blkio_get_int(s->blkio, "max-segments", &value);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "failed to get \"max-segments\": %s",
                         blkio_get_error_msg());
        return;
    }
    if (value < 1) {
        error_setg(errp, "invalid \"max-segments\" value %d, must be positive",
                   value);
        return;
    }
    bs->bl.max_iov = value;
}

/*
 * TODO
 * Missing libblkio APIs:
 * - block_status
 * - co_invalidate_cache
 *
 * Out of scope?
 * - create
 * - truncate
 */

#define BLKIO_DRIVER(name, ...) \
    { \
        .format_name             = name, \
        .protocol_name           = name, \
        .instance_size           = sizeof(BDRVBlkioState), \
        .bdrv_file_open          = blkio_file_open, \
        .bdrv_close              = blkio_close, \
        .bdrv_getlength          = blkio_getlength, \
        .bdrv_get_info           = blkio_get_info, \
        .bdrv_attach_aio_context = blkio_attach_aio_context, \
        .bdrv_detach_aio_context = blkio_detach_aio_context, \
        .bdrv_co_pdiscard        = blkio_co_pdiscard, \
        .bdrv_co_preadv          = blkio_co_preadv, \
        .bdrv_co_pwritev         = blkio_co_pwritev, \
        .bdrv_co_flush_to_disk   = blkio_co_flush, \
        .bdrv_co_pwrite_zeroes   = blkio_co_pwrite_zeroes, \
        .bdrv_io_unplug          = blkio_io_unplug, \
        .bdrv_refresh_limits     = blkio_refresh_limits, \
        .bdrv_register_buf       = blkio_register_buf, \
        .bdrv_unregister_buf     = blkio_unregister_buf, \
        __VA_ARGS__ \
    }

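/*
 * One BlockDriver is instantiated per libblkio driver. The variadic
 * arguments let a driver override fields, e.g. .bdrv_needs_filename for the
 * drivers that take filename= instead of path=.
 */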
static BlockDriver bdrv_io_uring = BLKIO_DRIVER(
    DRIVER_IO_URING,
    .bdrv_needs_filename = true,
);

static BlockDriver bdrv_nvme_io_uring = BLKIO_DRIVER(
    DRIVER_NVME_IO_URING,
    .bdrv_needs_filename = true,
);

static BlockDriver bdrv_virtio_blk_vhost_user = BLKIO_DRIVER(
    DRIVER_VIRTIO_BLK_VHOST_USER
);

static BlockDriver bdrv_virtio_blk_vhost_vdpa = BLKIO_DRIVER(
    DRIVER_VIRTIO_BLK_VHOST_VDPA
);

static void bdrv_blkio_init(void)
{
    bdrv_register(&bdrv_io_uring);
    bdrv_register(&bdrv_nvme_io_uring);
    bdrv_register(&bdrv_virtio_blk_vhost_user);
    bdrv_register(&bdrv_virtio_blk_vhost_vdpa);
}

block_init(bdrv_blkio_init);