xref: /openbmc/qemu/block/io_uring.c (revision bb7e03cb)
/*
 * Linux io_uring support.
 *
 * Copyright (C) 2009 IBM, Corp.
 * Copyright (C) 2009 Red Hat, Inc.
 * Copyright (C) 2019 Aarushi Mehta
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include <liburing.h>
#include "block/aio.h"
#include "qemu/queue.h"
#include "block/block.h"
#include "block/raw-aio.h"
#include "qemu/coroutine.h"
#include "qapi/error.h"
#include "trace.h"


/* io_uring ring size */
#define MAX_ENTRIES 128

typedef struct LuringAIOCB {
    Coroutine *co;
    struct io_uring_sqe sqeq;
    ssize_t ret;
    QEMUIOVector *qiov;
    bool is_read;
    QSIMPLEQ_ENTRY(LuringAIOCB) next;

    /*
     * Buffered reads may require resubmission, see
     * luring_resubmit_short_read().
     */
    int total_read;
    QEMUIOVector resubmit_qiov;
} LuringAIOCB;

typedef struct LuringQueue {
    int plugged;
    unsigned int in_queue;
    unsigned int in_flight;
    bool blocked;
    QSIMPLEQ_HEAD(, LuringAIOCB) submit_queue;
} LuringQueue;

typedef struct LuringState {
    AioContext *aio_context;

    struct io_uring ring;

    /* Queue for batched submission.  Protected by AioContext lock. */
    LuringQueue io_q;

    /* I/O completion processing.  Only runs in I/O thread. */
    QEMUBH *completion_bh;
} LuringState;

/**
 * luring_resubmit:
 *
 * Resubmit a request by appending it to submit_queue.  The caller must ensure
 * that ioq_submit() is called later so that submit_queue requests are started.
 */
static void luring_resubmit(LuringState *s, LuringAIOCB *luringcb)
{
    QSIMPLEQ_INSERT_TAIL(&s->io_q.submit_queue, luringcb, next);
    s->io_q.in_queue++;
}

/**
 * luring_resubmit_short_read:
 *
 * Short reads are rare but may occur. The remaining read request needs to be
 * resubmitted.
 */
static void luring_resubmit_short_read(LuringState *s, LuringAIOCB *luringcb,
                                       int nread)
{
    QEMUIOVector *resubmit_qiov;
    size_t remaining;

    trace_luring_resubmit_short_read(s, luringcb, nread);

    /* Update read position */
    luringcb->total_read += nread;
    remaining = luringcb->qiov->size - luringcb->total_read;

    /* Shorten qiov */
    resubmit_qiov = &luringcb->resubmit_qiov;
    if (resubmit_qiov->iov == NULL) {
        qemu_iovec_init(resubmit_qiov, luringcb->qiov->niov);
    } else {
        qemu_iovec_reset(resubmit_qiov);
    }
    qemu_iovec_concat(resubmit_qiov, luringcb->qiov, luringcb->total_read,
                      remaining);

    /* Update sqe */
    luringcb->sqeq.off += nread;
    luringcb->sqeq.addr = (__u64)(uintptr_t)luringcb->resubmit_qiov.iov;
    luringcb->sqeq.len = luringcb->resubmit_qiov.niov;

    luring_resubmit(s, luringcb);
}
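
/*
 * Worked example (illustrative numbers, not taken from the code): a readv
 * with qiov->size == 8192 completes with nread == 4096.  total_read becomes
 * 4096 and remaining == 4096, so resubmit_qiov is rebuilt to cover the last
 * 4096 bytes of the original qiov, the sqe offset advances by 4096, and the
 * request is appended to submit_queue for the next ioq_submit() pass.
 */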

/**
 * luring_process_completions:
 * @s: AIO state
 *
 * Fetches completed I/O requests, consumes cqes and invokes their callbacks.
 * The function is somewhat tricky because it supports nested event loops, for
 * example when a request callback invokes aio_poll().
 *
 * The function schedules the completion BH so that it can be called again in
 * a nested event loop.  When there are no events left to complete, the BH is
 * canceled.
 */
static void luring_process_completions(LuringState *s)
{
    struct io_uring_cqe *cqes;
    int total_bytes;
    /*
     * Request completion callbacks can run the nested event loop.
     * Schedule ourselves so the nested event loop will "see" remaining
     * completed requests and process them.  Without this, completion
     * callbacks that wait for other requests using a nested event loop
     * would hang forever.
     *
     * This workaround is needed because io_uring uses poll_wait, which
     * is woken up when new events are added to the uring, thus polling on
     * the same uring fd will block unless more events are received.
     *
     * Other leaf block drivers (drivers that access the data themselves)
     * are networking based, so they poll sockets for data and run the
     * correct coroutine.
     */
    qemu_bh_schedule(s->completion_bh);

    while (io_uring_peek_cqe(&s->ring, &cqes) == 0) {
        LuringAIOCB *luringcb;
        int ret;

        if (!cqes) {
            break;
        }

        luringcb = io_uring_cqe_get_data(cqes);
        ret = cqes->res;
        io_uring_cqe_seen(&s->ring, cqes);
        cqes = NULL;

        /* Change counters one-by-one because we can be nested. */
        s->io_q.in_flight--;
        trace_luring_process_completion(s, luringcb, ret);

        /* total_read is non-zero only for resubmitted read requests */
        total_bytes = ret + luringcb->total_read;

        if (ret < 0) {
            /*
             * Only writev/readv/fsync requests on regular files or host block
             * devices are submitted. Therefore -EAGAIN is not expected but it's
             * known to happen sometimes with Linux SCSI. Submit again and hope
             * the request completes successfully.
             *
             * For more information, see:
             * https://lore.kernel.org/io-uring/20210727165811.284510-3-axboe@kernel.dk/T/#u
             *
             * If the code is changed to submit other types of requests in the
             * future, then this workaround may need to be extended to deal with
             * genuine -EAGAIN results that should not be resubmitted
             * immediately.
             */
            if (ret == -EINTR || ret == -EAGAIN) {
                luring_resubmit(s, luringcb);
                continue;
            }
        } else if (!luringcb->qiov) {
            goto end;
        } else if (total_bytes == luringcb->qiov->size) {
            ret = 0;
        /* Only read/write */
        } else {
            /* Short Read/Write */
            if (luringcb->is_read) {
                if (ret > 0) {
                    luring_resubmit_short_read(s, luringcb, ret);
                    continue;
                } else {
                    /* Pad with zeroes */
                    qemu_iovec_memset(luringcb->qiov, total_bytes, 0,
                                      luringcb->qiov->size - total_bytes);
                    ret = 0;
                }
            } else {
                ret = -ENOSPC;
            }
        }
end:
        luringcb->ret = ret;
        qemu_iovec_destroy(&luringcb->resubmit_qiov);

        /*
         * If the coroutine is already entered it must be in ioq_submit()
         * and will notice luringcb->ret has been filled in when it
         * eventually runs later. Coroutines cannot be entered recursively
         * so avoid doing that!
         */
        if (!qemu_coroutine_entered(luringcb->co)) {
            aio_co_wake(luringcb->co);
        }
    }
    qemu_bh_cancel(s->completion_bh);
}
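
/*
 * Nested-loop sketch (hypothetical scenario, grounded in the comments above):
 * a completion callback that waits for other requests, e.g. by spinning in
 * aio_poll(), re-enters the event loop.  Because the completion BH is still
 * scheduled at that point, the inner loop runs qemu_luring_completion_bh()
 * and consumes the remaining cqes instead of blocking on the uring fd.
 */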

static int ioq_submit(LuringState *s)
{
    int ret = 0;
    LuringAIOCB *luringcb, *luringcb_next;

    while (s->io_q.in_queue > 0) {
        /*
         * Try to fetch sqes from the ring for requests waiting in
         * the overflow queue
         */
        QSIMPLEQ_FOREACH_SAFE(luringcb, &s->io_q.submit_queue, next,
                              luringcb_next) {
            struct io_uring_sqe *sqes = io_uring_get_sqe(&s->ring);
            if (!sqes) {
                break;
            }
            /* Prep sqe for submission */
            *sqes = luringcb->sqeq;
            QSIMPLEQ_REMOVE_HEAD(&s->io_q.submit_queue, next);
        }
        ret = io_uring_submit(&s->ring);
        trace_luring_io_uring_submit(s, ret);
        /* Prevent infinite loop if submission is refused */
        if (ret <= 0) {
            if (ret == -EAGAIN || ret == -EINTR) {
                continue;
            }
            break;
        }
        s->io_q.in_flight += ret;
        s->io_q.in_queue  -= ret;
    }
    s->io_q.blocked = (s->io_q.in_queue > 0);

    if (s->io_q.in_flight) {
        /*
         * Try to complete something right away while requests are still
         * in flight.
         */
        luring_process_completions(s);
    }
    return ret;
}

static void luring_process_completions_and_submit(LuringState *s)
{
    aio_context_acquire(s->aio_context);
    luring_process_completions(s);

    if (!s->io_q.plugged && s->io_q.in_queue > 0) {
        ioq_submit(s);
    }
    aio_context_release(s->aio_context);
}

static void qemu_luring_completion_bh(void *opaque)
{
    LuringState *s = opaque;
    luring_process_completions_and_submit(s);
}

static void qemu_luring_completion_cb(void *opaque)
{
    LuringState *s = opaque;
    luring_process_completions_and_submit(s);
}

static bool qemu_luring_poll_cb(void *opaque)
{
    LuringState *s = opaque;

    return io_uring_cq_ready(&s->ring);
}

static void qemu_luring_poll_ready(void *opaque)
{
    LuringState *s = opaque;

    luring_process_completions_and_submit(s);
}
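
/*
 * How the pieces above fit together: in poll mode the event loop calls
 * qemu_luring_poll_cb() to check for ready cqes and then
 * qemu_luring_poll_ready(); otherwise it waits for the ring fd to become
 * readable and runs qemu_luring_completion_cb().  All paths, including the
 * completion BH, funnel into luring_process_completions_and_submit().
 */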

static void ioq_init(LuringQueue *io_q)
{
    QSIMPLEQ_INIT(&io_q->submit_queue);
    io_q->plugged = 0;
    io_q->in_queue = 0;
    io_q->in_flight = 0;
    io_q->blocked = false;
}

void luring_io_plug(BlockDriverState *bs, LuringState *s)
{
    trace_luring_io_plug(s);
    s->io_q.plugged++;
}

void luring_io_unplug(BlockDriverState *bs, LuringState *s)
{
    assert(s->io_q.plugged);
    trace_luring_io_unplug(s, s->io_q.blocked, s->io_q.plugged,
                           s->io_q.in_queue, s->io_q.in_flight);
    if (--s->io_q.plugged == 0 &&
        !s->io_q.blocked && s->io_q.in_queue > 0) {
        ioq_submit(s);
    }
}
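
/*
 * Batching sketch (hypothetical caller; in QEMU these hooks are normally
 * reached through the block layer's plug/unplug callbacks rather than called
 * directly).  Each request runs in its own coroutine: while plugged and below
 * MAX_ENTRIES, luring_co_submit() only queues the sqe and yields, and the
 * unplug pushes the whole batch to the kernel in one io_uring_submit() call:
 *
 *     luring_io_plug(bs, s);
 *     for (i = 0; i < n_reqs; i++) {              // n_reqs, reqs[]: made up
 *         co = qemu_coroutine_create(one_req_entry, &reqs[i]);
 *         qemu_coroutine_enter(co);               // preps + queues the sqe
 *     }
 *     luring_io_unplug(bs, s);                    // batch hits the kernel
 */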

/**
 * luring_do_submit:
 * @fd: file descriptor for I/O
 * @luringcb: AIO control block
 * @s: AIO state
 * @offset: offset for request
 * @type: type of request
 *
 * Preps the sqe for this request and appends it to the pending queue;
 * submission is kicked off when the queue is unplugged or full.
 */
static int luring_do_submit(int fd, LuringAIOCB *luringcb, LuringState *s,
                            uint64_t offset, int type)
{
    int ret;
    struct io_uring_sqe *sqes = &luringcb->sqeq;

    switch (type) {
    case QEMU_AIO_WRITE:
        io_uring_prep_writev(sqes, fd, luringcb->qiov->iov,
                             luringcb->qiov->niov, offset);
        break;
    case QEMU_AIO_READ:
        io_uring_prep_readv(sqes, fd, luringcb->qiov->iov,
                            luringcb->qiov->niov, offset);
        break;
    case QEMU_AIO_FLUSH:
        io_uring_prep_fsync(sqes, fd, IORING_FSYNC_DATASYNC);
        break;
    default:
        fprintf(stderr, "%s: invalid AIO request type 0x%x, aborting.\n",
                __func__, type);
        abort();
    }
    io_uring_sqe_set_data(sqes, luringcb);

    QSIMPLEQ_INSERT_TAIL(&s->io_q.submit_queue, luringcb, next);
    s->io_q.in_queue++;
    trace_luring_do_submit(s, s->io_q.blocked, s->io_q.plugged,
                           s->io_q.in_queue, s->io_q.in_flight);
    if (!s->io_q.blocked &&
        (!s->io_q.plugged ||
         s->io_q.in_flight + s->io_q.in_queue >= MAX_ENTRIES)) {
        ret = ioq_submit(s);
        trace_luring_do_submit_done(s, ret);
        return ret;
    }
    return 0;
}

int coroutine_fn luring_co_submit(BlockDriverState *bs, LuringState *s, int fd,
                                  uint64_t offset, QEMUIOVector *qiov, int type)
{
    int ret;
    LuringAIOCB luringcb = {
        .co         = qemu_coroutine_self(),
        .ret        = -EINPROGRESS,
        .qiov       = qiov,
        .is_read    = (type == QEMU_AIO_READ),
    };
    trace_luring_co_submit(bs, s, &luringcb, fd, offset, qiov ? qiov->size : 0,
                           type);
    ret = luring_do_submit(fd, &luringcb, s, offset, type);

    if (ret < 0) {
        return ret;
    }

    if (luringcb.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }
    return luringcb.ret;
}
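
/*
 * Caller sketch: a minimal, hypothetical read path modeled on how QEMU's
 * file-posix driver dispatches to io_uring.  example_co_preadv is a made-up
 * name; aio_get_linux_io_uring() and bdrv_get_aio_context() are existing
 * QEMU APIs.
 */
#if 0   /* illustrative only, not compiled */
static int coroutine_fn example_co_preadv(BlockDriverState *bs, int fd,
                                          uint64_t offset, QEMUIOVector *qiov)
{
    /* Fetch the per-AioContext LuringState and delegate the read. */
    LuringState *aio = aio_get_linux_io_uring(bdrv_get_aio_context(bs));

    return luring_co_submit(bs, aio, fd, offset, qiov, QEMU_AIO_READ);
}
#endif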

void luring_detach_aio_context(LuringState *s, AioContext *old_context)
{
    aio_set_fd_handler(old_context, s->ring.ring_fd, false,
                       NULL, NULL, NULL, NULL, s);
    qemu_bh_delete(s->completion_bh);
    s->aio_context = NULL;
}

void luring_attach_aio_context(LuringState *s, AioContext *new_context)
{
    s->aio_context = new_context;
    s->completion_bh = aio_bh_new(new_context, qemu_luring_completion_bh, s);
    aio_set_fd_handler(s->aio_context, s->ring.ring_fd, false,
                       qemu_luring_completion_cb, NULL,
                       qemu_luring_poll_cb, qemu_luring_poll_ready, s);
}

LuringState *luring_init(Error **errp)
{
    int rc;
    LuringState *s = g_new0(LuringState, 1);
    struct io_uring *ring = &s->ring;

    trace_luring_init_state(s, sizeof(*s));

    rc = io_uring_queue_init(MAX_ENTRIES, ring, 0);
    if (rc < 0) {
        /* io_uring_queue_init() returns -errno rather than setting errno */
        error_setg_errno(errp, -rc, "failed to init linux io_uring ring");
        g_free(s);
        return NULL;
    }

    ioq_init(&s->io_q);
#ifdef CONFIG_LIBURING_REGISTER_RING_FD
    if (io_uring_register_ring_fd(&s->ring) < 0) {
        /*
         * Only warn about this error: we will fall back to the non-optimized
         * io_uring operations.
         */
        warn_report("failed to register linux io_uring ring file descriptor");
    }
#endif

    return s;
}

void luring_cleanup(LuringState *s)
{
    io_uring_queue_exit(&s->ring);
    trace_luring_cleanup_state(s);
    g_free(s);
}
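
/*
 * Lifecycle sketch (hypothetical standalone user of this API; inside QEMU
 * the AioContext normally owns the LuringState via
 * aio_setup_linux_io_uring(), and "ctx" below is assumed to exist):
 *
 *     Error *local_err = NULL;
 *     LuringState *s = luring_init(&local_err);
 *     if (!s) {
 *         error_report_err(local_err);
 *         return;
 *     }
 *     luring_attach_aio_context(s, ctx);   // install fd handler and BH
 *     // ... luring_co_submit() calls from coroutines ...
 *     luring_detach_aio_context(s, ctx);   // remove handler before teardown
 *     luring_cleanup(s);                   // io_uring_queue_exit() and free
 */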
453