/*
 * Linux native AIO support.
 *
 * Copyright (C) 2009 IBM, Corp.
 * Copyright (C) 2009 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "block/aio.h"
#include "qemu/queue.h"
#include "block/block.h"
#include "block/raw-aio.h"
#include "qemu/event_notifier.h"
#include "qemu/coroutine.h"
#include "qemu/defer-call.h"
#include "qapi/error.h"
#include "sysemu/block-backend.h"

/* Only used for assertions.  */
#include "qemu/coroutine_int.h"

#include <libaio.h>

/*
 * Queue size (per-device).
 *
 * XXX: eventually we need to communicate this to the guest and/or make it
 *      tunable by the guest.  If more requests than this are outstanding at
 *      a time, io_submit returns EAGAIN, which is communicated to the guest
 *      as an I/O error.
 */
#define MAX_EVENTS 1024

/* Maximum number of requests in a batch (default value). */
#define DEFAULT_MAX_BATCH 32

struct qemu_laiocb {
    Coroutine *co;
    LinuxAioState *ctx;
    struct iocb iocb;
    ssize_t ret;
    size_t nbytes;
    QEMUIOVector *qiov;
    bool is_read;
    QSIMPLEQ_ENTRY(qemu_laiocb) next;
};

typedef struct {
    unsigned int in_queue;
    unsigned int in_flight;
    bool blocked;
    QSIMPLEQ_HEAD(, qemu_laiocb) pending;
} LaioQueue;

struct LinuxAioState {
    AioContext *aio_context;

    io_context_t ctx;
    EventNotifier e;

    /* No locking required, only accessed from AioContext home thread */
    LaioQueue io_q;
    QEMUBH *completion_bh;
    int event_idx;
    int event_max;
};

static void ioq_submit(LinuxAioState *s);

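/*
 * Reassemble the 64-bit request status from the two io_event fields: res2
 * supplies the high 32 bits and res the low 32 bits.  The combined value is
 * the byte count on success or a negative errno on failure, matching the
 * interpretation in qemu_laio_process_completion() below.
 */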
static inline ssize_t io_event_ret(struct io_event *ev)
{
    return (ssize_t)(((uint64_t)ev->res2 << 32) | ev->res);
}

/*
 * Completes an AIO request.
 */
static void qemu_laio_process_completion(struct qemu_laiocb *laiocb)
{
    int ret;

    ret = laiocb->ret;
    if (ret != -ECANCELED) {
        if (ret == laiocb->nbytes) {
            ret = 0;
        } else if (ret >= 0) {
            /* Short reads mean EOF, pad with zeros. */
            if (laiocb->is_read) {
                qemu_iovec_memset(laiocb->qiov, ret, 0,
                    laiocb->qiov->size - ret);
            } else {
                /* Short writes are treated as errors. */
                ret = -ENOSPC;
            }
        }
    }

    laiocb->ret = ret;

    /*
     * If the coroutine is already entered, it must be in ioq_submit() and
     * will notice that laiocb->ret has been filled in when it eventually
     * runs later.  Coroutines cannot be entered recursively, so avoid
     * doing that!
     */
    assert(laiocb->co->ctx == laiocb->ctx->aio_context);
    if (!qemu_coroutine_entered(laiocb->co)) {
        aio_co_wake(laiocb->co);
    }
}

/**
 * aio_ring buffer, which is shared between userspace and the kernel.
 *
 * This is copied from linux/fs/aio.c; a common header does not exist,
 * but AIO has existed for ages, so we assume the ABI is stable.
 */
struct aio_ring {
    unsigned    id;    /* kernel internal index number */
    unsigned    nr;    /* number of io_events */
    unsigned    head;  /* Written to by userland or by kernel. */
    unsigned    tail;

    unsigned    magic;
    unsigned    compat_features;
    unsigned    incompat_features;
    unsigned    header_length;  /* size of aio_ring */

    struct io_event io_events[];
};
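
/*
 * Note: the io_context_t handle returned by io_setup() is, in practice, the
 * userspace address of the kernel's aio_ring mapping, which is what makes
 * the (struct aio_ring *) casts below work.  This relies on the stable-ABI
 * assumption stated above rather than on any documented interface.
 */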

/**
 * io_getevents_peek:
 * @ctx: AIO context
 * @events: pointer to the events array, output value
 *
 * Returns the number of completed events and sets a pointer
 * to the events array.  This function does not update the internal
 * ring buffer; it only reads head and tail.  When @events has been
 * processed, io_getevents_commit() must be called.
 */
static inline unsigned int io_getevents_peek(io_context_t ctx,
                                             struct io_event **events)
{
    struct aio_ring *ring = (struct aio_ring *)ctx;
    unsigned int head = ring->head, tail = ring->tail;
    unsigned int nr;

    nr = tail >= head ? tail - head : ring->nr - head;
    *events = ring->io_events + head;
    /* To avoid speculative loads of s->events[i] before observing tail.
       Paired with smp_wmb() inside linux/fs/aio.c: aio_complete(). */
    smp_rmb();

    return nr;
}
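
/*
 * When the ring has wrapped (tail < head), io_getevents_peek() only reports
 * the contiguous chunk between head and the end of the ring; the wrapped
 * remainder shows up on the next peek after a commit.  For example, with
 * nr = 128, head = 120 and tail = 8, the first peek returns the 8 events at
 * indices 120..127, the commit advances head to 0, and the following peek
 * returns the remaining 8 events.  Callers therefore loop, as
 * qemu_laio_process_completions() does below.
 */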

/**
 * io_getevents_commit:
 * @ctx: AIO context
 * @nr: the number of events by which head should be advanced
 *
 * Advances the head of the ring buffer.
 */
static inline void io_getevents_commit(io_context_t ctx, unsigned int nr)
{
    struct aio_ring *ring = (struct aio_ring *)ctx;

    if (nr) {
        ring->head = (ring->head + nr) % ring->nr;
    }
}

/**
 * io_getevents_advance_and_peek:
 * @ctx: AIO context
 * @events: pointer to the events array, output value
 * @nr: the number of events by which head should be advanced
 *
 * Advances the head of the ring buffer and returns the number of elements
 * left.
 */
static inline unsigned int
io_getevents_advance_and_peek(io_context_t ctx,
                              struct io_event **events,
                              unsigned int nr)
{
    io_getevents_commit(ctx, nr);
    return io_getevents_peek(ctx, events);
}

/**
 * qemu_laio_process_completions:
 * @s: AIO state
 *
 * Fetches completed I/O requests and invokes their callbacks.
 *
 * The function is somewhat tricky because it supports nested event loops,
 * for example when a request callback invokes aio_poll().  In order to do
 * this, indices are kept in LinuxAioState.  The function schedules the
 * completion BH so that it can be called again from a nested event loop.
 * When there are no events left to complete, the BH is canceled.
 */
static void qemu_laio_process_completions(LinuxAioState *s)
{
    struct io_event *events;

    /* Reschedule so nested event loops see currently pending completions */
    qemu_bh_schedule(s->completion_bh);

    while ((s->event_max = io_getevents_advance_and_peek(s->ctx, &events,
                                                         s->event_idx))) {
        for (s->event_idx = 0; s->event_idx < s->event_max; ) {
            struct iocb *iocb = events[s->event_idx].obj;
            struct qemu_laiocb *laiocb =
                container_of(iocb, struct qemu_laiocb, iocb);

            laiocb->ret = io_event_ret(&events[s->event_idx]);

            /* Change counters one-by-one because we can be nested. */
            s->io_q.in_flight--;
            s->event_idx++;
            qemu_laio_process_completion(laiocb);
        }
    }

    qemu_bh_cancel(s->completion_bh);

    /* If we are nested, we have to notify the level above that we are done
     * by setting event_max to zero; the upper level will then jump out of
     * its own `for` loop.  If we are the last level, all counters have
     * dropped to zero. */
    s->event_max = 0;
    s->event_idx = 0;
}
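
/*
 * A sketch of the nesting this supports: a completion callback woken from
 * the loop above may itself call aio_poll().  The BH scheduled at the top
 * of qemu_laio_process_completions() then fires inside that inner event
 * loop and re-enters the function, which resumes at the saved event_idx.
 * When the inner call returns, it has zeroed event_max and event_idx, so
 * the outer invocation's `for` condition fails and its `while` re-peeks
 * the ring instead of replaying stale state.
 */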

static void qemu_laio_process_completions_and_submit(LinuxAioState *s)
{
    qemu_laio_process_completions(s);

    if (!QSIMPLEQ_EMPTY(&s->io_q.pending)) {
        ioq_submit(s);
    }
}

static void qemu_laio_completion_bh(void *opaque)
{
    LinuxAioState *s = opaque;

    qemu_laio_process_completions_and_submit(s);
}

static void qemu_laio_completion_cb(EventNotifier *e)
{
    LinuxAioState *s = container_of(e, LinuxAioState, e);

    if (event_notifier_test_and_clear(&s->e)) {
        qemu_laio_process_completions_and_submit(s);
    }
}

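/*
 * With AioContext polling enabled, aio_poll() spins on qemu_laio_poll_cb()
 * instead of sleeping on the eventfd.  Peeking at the completion ring
 * directly means completions can be detected without a read(2) on the
 * eventfd; qemu_laio_poll_ready() then consumes them.
 */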
static bool qemu_laio_poll_cb(void *opaque)
{
    EventNotifier *e = opaque;
    LinuxAioState *s = container_of(e, LinuxAioState, e);
    struct io_event *events;

    return io_getevents_peek(s->ctx, &events);
}

static void qemu_laio_poll_ready(EventNotifier *opaque)
{
    EventNotifier *e = opaque;
    LinuxAioState *s = container_of(e, LinuxAioState, e);

    qemu_laio_process_completions_and_submit(s);
}

static void ioq_init(LaioQueue *io_q)
{
    QSIMPLEQ_INIT(&io_q->pending);
    io_q->in_queue = 0;
    io_q->in_flight = 0;
    io_q->blocked = false;
}

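/*
 * Submit pending requests, keeping at most MAX_EVENTS iocbs in flight.
 * io_submit() may consume only a prefix of the array: a short positive
 * return leaves the remainder queued, -EAGAIN means the kernel is
 * temporarily out of resources (everything stays queued and the queue is
 * marked blocked until completions free up slots), and any other negative
 * value is an error in the first request, so that one is failed and the
 * rest are retried.
 */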
static void ioq_submit(LinuxAioState *s)
{
    int ret, len;
    struct qemu_laiocb *aiocb;
    struct iocb *iocbs[MAX_EVENTS];
    QSIMPLEQ_HEAD(, qemu_laiocb) completed;

    do {
        if (s->io_q.in_flight >= MAX_EVENTS) {
            break;
        }
        len = 0;
        QSIMPLEQ_FOREACH(aiocb, &s->io_q.pending, next) {
            iocbs[len++] = &aiocb->iocb;
            if (s->io_q.in_flight + len >= MAX_EVENTS) {
                break;
            }
        }

        ret = io_submit(s->ctx, len, iocbs);
        if (ret == -EAGAIN) {
            break;
        }
        if (ret < 0) {
            /* Fail the first request, retry the rest */
            aiocb = QSIMPLEQ_FIRST(&s->io_q.pending);
            QSIMPLEQ_REMOVE_HEAD(&s->io_q.pending, next);
            s->io_q.in_queue--;
            aiocb->ret = ret;
            qemu_laio_process_completion(aiocb);
            continue;
        }

        s->io_q.in_flight += ret;
        s->io_q.in_queue  -= ret;
        aiocb = container_of(iocbs[ret - 1], struct qemu_laiocb, iocb);
        QSIMPLEQ_SPLIT_AFTER(&s->io_q.pending, aiocb, next, &completed);
    } while (ret == len && !QSIMPLEQ_EMPTY(&s->io_q.pending));
    s->io_q.blocked = (s->io_q.in_queue > 0);

    if (s->io_q.in_flight) {
        /* If there are still requests in flight, we can try to complete
         * something right away. */
        qemu_laio_process_completions(s);
        /*
         * Even if we have completed everything (in_flight == 0), the queue
         * can still have pending requests (in_queue > 0).  We do not attempt
         * to resubmit here; this does not risk an I/O hang, because s->e is
         * still set, the completion callback will be invoked shortly, and
         * all pending requests will be submitted from there.
         */
    }
}

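/*
 * A worked example of the clamping below (illustration only): with the
 * AioContext default aio_max_batch of 0 and no device limit, the batch is
 * DEFAULT_MAX_BATCH (32); if 1000 of the MAX_EVENTS (1024) slots are
 * already in flight, MIN_NON_ZERO(1024 - 1000, 32) caps it at 24.
 */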
static uint64_t laio_max_batch(LinuxAioState *s, uint64_t dev_max_batch)
{
    uint64_t max_batch = s->aio_context->aio_max_batch ?: DEFAULT_MAX_BATCH;

    /*
     * AIO context can be shared between multiple block devices, so
     * `dev_max_batch` allows reducing the batch size for latency-sensitive
     * devices.
     */
    max_batch = MIN_NON_ZERO(dev_max_batch, max_batch);

    /* limit the batch to the number of available events */
    max_batch = MIN_NON_ZERO(MAX_EVENTS - s->io_q.in_flight, max_batch);

    return max_batch;
}

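/*
 * defer_call() (see qemu/defer-call.h) runs this callback when the current
 * defer_call_begin()/defer_call_end() section ends, or immediately if no
 * such section is active.  Submission of small batches is thereby delayed
 * until the caller has finished queuing requests, so they can be issued in
 * a single io_submit() call.
 */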
static void laio_deferred_fn(void *opaque)
{
    LinuxAioState *s = opaque;

    if (!s->io_q.blocked && !QSIMPLEQ_EMPTY(&s->io_q.pending)) {
        ioq_submit(s);
    }
}

static int laio_do_submit(int fd, struct qemu_laiocb *laiocb, off_t offset,
                          int type, uint64_t dev_max_batch)
{
    LinuxAioState *s = laiocb->ctx;
    struct iocb *iocbs = &laiocb->iocb;
    QEMUIOVector *qiov = laiocb->qiov;

    switch (type) {
    case QEMU_AIO_WRITE:
    case QEMU_AIO_ZONE_APPEND:
        io_prep_pwritev(iocbs, fd, qiov->iov, qiov->niov, offset);
        break;
    case QEMU_AIO_READ:
        io_prep_preadv(iocbs, fd, qiov->iov, qiov->niov, offset);
        break;
    /* Currently the Linux kernel does not support other operations */
    default:
        fprintf(stderr, "%s: invalid AIO request type 0x%x.\n",
                        __func__, type);
        return -EIO;
    }
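    /* Arm the iocb so that its completion signals s->e; this is what makes
     * qemu_laio_completion_cb() fire in the AioContext. */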
    io_set_eventfd(&laiocb->iocb, event_notifier_get_fd(&s->e));

    QSIMPLEQ_INSERT_TAIL(&s->io_q.pending, laiocb, next);
    s->io_q.in_queue++;
    if (!s->io_q.blocked) {
        if (s->io_q.in_queue >= laio_max_batch(s, dev_max_batch)) {
            ioq_submit(s);
        } else {
            defer_call(laio_deferred_fn, s);
        }
    }

    return 0;
}

int coroutine_fn laio_co_submit(int fd, uint64_t offset, QEMUIOVector *qiov,
                                int type, uint64_t dev_max_batch)
{
    int ret;
    AioContext *ctx = qemu_get_current_aio_context();
    struct qemu_laiocb laiocb = {
        .co         = qemu_coroutine_self(),
        .nbytes     = qiov->size,
        .ctx        = aio_get_linux_aio(ctx),
        .ret        = -EINPROGRESS,
        .is_read    = (type == QEMU_AIO_READ),
        .qiov       = qiov,
    };

    ret = laio_do_submit(fd, &laiocb, offset, type, dev_max_batch);
    if (ret < 0) {
        return ret;
    }

    if (laiocb.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }
    return laiocb.ret;
}
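
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * from a coroutine running in the AioContext home thread, a read would be
 * issued roughly as
 *
 *     QEMUIOVector qiov;
 *     qemu_iovec_init_buf(&qiov, buf, len);
 *     ret = laio_co_submit(fd, offset, &qiov, QEMU_AIO_READ, 0);
 *
 * The coroutine yields until qemu_laio_process_completion() fills in
 * laiocb.ret and wakes it; 0 means the full qiov was transferred.
 */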

void laio_detach_aio_context(LinuxAioState *s, AioContext *old_context)
{
    aio_set_event_notifier(old_context, &s->e, NULL, NULL, NULL);
    qemu_bh_delete(s->completion_bh);
    s->aio_context = NULL;
}

void laio_attach_aio_context(LinuxAioState *s, AioContext *new_context)
{
    s->aio_context = new_context;
    s->completion_bh = aio_bh_new(new_context, qemu_laio_completion_bh, s);
    aio_set_event_notifier(new_context, &s->e,
                           qemu_laio_completion_cb,
                           qemu_laio_poll_cb,
                           qemu_laio_poll_ready);
}

LinuxAioState *laio_init(Error **errp)
{
    int rc;
    LinuxAioState *s;

    s = g_malloc0(sizeof(*s));
    rc = event_notifier_init(&s->e, false);
    if (rc < 0) {
        error_setg_errno(errp, -rc, "failed to initialize event notifier");
        goto out_free_state;
    }

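    /* io_setup() can fail with -EAGAIN if this would push the system past
     * the /proc/sys/fs/aio-max-nr limit on outstanding AIO contexts. */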
    rc = io_setup(MAX_EVENTS, &s->ctx);
    if (rc < 0) {
        error_setg_errno(errp, -rc, "failed to create linux AIO context");
        goto out_close_efd;
    }

    ioq_init(&s->io_q);

    return s;

out_close_efd:
    event_notifier_cleanup(&s->e);
out_free_state:
    g_free(s);
    return NULL;
}

void laio_cleanup(LinuxAioState *s)
{
    event_notifier_cleanup(&s->e);

    if (io_destroy(s->ctx) != 0) {
        fprintf(stderr, "%s: destroy AIO context %p failed\n",
                        __func__, &s->ctx);
    }
    g_free(s);
}