xref: /openbmc/qemu/block/linux-aio.c (revision 4a09d0bb)
/*
 * Linux native AIO support.
 *
 * Copyright (C) 2009 IBM, Corp.
 * Copyright (C) 2009 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "block/aio.h"
#include "qemu/queue.h"
#include "block/block.h"
#include "block/raw-aio.h"
#include "qemu/event_notifier.h"
#include "qemu/coroutine.h"

#include <libaio.h>

/*
 * Queue size (per-device).
 *
 * XXX: eventually we need to communicate this to the guest and/or make it
 *      tunable by the guest.  If we get more outstanding requests at a time
 *      than this, io_submit() returns EAGAIN, which is reported to the
 *      guest as an I/O error.
 */
#define MAX_EVENTS 128

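/*
 * State of a single request.  Completion is signalled either through the
 * embedded BlockAIOCB callback (laio_submit()) or by re-entering the
 * waiting coroutine (laio_co_submit()).
 */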
struct qemu_laiocb {
    BlockAIOCB common;
    Coroutine *co;
    LinuxAioState *ctx;
    struct iocb iocb;
    ssize_t ret;
    size_t nbytes;
    QEMUIOVector *qiov;
    bool is_read;
    QSIMPLEQ_ENTRY(qemu_laiocb) next;
};

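/*
 * Requests that have not yet been passed to io_submit(), kept here for
 * batching (plug/unplug) and for resubmission when the kernel is busy.
 */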
typedef struct {
    int plugged;
    unsigned int in_queue;
    unsigned int in_flight;
    bool blocked;
    QSIMPLEQ_HEAD(, qemu_laiocb) pending;
} LaioQueue;

struct LinuxAioState {
    AioContext *aio_context;

    io_context_t ctx;
    EventNotifier e;

    /* I/O queue for submitting in batches */
    LaioQueue io_q;

    /* I/O completion processing */
    QEMUBH *completion_bh;
    int event_idx;
    int event_max;
};

static void ioq_submit(LinuxAioState *s);

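/* Fold an io_event's res/res2 fields into a single signed result: a byte
 * count on success or a negative errno on failure. */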
static inline ssize_t io_event_ret(struct io_event *ev)
{
    return (ssize_t)(((uint64_t)ev->res2 << 32) | ev->res);
}

/*
 * Completes an AIO request (calls the callback and frees the ACB).
 */
static void qemu_laio_process_completion(struct qemu_laiocb *laiocb)
{
    int ret;

    ret = laiocb->ret;
    if (ret != -ECANCELED) {
        if (ret == laiocb->nbytes) {
            ret = 0;
        } else if (ret >= 0) {
            /* Short reads mean EOF, pad with zeros. */
            if (laiocb->is_read) {
                qemu_iovec_memset(laiocb->qiov, ret, 0,
                    laiocb->qiov->size - ret);
            } else {
                ret = -ENOSPC;
            }
        }
    }

    laiocb->ret = ret;
    if (laiocb->co) {
        /* If the coroutine is already entered it must be in ioq_submit() and
         * will notice laiocb->ret has been filled in when it eventually runs
         * later.  Coroutines cannot be entered recursively so avoid doing
         * that!
         */
        if (!qemu_coroutine_entered(laiocb->co)) {
            qemu_coroutine_enter(laiocb->co);
        }
    } else {
        laiocb->common.cb(laiocb->common.opaque, ret);
        qemu_aio_unref(laiocb);
    }
}

/**
 * aio_ring buffer which is shared between userspace and kernel.
 *
 * This is copied from linux/fs/aio.c; no common header exists, but AIO
 * has been around for ages, so we assume the ABI is stable.
 */
struct aio_ring {
    unsigned    id;    /* kernel internal index number */
    unsigned    nr;    /* number of io_events */
    unsigned    head;  /* Written to by userland or by kernel. */
    unsigned    tail;

    unsigned    magic;
    unsigned    compat_features;
    unsigned    incompat_features;
    unsigned    header_length;  /* size of aio_ring */

    struct io_event io_events[0];
};

/**
 * io_getevents_peek:
 * @ctx: AIO context
 * @events: pointer to the events array, output value
 *
 * Returns the number of completed events and sets a pointer
 * to the events array.  This function does not update the internal
 * ring buffer, only reads head and tail.  When @events has been
 * processed io_getevents_commit() must be called.
 */
static inline unsigned int io_getevents_peek(io_context_t ctx,
                                             struct io_event **events)
{
    struct aio_ring *ring = (struct aio_ring *)ctx;
    unsigned int head = ring->head, tail = ring->tail;
    unsigned int nr;

    /* If the ring has wrapped, return only the contiguous chunk up to the
     * end of the ring; the remainder is picked up by the next call. */
    nr = tail >= head ? tail - head : ring->nr - head;
    *events = ring->io_events + head;
    /* To avoid speculative loads of s->events[i] before observing tail.
       Paired with smp_wmb() inside linux/fs/aio.c: aio_complete(). */
    smp_rmb();

    return nr;
}

/**
 * io_getevents_commit:
 * @ctx: AIO context
 * @nr: the number of events on which head should be advanced
 *
 * Advances the head of the ring buffer.
 */
static inline void io_getevents_commit(io_context_t ctx, unsigned int nr)
{
    struct aio_ring *ring = (struct aio_ring *)ctx;

    if (nr) {
        ring->head = (ring->head + nr) % ring->nr;
    }
}

/**
 * io_getevents_advance_and_peek:
 * @ctx: AIO context
 * @events: pointer to the events array, output value
 * @nr: the number of events on which head should be advanced
 *
 * Advances the head of the ring buffer and returns the number of elements
 * left.
 */
static inline unsigned int
io_getevents_advance_and_peek(io_context_t ctx,
                              struct io_event **events,
                              unsigned int nr)
{
    io_getevents_commit(ctx, nr);
    return io_getevents_peek(ctx, events);
}

/**
 * qemu_laio_process_completions:
 * @s: AIO state
 *
 * Fetches completed I/O requests and invokes their callbacks.
 *
 * The function is somewhat tricky because it supports nested event loops,
 * for example when a request callback invokes aio_poll().  In order to do
 * this, indices are kept in LinuxAioState.  The function schedules the
 * completion BH so that it can be called again from a nested event loop.
 * When there are no events left to complete, the BH is canceled.
 */
static void qemu_laio_process_completions(LinuxAioState *s)
{
    struct io_event *events;

    /* Reschedule so nested event loops see currently pending completions */
    qemu_bh_schedule(s->completion_bh);

    while ((s->event_max = io_getevents_advance_and_peek(s->ctx, &events,
                                                         s->event_idx))) {
        for (s->event_idx = 0; s->event_idx < s->event_max; ) {
            struct iocb *iocb = events[s->event_idx].obj;
            struct qemu_laiocb *laiocb =
                container_of(iocb, struct qemu_laiocb, iocb);

            laiocb->ret = io_event_ret(&events[s->event_idx]);

            /* Change counters one-by-one because we can be nested. */
            s->io_q.in_flight--;
            s->event_idx++;
            qemu_laio_process_completion(laiocb);
        }
    }

    qemu_bh_cancel(s->completion_bh);

    /* If we are nested we have to notify the level above that we are done
     * by setting event_max to zero; the upper level will then jump out of
     * its own `for` loop.  If we are the last level, all counters have
     * dropped to zero. */
    s->event_max = 0;
    s->event_idx = 0;
}

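/* Process pending completions, then flush any requests still sitting in
 * the submission queue (unless the queue is plugged). */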
static void qemu_laio_process_completions_and_submit(LinuxAioState *s)
{
    qemu_laio_process_completions(s);
    if (!s->io_q.plugged && !QSIMPLEQ_EMPTY(&s->io_q.pending)) {
        ioq_submit(s);
    }
}

static void qemu_laio_completion_bh(void *opaque)
{
    LinuxAioState *s = opaque;

    qemu_laio_process_completions_and_submit(s);
}

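/* Event notifier handler: the kernel signals s->e (registered with the
 * iocb via io_set_eventfd()) whenever a request completes. */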
static void qemu_laio_completion_cb(EventNotifier *e)
{
    LinuxAioState *s = container_of(e, LinuxAioState, e);

    if (event_notifier_test_and_clear(&s->e)) {
        qemu_laio_process_completions_and_submit(s);
    }
}

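/* Polling-mode callback: returns true and processes completions if the
 * completion ring is non-empty, without waiting for the eventfd. */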
static bool qemu_laio_poll_cb(void *opaque)
{
    EventNotifier *e = opaque;
    LinuxAioState *s = container_of(e, LinuxAioState, e);
    struct io_event *events;

    if (!io_getevents_peek(s->ctx, &events)) {
        return false;
    }

    qemu_laio_process_completions_and_submit(s);
    return true;
}

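/* Try to cancel an in-flight request.  If io_cancel() succeeds, the
 * callback is invoked immediately with -ECANCELED; otherwise the request
 * completes through the event loop as usual. */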
static void laio_cancel(BlockAIOCB *blockacb)
{
    struct qemu_laiocb *laiocb = (struct qemu_laiocb *)blockacb;
    struct io_event event;
    int ret;

    if (laiocb->ret != -EINPROGRESS) {
        return;
    }
    ret = io_cancel(laiocb->ctx->ctx, &laiocb->iocb, &event);
    laiocb->ret = -ECANCELED;
    if (ret != 0) {
        /* iocb is not cancelled, cb will be called by the event loop later */
        return;
    }

    laiocb->common.cb(laiocb->common.opaque, laiocb->ret);
}

static const AIOCBInfo laio_aiocb_info = {
    .aiocb_size         = sizeof(struct qemu_laiocb),
    .cancel_async       = laio_cancel,
};

static void ioq_init(LaioQueue *io_q)
{
    QSIMPLEQ_INIT(&io_q->pending);
    io_q->plugged = 0;
    io_q->in_queue = 0;
    io_q->in_flight = 0;
    io_q->blocked = false;
}

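/* Pass pending requests to io_submit() in batches, keeping at most
 * MAX_EVENTS requests in flight.  If the kernel cannot accept everything,
 * the remainder stays queued and s->io_q.blocked is set. */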
static void ioq_submit(LinuxAioState *s)
{
    int ret, len;
    struct qemu_laiocb *aiocb;
    struct iocb *iocbs[MAX_EVENTS];
    QSIMPLEQ_HEAD(, qemu_laiocb) completed;

    do {
        if (s->io_q.in_flight >= MAX_EVENTS) {
            break;
        }
        len = 0;
        QSIMPLEQ_FOREACH(aiocb, &s->io_q.pending, next) {
            iocbs[len++] = &aiocb->iocb;
            if (s->io_q.in_flight + len >= MAX_EVENTS) {
                break;
            }
        }

        ret = io_submit(s->ctx, len, iocbs);
        if (ret == -EAGAIN) {
            break;
        }
        if (ret < 0) {
            /* Fail the first request, retry the rest */
            aiocb = QSIMPLEQ_FIRST(&s->io_q.pending);
            QSIMPLEQ_REMOVE_HEAD(&s->io_q.pending, next);
            s->io_q.in_queue--;
            aiocb->ret = ret;
            qemu_laio_process_completion(aiocb);
            continue;
        }

        s->io_q.in_flight += ret;
        s->io_q.in_queue  -= ret;
        aiocb = container_of(iocbs[ret - 1], struct qemu_laiocb, iocb);
        QSIMPLEQ_SPLIT_AFTER(&s->io_q.pending, aiocb, next, &completed);
    } while (ret == len && !QSIMPLEQ_EMPTY(&s->io_q.pending));
    s->io_q.blocked = (s->io_q.in_queue > 0);

    if (s->io_q.in_flight) {
        /* We may be able to complete some requests right away if there are
         * still requests in flight. */
        qemu_laio_process_completions(s);
        /*
         * Even if we have completed everything (in_flight == 0), the queue
         * can still have pending requests (in_queue > 0).  We do not
         * attempt to repeat submission in order to avoid an I/O hang.  The
         * reason is simple: s->e is still set and the completion callback
         * will be called shortly, and all pending requests will be
         * submitted from there.
         */
    }
}

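/* Plugging defers io_submit() so that consecutive requests issued while
 * plugged can be submitted as a single batch on unplug. */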
void laio_io_plug(BlockDriverState *bs, LinuxAioState *s)
{
    s->io_q.plugged++;
}

void laio_io_unplug(BlockDriverState *bs, LinuxAioState *s)
{
    assert(s->io_q.plugged);
    if (--s->io_q.plugged == 0 &&
        !s->io_q.blocked && !QSIMPLEQ_EMPTY(&s->io_q.pending)) {
        ioq_submit(s);
    }
}

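/* Prepare an iocb for the given request type and queue it.  Submission
 * happens immediately unless the queue is blocked or plugged; a plugged
 * queue is still flushed once it reaches MAX_EVENTS. */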
static int laio_do_submit(int fd, struct qemu_laiocb *laiocb, off_t offset,
                          int type)
{
    LinuxAioState *s = laiocb->ctx;
    struct iocb *iocbs = &laiocb->iocb;
    QEMUIOVector *qiov = laiocb->qiov;

    switch (type) {
    case QEMU_AIO_WRITE:
        io_prep_pwritev(iocbs, fd, qiov->iov, qiov->niov, offset);
        break;
    case QEMU_AIO_READ:
        io_prep_preadv(iocbs, fd, qiov->iov, qiov->niov, offset);
        break;
    /* Currently the Linux kernel does not support other operations */
    default:
        fprintf(stderr, "%s: invalid AIO request type 0x%x.\n",
                        __func__, type);
        return -EIO;
    }
    io_set_eventfd(&laiocb->iocb, event_notifier_get_fd(&s->e));

    QSIMPLEQ_INSERT_TAIL(&s->io_q.pending, laiocb, next);
    s->io_q.in_queue++;
    if (!s->io_q.blocked &&
        (!s->io_q.plugged ||
         s->io_q.in_flight + s->io_q.in_queue >= MAX_EVENTS)) {
        ioq_submit(s);
    }

    return 0;
}

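/* Coroutine entry point: queue the request and yield until it completes.
 * The qemu_laiocb lives on the coroutine stack, so no heap allocation is
 * needed. */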
int coroutine_fn laio_co_submit(BlockDriverState *bs, LinuxAioState *s, int fd,
                                uint64_t offset, QEMUIOVector *qiov, int type)
{
    int ret;
    struct qemu_laiocb laiocb = {
        .co         = qemu_coroutine_self(),
        .nbytes     = qiov->size,
        .ctx        = s,
        .ret        = -EINPROGRESS,
        .is_read    = (type == QEMU_AIO_READ),
        .qiov       = qiov,
    };

    ret = laio_do_submit(fd, &laiocb, offset, type);
    if (ret < 0) {
        return ret;
    }

    if (laiocb.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }
    return laiocb.ret;
}

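/* Callback-based entry point: returns a BlockAIOCB and invokes @cb with
 * the result once the request completes. */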
BlockAIOCB *laio_submit(BlockDriverState *bs, LinuxAioState *s, int fd,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque, int type)
{
    struct qemu_laiocb *laiocb;
    off_t offset = sector_num * BDRV_SECTOR_SIZE;
    int ret;

    laiocb = qemu_aio_get(&laio_aiocb_info, bs, cb, opaque);
    laiocb->nbytes = nb_sectors * BDRV_SECTOR_SIZE;
    laiocb->ctx = s;
    laiocb->ret = -EINPROGRESS;
    laiocb->is_read = (type == QEMU_AIO_READ);
    laiocb->qiov = qiov;

    ret = laio_do_submit(fd, laiocb, offset, type);
    if (ret < 0) {
        qemu_aio_unref(laiocb);
        return NULL;
    }

    return &laiocb->common;
}

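/* Unregister the eventfd and delete the completion BH when this state
 * object is detached from an AioContext. */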
void laio_detach_aio_context(LinuxAioState *s, AioContext *old_context)
{
    aio_set_event_notifier(old_context, &s->e, false, NULL, NULL);
    qemu_bh_delete(s->completion_bh);
}

void laio_attach_aio_context(LinuxAioState *s, AioContext *new_context)
{
    s->aio_context = new_context;
    s->completion_bh = aio_bh_new(new_context, qemu_laio_completion_bh, s);
    aio_set_event_notifier(new_context, &s->e, false,
                           qemu_laio_completion_cb,
                           qemu_laio_poll_cb);
}

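/* Allocate a LinuxAioState and create the completion eventfd and the
 * kernel AIO context.  Returns NULL on failure. */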
LinuxAioState *laio_init(void)
{
    LinuxAioState *s;

    s = g_malloc0(sizeof(*s));
    if (event_notifier_init(&s->e, false) < 0) {
        goto out_free_state;
    }

    if (io_setup(MAX_EVENTS, &s->ctx) != 0) {
        goto out_close_efd;
    }

    ioq_init(&s->io_q);

    return s;

out_close_efd:
    event_notifier_cleanup(&s->e);
out_free_state:
    g_free(s);
    return NULL;
}

void laio_cleanup(LinuxAioState *s)
{
    event_notifier_cleanup(&s->e);

    if (io_destroy(s->ctx) != 0) {
        fprintf(stderr, "%s: destroy AIO context %p failed\n",
                        __func__, &s->ctx);
    }
    g_free(s);
}
503