/*
 * Linux native AIO support.
 *
 * Copyright (C) 2009 IBM, Corp.
 * Copyright (C) 2009 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "block/aio.h"
#include "qemu/queue.h"
#include "block/block.h"
#include "block/raw-aio.h"
#include "qemu/event_notifier.h"
#include "qemu/coroutine.h"
#include "qemu/defer-call.h"
#include "qapi/error.h"
#include "sysemu/block-backend.h"

/* Only used for assertions. */
#include "qemu/coroutine_int.h"

#include <libaio.h>

/*
 * Queue size (per-device).
 *
 * XXX: eventually we need to communicate this to the guest and/or make it
 * tunable by the guest. If we get more outstanding requests at a time
 * than this, we will get EAGAIN from io_submit, which is communicated to
 * the guest as an I/O error.
 */
#define MAX_EVENTS 1024

/* Maximum number of requests in a batch. (default value) */
#define DEFAULT_MAX_BATCH 32

struct qemu_laiocb {
    Coroutine *co;
    LinuxAioState *ctx;
    struct iocb iocb;
    ssize_t ret;
    size_t nbytes;
    QEMUIOVector *qiov;
    bool is_read;
    QSIMPLEQ_ENTRY(qemu_laiocb) next;
};

typedef struct {
    unsigned int in_queue;
    unsigned int in_flight;
    bool blocked;
    QSIMPLEQ_HEAD(, qemu_laiocb) pending;
} LaioQueue;

struct LinuxAioState {
    AioContext *aio_context;

    io_context_t ctx;
    EventNotifier e;

    /* No locking required, only accessed from AioContext home thread */
    LaioQueue io_q;
    QEMUBH *completion_bh;
    int event_idx;
    int event_max;
};

static void ioq_submit(LinuxAioState *s);

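/* Fold the res/res2 fields of an io_event into a single ssize_t result. */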
static inline ssize_t io_event_ret(struct io_event *ev)
{
    return (ssize_t)(((uint64_t)ev->res2 << 32) | ev->res);
}

/*
 * Completes an AIO request.
 */
static void qemu_laio_process_completion(struct qemu_laiocb *laiocb)
{
    int ret;

    ret = laiocb->ret;
    if (ret != -ECANCELED) {
        if (ret == laiocb->nbytes) {
            ret = 0;
        } else if (ret >= 0) {
            /* Short reads mean EOF, pad with zeros. */
            if (laiocb->is_read) {
                qemu_iovec_memset(laiocb->qiov, ret, 0,
                                  laiocb->qiov->size - ret);
            } else {
                ret = -ENOSPC;
            }
        }
    }

    laiocb->ret = ret;

    /*
     * If the coroutine is already entered it must be in ioq_submit() and
     * will notice laiocb->ret has been filled in when it eventually runs
     * later. Coroutines cannot be entered recursively so avoid doing
     * that!
     */
    assert(laiocb->co->ctx == laiocb->ctx->aio_context);
    if (!qemu_coroutine_entered(laiocb->co)) {
        aio_co_wake(laiocb->co);
    }
}

/**
 * aio_ring buffer which is shared between userspace and kernel.
 *
 * This is copied from linux/fs/aio.c; a common header does not exist,
 * but AIO has existed for ages, so we assume the ABI is stable.
 */
struct aio_ring {
    unsigned id;   /* kernel internal index number */
    unsigned nr;   /* number of io_events */
    unsigned head; /* Written to by userland or by kernel. */
    unsigned tail;

    unsigned magic;
    unsigned compat_features;
    unsigned incompat_features;
    unsigned header_length; /* size of aio_ring */

    struct io_event io_events[];
};

/**
 * io_getevents_peek:
 * @ctx: AIO context
 * @events: pointer to the events array (output value)
 *
 * Returns the number of completed events and sets a pointer
 * to the events array. This function does not update the internal
 * ring buffer; it only reads head and tail. When @events has been
 * processed, io_getevents_commit() must be called.
 */
static inline unsigned int io_getevents_peek(io_context_t ctx,
                                             struct io_event **events)
{
    struct aio_ring *ring = (struct aio_ring *)ctx;
    unsigned int head = ring->head, tail = ring->tail;
    unsigned int nr;

    nr = tail >= head ? tail - head : ring->nr - head;
    *events = ring->io_events + head;
    /* To avoid speculative loads of s->events[i] before observing tail.
       Paired with smp_wmb() inside linux/fs/aio.c: aio_complete(). */
    smp_rmb();

    return nr;
}

/**
 * io_getevents_commit:
 * @ctx: AIO context
 * @nr: the number of events by which head should be advanced
 *
 * Advances the head of the ring buffer.
 */
static inline void io_getevents_commit(io_context_t ctx, unsigned int nr)
{
    struct aio_ring *ring = (struct aio_ring *)ctx;

    if (nr) {
        ring->head = (ring->head + nr) % ring->nr;
    }
}

/**
 * io_getevents_advance_and_peek:
 * @ctx: AIO context
 * @events: pointer to the events array (output value)
 * @nr: the number of events by which head should be advanced
 *
 * Advances the head of the ring buffer and returns the number of
 * completed events left.
 */
static inline unsigned int
io_getevents_advance_and_peek(io_context_t ctx,
                              struct io_event **events,
                              unsigned int nr)
{
    io_getevents_commit(ctx, nr);
    return io_getevents_peek(ctx, events);
}

/**
 * qemu_laio_process_completions:
 * @s: AIO state
 *
 * Fetches completed I/O requests and invokes their callbacks.
 *
 * The function is somewhat tricky because it supports nested event loops, for
 * example when a request callback invokes aio_poll(). In order to do this,
 * indices are kept in LinuxAioState. The function schedules the completion BH
 * so it can be called again from a nested event loop. When there are no
 * events left to complete, the BH is canceled.
 */
static void qemu_laio_process_completions(LinuxAioState *s)
{
    struct io_event *events;

    defer_call_begin();

    /* Reschedule so nested event loops see currently pending completions */
    qemu_bh_schedule(s->completion_bh);

    while ((s->event_max = io_getevents_advance_and_peek(s->ctx, &events,
                                                         s->event_idx))) {
        for (s->event_idx = 0; s->event_idx < s->event_max; ) {
            struct iocb *iocb = events[s->event_idx].obj;
            struct qemu_laiocb *laiocb =
                container_of(iocb, struct qemu_laiocb, iocb);

            laiocb->ret = io_event_ret(&events[s->event_idx]);

            /* Change counters one-by-one because we can be nested. */
            s->io_q.in_flight--;
            s->event_idx++;
            qemu_laio_process_completion(laiocb);
        }
    }

    qemu_bh_cancel(s->completion_bh);

    /*
     * If we are nested, we have to notify the level above that we are done
     * by setting event_max to zero; the upper level will then jump out of
     * its own `for` loop. If we are the last level, all counters have
     * dropped to zero.
     */
    s->event_max = 0;
    s->event_idx = 0;

    defer_call_end();
}

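/* Reap completions, then submit any requests still waiting in the pending
 * queue. */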
static void qemu_laio_process_completions_and_submit(LinuxAioState *s)
{
    qemu_laio_process_completions(s);

    if (!QSIMPLEQ_EMPTY(&s->io_q.pending)) {
        ioq_submit(s);
    }
}

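/* Bottom half that drives completion processing; it is scheduled so that
 * nested event loops see currently pending completions. */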
static void qemu_laio_completion_bh(void *opaque)
{
    LinuxAioState *s = opaque;

    qemu_laio_process_completions_and_submit(s);
}

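/* Called when the completion eventfd (s->e) becomes readable. */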
static void qemu_laio_completion_cb(EventNotifier *e)
{
    LinuxAioState *s = container_of(e, LinuxAioState, e);

    if (event_notifier_test_and_clear(&s->e)) {
        qemu_laio_process_completions_and_submit(s);
    }
}

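/* Polling callback: returns true if completed events are waiting in the
 * ring buffer. */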
static bool qemu_laio_poll_cb(void *opaque)
{
    EventNotifier *e = opaque;
    LinuxAioState *s = container_of(e, LinuxAioState, e);
    struct io_event *events;

    return io_getevents_peek(s->ctx, &events);
}

static void qemu_laio_poll_ready(EventNotifier *opaque)
{
    EventNotifier *e = opaque;
    LinuxAioState *s = container_of(e, LinuxAioState, e);

    qemu_laio_process_completions_and_submit(s);
}

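/* Reset the submission queue to an empty, unblocked state. */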
static void ioq_init(LaioQueue *io_q)
{
    QSIMPLEQ_INIT(&io_q->pending);
    io_q->in_queue = 0;
    io_q->in_flight = 0;
    io_q->blocked = false;
}

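/*
 * Submit pending requests with io_submit(), batching so that no more than
 * MAX_EVENTS requests are in flight at once. On a hard submission error the
 * first request of the batch is failed and the rest are retried; if requests
 * remain queued afterwards (e.g. after -EAGAIN), the queue is marked blocked
 * so new requests are not submitted until completions make progress.
 */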
static void ioq_submit(LinuxAioState *s)
{
    int ret, len;
    struct qemu_laiocb *aiocb;
    struct iocb *iocbs[MAX_EVENTS];
    QSIMPLEQ_HEAD(, qemu_laiocb) completed;

    do {
        if (s->io_q.in_flight >= MAX_EVENTS) {
            break;
        }
        len = 0;
        QSIMPLEQ_FOREACH(aiocb, &s->io_q.pending, next) {
            iocbs[len++] = &aiocb->iocb;
            if (s->io_q.in_flight + len >= MAX_EVENTS) {
                break;
            }
        }

        ret = io_submit(s->ctx, len, iocbs);
        if (ret == -EAGAIN) {
            break;
        }
        if (ret < 0) {
            /* Fail the first request, retry the rest */
            aiocb = QSIMPLEQ_FIRST(&s->io_q.pending);
            QSIMPLEQ_REMOVE_HEAD(&s->io_q.pending, next);
            s->io_q.in_queue--;
            aiocb->ret = ret;
            qemu_laio_process_completion(aiocb);
            continue;
        }

        s->io_q.in_flight += ret;
        s->io_q.in_queue -= ret;
        aiocb = container_of(iocbs[ret - 1], struct qemu_laiocb, iocb);
        QSIMPLEQ_SPLIT_AFTER(&s->io_q.pending, aiocb, next, &completed);
    } while (ret == len && !QSIMPLEQ_EMPTY(&s->io_q.pending));
    s->io_q.blocked = (s->io_q.in_queue > 0);

    if (s->io_q.in_flight) {
        /*
         * We can try to complete something right away if there are
         * still requests in flight.
         */
        qemu_laio_process_completions(s);
        /*
         * Even if we have completed everything (in_flight == 0), the queue
         * can still have pending requests (in_queue > 0). We do not attempt
         * to resubmit here; this cannot cause an I/O hang because s->e is
         * still set, the completion callback will be called shortly, and
         * all pending requests will be submitted from there.
         */
    }
}

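/*
 * Return the number of requests to accumulate before submitting a batch,
 * bounded by the per-AioContext aio_max_batch setting, the optional
 * per-device limit and the number of free event slots.
 */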
static uint64_t laio_max_batch(LinuxAioState *s, uint64_t dev_max_batch)
{
    uint64_t max_batch = s->aio_context->aio_max_batch ?: DEFAULT_MAX_BATCH;

    /*
     * AIO context can be shared between multiple block devices, so
     * `dev_max_batch` allows reducing the batch size for latency-sensitive
     * devices.
     */
    max_batch = MIN_NON_ZERO(dev_max_batch, max_batch);

    /* limit the batch to the number of available events */
    max_batch = MIN_NON_ZERO(MAX_EVENTS - s->io_q.in_flight, max_batch);

    return max_batch;
}

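/* defer_call() callback: flush requests that were queued while batching. */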
static void laio_deferred_fn(void *opaque)
{
    LinuxAioState *s = opaque;

    if (!s->io_q.blocked && !QSIMPLEQ_EMPTY(&s->io_q.pending)) {
        ioq_submit(s);
    }
}

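/*
 * Prepare the iocb for the given request type, queue it, and either submit
 * right away once the batch limit is reached or defer submission via
 * defer_call().
 */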
static int laio_do_submit(int fd, struct qemu_laiocb *laiocb, off_t offset,
                          int type, uint64_t dev_max_batch)
{
    LinuxAioState *s = laiocb->ctx;
    struct iocb *iocbs = &laiocb->iocb;
    QEMUIOVector *qiov = laiocb->qiov;

    switch (type) {
    case QEMU_AIO_WRITE:
        io_prep_pwritev(iocbs, fd, qiov->iov, qiov->niov, offset);
        break;
    case QEMU_AIO_ZONE_APPEND:
        io_prep_pwritev(iocbs, fd, qiov->iov, qiov->niov, offset);
        break;
    case QEMU_AIO_READ:
        io_prep_preadv(iocbs, fd, qiov->iov, qiov->niov, offset);
        break;
    case QEMU_AIO_FLUSH:
        io_prep_fdsync(iocbs, fd);
        break;
    /* Currently the Linux kernel does not support other operations */
    default:
        fprintf(stderr, "%s: invalid AIO request type 0x%x.\n",
                __func__, type);
        return -EIO;
    }
    io_set_eventfd(&laiocb->iocb, event_notifier_get_fd(&s->e));

    QSIMPLEQ_INSERT_TAIL(&s->io_q.pending, laiocb, next);
    s->io_q.in_queue++;
    if (!s->io_q.blocked) {
        if (s->io_q.in_queue >= laio_max_batch(s, dev_max_batch)) {
            ioq_submit(s);
        } else {
            defer_call(laio_deferred_fn, s);
        }
    }

    return 0;
}

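/*
 * Coroutine entry point: build a qemu_laiocb on the caller's stack, submit
 * it, and yield until the completion path fills in laiocb.ret and wakes us.
 *
 * Usage sketch (illustrative only, not taken from a real caller): a driver
 * coroutine running in the current AioContext might issue a vectored read as
 *
 *     ret = laio_co_submit(fd, offset, &qiov, QEMU_AIO_READ, 0);
 *
 * where a dev_max_batch of 0 means "no per-device batch limit".
 */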
int coroutine_fn laio_co_submit(int fd, uint64_t offset, QEMUIOVector *qiov,
                                int type, uint64_t dev_max_batch)
{
    int ret;
    AioContext *ctx = qemu_get_current_aio_context();
    struct qemu_laiocb laiocb = {
        .co = qemu_coroutine_self(),
        .nbytes = qiov ? qiov->size : 0,
        .ctx = aio_get_linux_aio(ctx),
        .ret = -EINPROGRESS,
        .is_read = (type == QEMU_AIO_READ),
        .qiov = qiov,
    };

    ret = laio_do_submit(fd, &laiocb, offset, type, dev_max_batch);
    if (ret < 0) {
        return ret;
    }

    if (laiocb.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }
    return laiocb.ret;
}

void laio_detach_aio_context(LinuxAioState *s, AioContext *old_context)
{
    aio_set_event_notifier(old_context, &s->e, NULL, NULL, NULL);
    qemu_bh_delete(s->completion_bh);
    s->aio_context = NULL;
}

void laio_attach_aio_context(LinuxAioState *s, AioContext *new_context)
{
    s->aio_context = new_context;
    s->completion_bh = aio_bh_new(new_context, qemu_laio_completion_bh, s);
    aio_set_event_notifier(new_context, &s->e,
                           qemu_laio_completion_cb,
                           qemu_laio_poll_cb,
                           qemu_laio_poll_ready);
}

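/* Allocate a LinuxAioState, its completion eventfd and the kernel AIO
 * context. */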
LinuxAioState *laio_init(Error **errp)
{
    int rc;
    LinuxAioState *s;

    s = g_malloc0(sizeof(*s));
    rc = event_notifier_init(&s->e, false);
    if (rc < 0) {
        error_setg_errno(errp, -rc, "failed to initialize event notifier");
        goto out_free_state;
    }

    rc = io_setup(MAX_EVENTS, &s->ctx);
    if (rc < 0) {
        error_setg_errno(errp, -rc, "failed to create linux AIO context");
        goto out_close_efd;
    }

    ioq_init(&s->io_q);

    return s;

out_close_efd:
    event_notifier_cleanup(&s->e);
out_free_state:
    g_free(s);
    return NULL;
}

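/* Tear down the kernel AIO context and the completion eventfd. */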
void laio_cleanup(LinuxAioState *s)
{
    event_notifier_cleanup(&s->e);

    if (io_destroy(s->ctx) != 0) {
        fprintf(stderr, "%s: destroy AIO context %p failed\n",
                __func__, &s->ctx);
    }
    g_free(s);
}

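/*
 * Probe whether the host kernel accepts IO_CMD_FDSYNC by submitting one
 * request against a throwaway AIO context; -EINVAL means it is unsupported.
 */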
bool laio_has_fdsync(int fd)
{
    struct iocb cb;
    struct iocb *cbs[] = {&cb, NULL};

    io_context_t ctx = 0;
    io_setup(1, &ctx);

    /* check if the host kernel supports IO_CMD_FDSYNC */
    io_prep_fdsync(&cb, fd);
    int ret = io_submit(ctx, 1, cbs);

    io_destroy(ctx);
    return (ret == -EINVAL) ? false : true;
}