#ifndef IOU_CORE_H
#define IOU_CORE_H

#include <linux/errno.h>
#include <linux/lockdep.h>
#include <linux/io_uring_types.h>
#include "io-wq.h"
#include "slist.h"
#include "filetable.h"

#ifndef CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>
#endif

enum {
	IOU_OK			= 0,
	IOU_ISSUE_SKIP_COMPLETE	= -EIOCBQUEUED,

	/*
	 * Intended only when IO_URING_F_MULTISHOT is passed to the handler,
	 * to indicate to the poll runner that multishot should be removed
	 * and the result is set on req->cqe.res.
	 */
	IOU_STOP_MULTISHOT	= -ECANCELED,
};

struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx, bool overflow);
bool io_req_cqe_overflow(struct io_kiocb *req);
int io_run_task_work_sig(struct io_ring_ctx *ctx);
int __io_run_local_work(struct io_ring_ctx *ctx, bool *locked);
int io_run_local_work(struct io_ring_ctx *ctx);
void io_req_complete_failed(struct io_kiocb *req, s32 res);
void __io_req_complete(struct io_kiocb *req, unsigned issue_flags);
void io_req_complete_post(struct io_kiocb *req);
void __io_req_complete_post(struct io_kiocb *req);
bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags,
		     bool allow_overflow);
bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags,
		     bool allow_overflow);
void __io_commit_cqring_flush(struct io_ring_ctx *ctx);

struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages);

struct file *io_file_get_normal(struct io_kiocb *req, int fd);
struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
			       unsigned issue_flags);

static inline bool io_req_ffs_set(struct io_kiocb *req)
{
	return req->flags & REQ_F_FIXED_FILE;
}

bool io_is_uring_fops(struct file *file);
bool io_alloc_async_data(struct io_kiocb *req);
void io_req_task_work_add(struct io_kiocb *req);
void io_req_tw_post_queue(struct io_kiocb *req, s32 res, u32 cflags);
void io_req_task_queue(struct io_kiocb *req);
void io_queue_iowq(struct io_kiocb *req, bool *dont_use);
void io_req_task_complete(struct io_kiocb *req, bool *locked);
void io_req_task_queue_fail(struct io_kiocb *req, int ret);
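/*
 * Illustrative sketch only (not a declaration in this header): a typical
 * opcode issue handler stores its result with io_req_set_res() (defined
 * below) and returns one of the IOU_* codes above, while
 * IOU_ISSUE_SKIP_COMPLETE tells the core that the completion will be
 * posted separately.  io_foo() and do_foo() are hypothetical names.
 *
 *	static int io_foo(struct io_kiocb *req, unsigned int issue_flags)
 *	{
 *		int ret = do_foo(req);
 *
 *		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
 *			return -EAGAIN;		// retried from a blocking context
 *		io_req_set_res(req, ret, 0);
 *		return IOU_OK;			// the core posts the CQE
 *	}
 */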
void io_req_task_submit(struct io_kiocb *req, bool *locked);
void tctx_task_work(struct callback_head *cb);
__cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
int io_uring_alloc_task_context(struct task_struct *task,
				struct io_ring_ctx *ctx);

int io_poll_issue(struct io_kiocb *req, bool *locked);
int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr);
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin);
void io_free_batch_list(struct io_ring_ctx *ctx, struct io_wq_work_node *node);
int io_req_prep_async(struct io_kiocb *req);

struct io_wq_work *io_wq_free_work(struct io_wq_work *work);
void io_wq_submit_work(struct io_wq_work *work);

void io_free_req(struct io_kiocb *req);
void io_queue_next(struct io_kiocb *req);
void __io_put_task(struct task_struct *task, int nr);
void io_task_refs_refill(struct io_uring_task *tctx);
bool __io_alloc_req_refill(struct io_ring_ctx *ctx);

bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
			bool cancel_all);

#define io_for_each_link(pos, head) \
	for (pos = (head); pos; pos = pos->link)

static inline void io_cq_lock(struct io_ring_ctx *ctx)
	__acquires(ctx->completion_lock)
{
	spin_lock(&ctx->completion_lock);
}

void io_cq_unlock_post(struct io_ring_ctx *ctx);

static inline struct io_uring_cqe *io_get_cqe_overflow(struct io_ring_ctx *ctx,
							bool overflow)
{
	if (likely(ctx->cqe_cached < ctx->cqe_sentinel)) {
		struct io_uring_cqe *cqe = ctx->cqe_cached;

		ctx->cached_cq_tail++;
		ctx->cqe_cached++;
		if (ctx->flags & IORING_SETUP_CQE32)
			ctx->cqe_cached++;
		return cqe;
	}

	return __io_get_cqe(ctx, overflow);
}

static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
{
	return io_get_cqe_overflow(ctx, false);
}
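/*
 * Simplified, hedged sketch (not a verbatim excerpt of io_uring.c) of how
 * the completion side ties these helpers together: a CQE is reserved and
 * filled while holding the completion lock, then io_cq_unlock_post()
 * commits the ring tail and wakes any waiters.  __io_fill_cqe_req() is
 * defined just below; on failure it already falls back to the overflow
 * path via io_req_cqe_overflow().
 *
 *	io_cq_lock(ctx);
 *	__io_fill_cqe_req(ctx, req);
 *	io_cq_unlock_post(ctx);
 */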
static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx,
				     struct io_kiocb *req)
{
	struct io_uring_cqe *cqe;

	/*
	 * If we can't get a cq entry, userspace overflowed the
	 * submission (by quite a lot). Increment the overflow count in
	 * the ring.
	 */
	cqe = io_get_cqe(ctx);
	if (unlikely(!cqe))
		return io_req_cqe_overflow(req);

	trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
				req->cqe.res, req->cqe.flags,
				(req->flags & REQ_F_CQE32_INIT) ? req->extra1 : 0,
				(req->flags & REQ_F_CQE32_INIT) ? req->extra2 : 0);

	memcpy(cqe, &req->cqe, sizeof(*cqe));

	if (ctx->flags & IORING_SETUP_CQE32) {
		u64 extra1 = 0, extra2 = 0;

		if (req->flags & REQ_F_CQE32_INIT) {
			extra1 = req->extra1;
			extra2 = req->extra2;
		}

		WRITE_ONCE(cqe->big_cqe[0], extra1);
		WRITE_ONCE(cqe->big_cqe[1], extra2);
	}
	return true;
}

static inline void req_set_fail(struct io_kiocb *req)
{
	req->flags |= REQ_F_FAIL;
	if (req->flags & REQ_F_CQE_SKIP) {
		req->flags &= ~REQ_F_CQE_SKIP;
		req->flags |= REQ_F_SKIP_LINK_CQES;
	}
}

static inline void io_req_set_res(struct io_kiocb *req, s32 res, u32 cflags)
{
	req->cqe.res = res;
	req->cqe.flags = cflags;
}

static inline bool req_has_async_data(struct io_kiocb *req)
{
	return req->flags & REQ_F_ASYNC_DATA;
}

static inline void io_put_file(struct file *file)
{
	if (file)
		fput(file);
}

static inline void io_ring_submit_unlock(struct io_ring_ctx *ctx,
					 unsigned issue_flags)
{
	lockdep_assert_held(&ctx->uring_lock);
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_unlock(&ctx->uring_lock);
}

static inline void io_ring_submit_lock(struct io_ring_ctx *ctx,
				       unsigned issue_flags)
{
	/*
	 * "Normal" inline submissions always hold the uring_lock, since we
	 * grab it from the system call. Same is true for the SQPOLL offload.
	 * The only exception is when we've detached the request and issue it
	 * from an async worker thread; grab the lock for that case.
	 */
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_lock(&ctx->uring_lock);
	lockdep_assert_held(&ctx->uring_lock);
}
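/*
 * Typical (sketched) usage of the pair above: an opcode handler that
 * touches ctx state protected by uring_lock brackets the access with the
 * issue_flags-aware helpers, so the same code works both for inline issue
 * (lock already held) and from an io-wq worker (IO_URING_F_UNLOCKED set).
 *
 *	io_ring_submit_lock(ctx, issue_flags);
 *	// ... access state guarded by ctx->uring_lock ...
 *	io_ring_submit_unlock(ctx, issue_flags);
 */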
static inline void io_commit_cqring(struct io_ring_ctx *ctx)
{
	/* order cqe stores with ring update */
	smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
}

/* requires smp_mb() prior, see wq_has_sleeper() */
static inline void __io_cqring_wake(struct io_ring_ctx *ctx)
{
	/*
	 * wake_up_all() may seem excessive, but io_wake_function() and
	 * io_should_wake() handle the termination of the loop and only
	 * wake as many waiters as we need to.
	 */
	if (waitqueue_active(&ctx->cq_wait))
		wake_up_all(&ctx->cq_wait);
}

static inline void io_cqring_wake(struct io_ring_ctx *ctx)
{
	smp_mb();
	__io_cqring_wake(ctx);
}

static inline bool io_sqring_full(struct io_ring_ctx *ctx)
{
	struct io_rings *r = ctx->rings;

	return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
}

static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;

	/* make sure SQ entry isn't read before tail */
	return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
}

static inline int io_run_task_work(void)
{
	if (task_work_pending(current)) {
		if (test_thread_flag(TIF_NOTIFY_SIGNAL))
			clear_notify_signal();
		__set_current_state(TASK_RUNNING);
		task_work_run();
		return 1;
	}

	return 0;
}

static inline bool io_task_work_pending(struct io_ring_ctx *ctx)
{
	return test_thread_flag(TIF_NOTIFY_SIGNAL) ||
	       !wq_list_empty(&ctx->work_llist);
}
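/*
 * Hedged sketch of how a CQ wait loop is expected to drive the task-work
 * helpers (loosely modelled on io_cqring_wait(); cqes_ready() is a
 * hypothetical stand-in for the real wake-up condition):
 *
 *	while (!cqes_ready(ctx)) {
 *		if (io_task_work_pending(ctx))
 *			io_run_task_work_ctx(ctx);	// defined below
 *		else
 *			schedule();
 *	}
 */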
static inline int io_run_task_work_ctx(struct io_ring_ctx *ctx)
{
	int ret = 0;
	int ret2;

	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN)
		ret = io_run_local_work(ctx);

	/* run this after, in case the local work queued up more */
	ret2 = io_run_task_work();

	/*
	 * Prefer propagating an error over the count of task work items
	 * run, but still make sure the task work itself is run if requested.
	 */
	if (ret >= 0)
		ret += ret2;

	return ret;
}

static inline int io_run_local_work_locked(struct io_ring_ctx *ctx)
{
	bool locked;
	int ret;

	if (llist_empty(&ctx->work_llist))
		return 0;

	locked = true;
	ret = __io_run_local_work(ctx, &locked);
	/* shouldn't happen! */
	if (WARN_ON_ONCE(!locked))
		mutex_lock(&ctx->uring_lock);
	return ret;
}

static inline void io_tw_lock(struct io_ring_ctx *ctx, bool *locked)
{
	if (!*locked) {
		mutex_lock(&ctx->uring_lock);
		*locked = true;
	}
}

/*
 * Don't complete immediately but use deferred completion infrastructure.
 * Protected by ->uring_lock and can only be used either with
 * IO_URING_F_COMPLETE_DEFER or inside a tw handler holding the mutex.
 */
static inline void io_req_complete_defer(struct io_kiocb *req)
	__must_hold(&req->ctx->uring_lock)
{
	struct io_submit_state *state = &req->ctx->submit_state;

	lockdep_assert_held(&req->ctx->uring_lock);

	wq_list_add_tail(&req->comp_list, &state->compl_reqs);
}

static inline void io_commit_cqring_flush(struct io_ring_ctx *ctx)
{
	if (unlikely(ctx->off_timeout_used || ctx->drain_active || ctx->has_evfd))
		__io_commit_cqring_flush(ctx);
}

/* must be called shortly after putting a request */
static inline void io_put_task(struct task_struct *task, int nr)
{
	if (likely(task == current))
		task->io_uring->cached_refs += nr;
	else
		__io_put_task(task, nr);
}

static inline void io_get_task_refs(int nr)
{
	struct io_uring_task *tctx = current->io_uring;

	tctx->cached_refs -= nr;
	if (unlikely(tctx->cached_refs < 0))
		io_task_refs_refill(tctx);
}
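/*
 * Sketch of the reference batching above (illustrative only; nr_sqes just
 * stands for the number of SQEs in the batch): the submit path takes a
 * batch of task refs up front, and each freed request later returns one,
 * so the underlying atomic refcount is only touched when the cache runs
 * dry.
 *
 *	io_get_task_refs(nr_sqes);	// once per submission batch
 *	...
 *	io_put_task(req->task, 1);	// per freed request
 */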
static inline bool io_req_cache_empty(struct io_ring_ctx *ctx)
{
	return !ctx->submit_state.free_list.next;
}

static inline bool io_alloc_req_refill(struct io_ring_ctx *ctx)
{
	if (unlikely(io_req_cache_empty(ctx)))
		return __io_alloc_req_refill(ctx);
	return true;
}

static inline struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
{
	struct io_wq_work_node *node;

	node = wq_stack_extract(&ctx->submit_state.free_list);
	return container_of(node, struct io_kiocb, comp_list);
}

static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
{
	return likely(!(ctx->flags & IORING_SETUP_DEFER_TASKRUN) ||
		      ctx->submitter_task == current);
}

#endif