#ifndef IOU_CORE_H
#define IOU_CORE_H

#include <linux/errno.h>
#include <linux/lockdep.h>
#include <linux/io_uring_types.h>
#include "io-wq.h"
#include "slist.h"
#include "filetable.h"

#ifndef CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>
#endif

enum {
	IOU_OK			= 0,
	IOU_ISSUE_SKIP_COMPLETE	= -EIOCBQUEUED,

	/*
	 * Intended only when both REQ_F_POLLED and REQ_F_APOLL_MULTISHOT
	 * are set to indicate to the poll runner that multishot should be
	 * removed and the result is set on req->cqe.res.
	 */
	IOU_STOP_MULTISHOT	= -ECANCELED,
};

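/*
 * Illustrative sketch, not part of this header: how an opcode issue handler
 * is expected to use the codes above. The handler and its do_frobnicate()
 * helper are made up for the example; only the return-value convention is
 * real. On -EAGAIN with IO_URING_F_NONBLOCK set, the core will retry the
 * request from a context that may block; on IOU_OK it posts the CQE from
 * the result stashed in req->cqe.
 *
 *	static int io_frobnicate(struct io_kiocb *req, unsigned int issue_flags)
 *	{
 *		int ret = do_frobnicate(req, issue_flags);
 *
 *		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
 *			return -EAGAIN;
 *		io_req_set_res(req, ret, 0);
 *		return IOU_OK;
 *	}
 */
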
struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx);
bool io_req_cqe_overflow(struct io_kiocb *req);
int io_run_task_work_sig(void);
void io_req_complete_failed(struct io_kiocb *req, s32 res);
void __io_req_complete(struct io_kiocb *req, unsigned issue_flags);
void io_req_complete_post(struct io_kiocb *req);
void __io_req_complete_post(struct io_kiocb *req);
bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags,
		     bool allow_overflow);
bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags,
		     bool allow_overflow);
void __io_commit_cqring_flush(struct io_ring_ctx *ctx);

struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages);

struct file *io_file_get_normal(struct io_kiocb *req, int fd);
struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
			       unsigned issue_flags);

static inline bool io_req_ffs_set(struct io_kiocb *req)
{
	return req->flags & REQ_F_FIXED_FILE;
}

bool io_is_uring_fops(struct file *file);
bool io_alloc_async_data(struct io_kiocb *req);
void io_req_task_work_add(struct io_kiocb *req);
void io_req_tw_post_queue(struct io_kiocb *req, s32 res, u32 cflags);
void io_req_task_queue(struct io_kiocb *req);
void io_queue_iowq(struct io_kiocb *req, bool *dont_use);
void io_req_task_complete(struct io_kiocb *req, bool *locked);
void io_req_task_queue_fail(struct io_kiocb *req, int ret);
void io_req_task_submit(struct io_kiocb *req, bool *locked);
void tctx_task_work(struct callback_head *cb);
__cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
int io_uring_alloc_task_context(struct task_struct *task,
				struct io_ring_ctx *ctx);

int io_poll_issue(struct io_kiocb *req, bool *locked);
int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr);
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin);
void io_free_batch_list(struct io_ring_ctx *ctx, struct io_wq_work_node *node);
int io_req_prep_async(struct io_kiocb *req);

struct io_wq_work *io_wq_free_work(struct io_wq_work *work);
void io_wq_submit_work(struct io_wq_work *work);

void io_free_req(struct io_kiocb *req);
void io_queue_next(struct io_kiocb *req);
void __io_put_task(struct task_struct *task, int nr);
void io_task_refs_refill(struct io_uring_task *tctx);
bool __io_alloc_req_refill(struct io_ring_ctx *ctx);

bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
			bool cancel_all);

#define io_for_each_link(pos, head) \
	for (pos = (head); pos; pos = pos->link)

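/*
 * Usage sketch for io_for_each_link() above (illustrative only): walk the
 * req->link chain rooted at @head and test each member. The matches()
 * predicate is hypothetical; the iteration pattern mirrors how the
 * cancelation paths inspect a whole link chain.
 *
 *	struct io_kiocb *cur;
 *
 *	io_for_each_link(cur, head) {
 *		if (matches(cur))
 *			return true;
 *	}
 *	return false;
 */
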
static inline void io_cq_lock(struct io_ring_ctx *ctx)
	__acquires(ctx->completion_lock)
{
	spin_lock(&ctx->completion_lock);
}

void io_cq_unlock_post(struct io_ring_ctx *ctx);

static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
{
	if (likely(ctx->cqe_cached < ctx->cqe_sentinel)) {
		struct io_uring_cqe *cqe = ctx->cqe_cached;

		ctx->cached_cq_tail++;
		ctx->cqe_cached++;
		if (ctx->flags & IORING_SETUP_CQE32)
			ctx->cqe_cached++;
		return cqe;
	}

	return __io_get_cqe(ctx);
}

static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx,
				     struct io_kiocb *req)
{
	struct io_uring_cqe *cqe;

	/*
	 * If we can't get a cq entry, userspace overflowed the
	 * submission (by quite a lot). Increment the overflow count in
	 * the ring.
	 */
	cqe = io_get_cqe(ctx);
	if (unlikely(!cqe))
		return io_req_cqe_overflow(req);

	trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
				req->cqe.res, req->cqe.flags,
				(req->flags & REQ_F_CQE32_INIT) ? req->extra1 : 0,
				(req->flags & REQ_F_CQE32_INIT) ? req->extra2 : 0);

	memcpy(cqe, &req->cqe, sizeof(*cqe));

	if (ctx->flags & IORING_SETUP_CQE32) {
		u64 extra1 = 0, extra2 = 0;

		if (req->flags & REQ_F_CQE32_INIT) {
			extra1 = req->extra1;
			extra2 = req->extra2;
		}

		WRITE_ONCE(cqe->big_cqe[0], extra1);
		WRITE_ONCE(cqe->big_cqe[1], extra2);
	}
	return true;
}

static inline void req_set_fail(struct io_kiocb *req)
{
	req->flags |= REQ_F_FAIL;
	if (req->flags & REQ_F_CQE_SKIP) {
		req->flags &= ~REQ_F_CQE_SKIP;
		req->flags |= REQ_F_SKIP_LINK_CQES;
	}
}

static inline void io_req_set_res(struct io_kiocb *req, s32 res, u32 cflags)
{
	req->cqe.res = res;
	req->cqe.flags = cflags;
}

static inline bool req_has_async_data(struct io_kiocb *req)
{
	return req->flags & REQ_F_ASYNC_DATA;
}

static inline void io_put_file(struct file *file)
{
	if (file)
		fput(file);
}

static inline void io_ring_submit_unlock(struct io_ring_ctx *ctx,
					 unsigned issue_flags)
{
	lockdep_assert_held(&ctx->uring_lock);
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_unlock(&ctx->uring_lock);
}

static inline void io_ring_submit_lock(struct io_ring_ctx *ctx,
				       unsigned issue_flags)
{
	/*
	 * "Normal" inline submissions always hold the uring_lock, since we
	 * grab it from the system call. Same is true for the SQPOLL offload.
	 * The only exception is when we've detached the request and issue it
	 * from an async worker thread; grab the lock for that case.
	 */
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_lock(&ctx->uring_lock);
	lockdep_assert_held(&ctx->uring_lock);
}

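/*
 * Illustrative pattern for the two helpers above (not part of this header):
 * code that may run either inline, with ->uring_lock already held, or from
 * an unlocked context (e.g. io-wq) brackets its ctx accesses with this pair
 * and forwards the caller's issue_flags. The "update fixed file/buffer
 * state" step is a placeholder.
 *
 *	io_ring_submit_lock(ctx, issue_flags);
 *	... update fixed file/buffer state in ctx ...
 *	io_ring_submit_unlock(ctx, issue_flags);
 *
 * When IO_URING_F_UNLOCKED is not set, neither call touches the mutex and
 * only the lockdep assertion runs, so the same code path serves both cases.
 */
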
static inline void io_commit_cqring(struct io_ring_ctx *ctx)
{
	/* order cqe stores with ring update */
	smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
}

static inline void io_cqring_wake(struct io_ring_ctx *ctx)
{
	/*
	 * wake_up_all() may seem excessive, but io_wake_function() and
	 * io_should_wake() handle the termination of the loop and only
	 * wake as many waiters as we need to.
	 */
	if (wq_has_sleeper(&ctx->cq_wait))
		wake_up_all(&ctx->cq_wait);
}

static inline bool io_sqring_full(struct io_ring_ctx *ctx)
{
	struct io_rings *r = ctx->rings;

	return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
}

static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;

	/* make sure SQ entry isn't read before tail */
	return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
}

static inline bool io_run_task_work(void)
{
	if (test_thread_flag(TIF_NOTIFY_SIGNAL)) {
		__set_current_state(TASK_RUNNING);
		clear_notify_signal();
		if (task_work_pending(current))
			task_work_run();
		return true;
	}

	return false;
}

static inline void io_tw_lock(struct io_ring_ctx *ctx, bool *locked)
{
	if (!*locked) {
		mutex_lock(&ctx->uring_lock);
		*locked = true;
	}
}

/*
 * Don't complete immediately but use deferred completion infrastructure.
 * Protected by ->uring_lock and can only be used either with
 * IO_URING_F_COMPLETE_DEFER or inside a tw handler holding the mutex.
 */
static inline void io_req_complete_defer(struct io_kiocb *req)
	__must_hold(&req->ctx->uring_lock)
{
	struct io_submit_state *state = &req->ctx->submit_state;

	lockdep_assert_held(&req->ctx->uring_lock);

	wq_list_add_tail(&req->comp_list, &state->compl_reqs);
}

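/*
 * Usage sketch for io_req_complete_defer() (illustrative, not part of this
 * header): a completion path that holds ->uring_lock, either because
 * IO_URING_F_COMPLETE_DEFER was passed or because it runs from task_work
 * with the lock held, batches the completion instead of posting a CQE
 * immediately.
 *
 *	io_req_set_res(req, res, cflags);
 *	if (issue_flags & IO_URING_F_COMPLETE_DEFER)
 *		io_req_complete_defer(req);
 *	else
 *		io_req_complete_post(req);
 *
 * Deferred requests queue up on ctx->submit_state.compl_reqs and are flushed
 * as one batch under the completion lock later in the submit/task_work path.
 */
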
static inline void io_commit_cqring_flush(struct io_ring_ctx *ctx)
{
	if (unlikely(ctx->off_timeout_used || ctx->drain_active || ctx->has_evfd))
		__io_commit_cqring_flush(ctx);
}

/* must be called somewhat shortly after putting a request */
static inline void io_put_task(struct task_struct *task, int nr)
{
	if (likely(task == current))
		task->io_uring->cached_refs += nr;
	else
		__io_put_task(task, nr);
}

static inline void io_get_task_refs(int nr)
{
	struct io_uring_task *tctx = current->io_uring;

	tctx->cached_refs -= nr;
	if (unlikely(tctx->cached_refs < 0))
		io_task_refs_refill(tctx);
}

static inline bool io_req_cache_empty(struct io_ring_ctx *ctx)
{
	return !ctx->submit_state.free_list.next;
}

static inline bool io_alloc_req_refill(struct io_ring_ctx *ctx)
{
	if (unlikely(io_req_cache_empty(ctx)))
		return __io_alloc_req_refill(ctx);
	return true;
}

static inline struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
{
	struct io_wq_work_node *node;

	node = wq_stack_extract(&ctx->submit_state.free_list);
	return container_of(node, struct io_kiocb, comp_list);
}

#endif