#ifndef IOU_CORE_H
#define IOU_CORE_H

#include <linux/errno.h>
#include <linux/lockdep.h>
#include <linux/io_uring_types.h>
#include "io-wq.h"
#include "slist.h"
#include "filetable.h"

#ifndef CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>
#endif

enum {
	IOU_OK			= 0,
	IOU_ISSUE_SKIP_COMPLETE	= -EIOCBQUEUED,

	/*
	 * Intended only when both REQ_F_POLLED and REQ_F_APOLL_MULTISHOT
	 * are set to indicate to the poll runner that multishot should be
	 * removed and the result is set on req->cqe.res.
	 */
	IOU_STOP_MULTISHOT	= -ECANCELED,
};
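
/*
 * Illustrative sketch (editorial, not part of the upstream file): an opcode
 * issue handler normally fills in the result and returns one of the codes
 * above. Assuming a hypothetical do_something() helper:
 *
 *	ret = do_something(req, issue_flags);
 *	if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
 *		return -EAGAIN;
 *	io_req_set_res(req, ret, 0);
 *	return IOU_OK;
 *
 * Returning -EAGAIN from a non-blocking issue lets the core punt the request
 * to io-wq, IOU_OK tells the core to post the CQE from req->cqe, and
 * IOU_ISSUE_SKIP_COMPLETE means the handler arranged for the completion to
 * be posted elsewhere.
 */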

struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx);
bool io_req_cqe_overflow(struct io_kiocb *req);
int io_run_task_work_sig(void);
void io_req_complete_failed(struct io_kiocb *req, s32 res);
void __io_req_complete(struct io_kiocb *req, unsigned issue_flags);
void io_req_complete_post(struct io_kiocb *req);
void __io_req_complete_post(struct io_kiocb *req);
bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags,
		     bool allow_overflow);
bool io_fill_cqe_aux(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags,
		     bool allow_overflow);
void __io_commit_cqring_flush(struct io_ring_ctx *ctx);

struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages);

struct file *io_file_get_normal(struct io_kiocb *req, int fd);
struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
			       unsigned issue_flags);

static inline bool io_req_ffs_set(struct io_kiocb *req)
{
	return req->flags & REQ_F_FIXED_FILE;
}

bool io_is_uring_fops(struct file *file);
bool io_alloc_async_data(struct io_kiocb *req);
void io_req_task_work_add(struct io_kiocb *req);
void io_req_tw_post_queue(struct io_kiocb *req, s32 res, u32 cflags);
void io_req_task_queue(struct io_kiocb *req);
void io_queue_iowq(struct io_kiocb *req, bool *dont_use);
void io_req_task_complete(struct io_kiocb *req, bool *locked);
void io_req_task_queue_fail(struct io_kiocb *req, int ret);
void io_req_task_submit(struct io_kiocb *req, bool *locked);
void tctx_task_work(struct callback_head *cb);
__cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
int io_uring_alloc_task_context(struct task_struct *task,
				struct io_ring_ctx *ctx);

int io_poll_issue(struct io_kiocb *req, bool *locked);
int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr);
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin);
void io_free_batch_list(struct io_ring_ctx *ctx, struct io_wq_work_node *node);
int io_req_prep_async(struct io_kiocb *req);

struct io_wq_work *io_wq_free_work(struct io_wq_work *work);
void io_wq_submit_work(struct io_wq_work *work);

void io_free_req(struct io_kiocb *req);
void io_queue_next(struct io_kiocb *req);
void __io_put_task(struct task_struct *task, int nr);

bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
			bool cancel_all);

#define io_for_each_link(pos, head) \
	for (pos = (head); pos; pos = pos->link)

static inline void io_cq_lock(struct io_ring_ctx *ctx)
	__acquires(ctx->completion_lock)
{
	spin_lock(&ctx->completion_lock);
}

void io_cq_unlock_post(struct io_ring_ctx *ctx);

static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
{
	if (likely(ctx->cqe_cached < ctx->cqe_sentinel)) {
		struct io_uring_cqe *cqe = ctx->cqe_cached;

		ctx->cached_cq_tail++;
		ctx->cqe_cached++;
		if (ctx->flags & IORING_SETUP_CQE32)
			ctx->cqe_cached++;
		return cqe;
	}

	return __io_get_cqe(ctx);
}

static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx,
				     struct io_kiocb *req)
{
	struct io_uring_cqe *cqe;

	/*
	 * If we can't get a cq entry, userspace overflowed the
	 * submission (by quite a lot). Increment the overflow count in
	 * the ring.
	 */
	cqe = io_get_cqe(ctx);
	if (unlikely(!cqe))
		return io_req_cqe_overflow(req);

	trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
				req->cqe.res, req->cqe.flags,
				(req->flags & REQ_F_CQE32_INIT) ? req->extra1 : 0,
				(req->flags & REQ_F_CQE32_INIT) ? req->extra2 : 0);

	memcpy(cqe, &req->cqe, sizeof(*cqe));

	if (ctx->flags & IORING_SETUP_CQE32) {
		u64 extra1 = 0, extra2 = 0;

		if (req->flags & REQ_F_CQE32_INIT) {
			extra1 = req->extra1;
			extra2 = req->extra2;
		}

		WRITE_ONCE(cqe->big_cqe[0], extra1);
		WRITE_ONCE(cqe->big_cqe[1], extra2);
	}
	return true;
}
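
/*
 * Illustrative sketch (editorial, not part of the upstream file): posting a
 * completion from the request path roughly follows the pattern below; see
 * io_req_complete_post() in io_uring.c for the real path.
 *
 *	io_cq_lock(ctx);
 *	if (!(req->flags & REQ_F_CQE_SKIP))
 *		__io_fill_cqe_req(ctx, req);
 *	io_cq_unlock_post(ctx);
 *
 * io_cq_unlock_post() commits the CQ ring tail and wakes CQ waiters, so the
 * new CQE only becomes observable by userspace after the lock is dropped.
 */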

static inline void req_set_fail(struct io_kiocb *req)
{
	req->flags |= REQ_F_FAIL;
	if (req->flags & REQ_F_CQE_SKIP) {
		req->flags &= ~REQ_F_CQE_SKIP;
		req->flags |= REQ_F_SKIP_LINK_CQES;
	}
}

static inline void io_req_set_res(struct io_kiocb *req, s32 res, u32 cflags)
{
	req->cqe.res = res;
	req->cqe.flags = cflags;
}

static inline bool req_has_async_data(struct io_kiocb *req)
{
	return req->flags & REQ_F_ASYNC_DATA;
}

static inline void io_put_file(struct file *file)
{
	if (file)
		fput(file);
}

static inline void io_ring_submit_unlock(struct io_ring_ctx *ctx,
					 unsigned issue_flags)
{
	lockdep_assert_held(&ctx->uring_lock);
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_unlock(&ctx->uring_lock);
}

static inline void io_ring_submit_lock(struct io_ring_ctx *ctx,
				       unsigned issue_flags)
{
	/*
	 * "Normal" inline submissions always hold the uring_lock, since we
	 * grab it from the system call. Same is true for the SQPOLL offload.
	 * The only exception is when we've detached the request and issue it
	 * from an async worker thread; grab the lock for that case.
	 */
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_lock(&ctx->uring_lock);
	lockdep_assert_held(&ctx->uring_lock);
}
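
/*
 * Illustrative sketch (editorial, not part of the upstream file): opcode
 * handlers that may run either inline (uring_lock already held) or from an
 * io-wq worker wrap ctx accesses in the conditional lock pair above:
 *
 *	io_ring_submit_lock(ctx, issue_flags);
 *	...critical section touching ctx->file_table, buffers, etc...
 *	io_ring_submit_unlock(ctx, issue_flags);
 *
 * The mutex is only taken and released when IO_URING_F_UNLOCKED is set, i.e.
 * when the request was punted to an async worker.
 */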

static inline void io_commit_cqring(struct io_ring_ctx *ctx)
{
	/* order cqe stores with ring update */
	smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
}

static inline void io_cqring_wake(struct io_ring_ctx *ctx)
{
	/*
	 * wake_up_all() may seem excessive, but io_wake_function() and
	 * io_should_wake() handle the termination of the loop and only
	 * wake as many waiters as we need to.
	 */
	if (wq_has_sleeper(&ctx->cq_wait))
		wake_up_all(&ctx->cq_wait);
}

static inline bool io_sqring_full(struct io_ring_ctx *ctx)
{
	struct io_rings *r = ctx->rings;

	return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
}

static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;

	/* make sure SQ entry isn't read before tail */
	return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
}

static inline bool io_run_task_work(void)
{
	if (test_thread_flag(TIF_NOTIFY_SIGNAL)) {
		__set_current_state(TASK_RUNNING);
		clear_notify_signal();
		if (task_work_pending(current))
			task_work_run();
		return true;
	}

	return false;
}

static inline void io_tw_lock(struct io_ring_ctx *ctx, bool *locked)
{
	if (!*locked) {
		mutex_lock(&ctx->uring_lock);
		*locked = true;
	}
}

/*
 * Don't complete immediately but use deferred completion infrastructure.
 * Protected by ->uring_lock and can only be used either with
 * IO_URING_F_COMPLETE_DEFER or inside a tw handler holding the mutex.
 */
static inline void io_req_complete_defer(struct io_kiocb *req)
	__must_hold(&req->ctx->uring_lock)
{
	struct io_submit_state *state = &req->ctx->submit_state;

	lockdep_assert_held(&req->ctx->uring_lock);

	wq_list_add_tail(&req->comp_list, &state->compl_reqs);
}
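
/*
 * Note (editorial, not part of the upstream file): requests queued on
 * ctx->submit_state.compl_reqs by io_req_complete_defer() are not yet
 * visible to userspace; their CQEs are filled in and the ring committed
 * later, in a batch, by the completion flush in io_uring.c
 * (__io_submit_flush_completions() at the time of writing), still under
 * ->uring_lock.
 */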

static inline void io_commit_cqring_flush(struct io_ring_ctx *ctx)
{
	if (unlikely(ctx->off_timeout_used || ctx->drain_active || ctx->has_evfd))
		__io_commit_cqring_flush(ctx);
}

/* must be called somewhat shortly after putting a request */
static inline void io_put_task(struct task_struct *task, int nr)
{
	if (likely(task == current))
		task->io_uring->cached_refs += nr;
	else
		__io_put_task(task, nr);
}

#endif