#ifndef IOU_CORE_H
#define IOU_CORE_H

#include <linux/errno.h>
#include <linux/lockdep.h>
#include <linux/io_uring_types.h>
#include <uapi/linux/eventpoll.h>
#include "io-wq.h"
#include "slist.h"
#include "filetable.h"

#ifndef CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>
#endif

enum {
	IOU_OK			= 0,
	IOU_ISSUE_SKIP_COMPLETE	= -EIOCBQUEUED,

	/*
	 * Intended only when IO_URING_F_MULTISHOT is passed, to indicate
	 * to the poll runner that multishot should be removed and the
	 * result is set on req->cqe.res.
	 */
	IOU_STOP_MULTISHOT	= -ECANCELED,
};
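/*
 * Illustrative sketch only (io_foo() and do_foo() are hypothetical, not
 * real opcode handlers): an issue handler is expected to translate its
 * outcome into the codes above, roughly like so:
 *
 *	static int io_foo(struct io_kiocb *req, unsigned int issue_flags)
 *	{
 *		int ret = do_foo(req);
 *
 *		if (ret == -EAGAIN && (issue_flags & IO_URING_F_NONBLOCK))
 *			return -EAGAIN;		// core retries via io-wq
 *		io_req_set_res(req, ret, 0);
 *		return IOU_OK;			// core posts the CQE
 *	}
 */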
struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx, bool overflow);
bool io_req_cqe_overflow(struct io_kiocb *req);
int io_run_task_work_sig(struct io_ring_ctx *ctx);
int __io_run_local_work(struct io_ring_ctx *ctx, bool *locked);
int io_run_local_work(struct io_ring_ctx *ctx);
void io_req_defer_failed(struct io_kiocb *req, s32 res);
void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags);
bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
bool io_aux_cqe(struct io_ring_ctx *ctx, bool defer, u64 user_data, s32 res, u32 cflags,
		bool allow_overflow);
void __io_commit_cqring_flush(struct io_ring_ctx *ctx);

struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages);

struct file *io_file_get_normal(struct io_kiocb *req, int fd);
struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
			       unsigned issue_flags);

static inline bool io_req_ffs_set(struct io_kiocb *req)
{
	return req->flags & REQ_F_FIXED_FILE;
}

void __io_req_task_work_add(struct io_kiocb *req, bool allow_local);
bool io_is_uring_fops(struct file *file);
bool io_alloc_async_data(struct io_kiocb *req);
void io_req_task_queue(struct io_kiocb *req);
void io_queue_iowq(struct io_kiocb *req, bool *dont_use);
void io_req_task_complete(struct io_kiocb *req, bool *locked);
void io_req_task_queue_fail(struct io_kiocb *req, int ret);
void io_req_task_submit(struct io_kiocb *req, bool *locked);
void tctx_task_work(struct callback_head *cb);
__cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
int io_uring_alloc_task_context(struct task_struct *task,
				struct io_ring_ctx *ctx);

int io_poll_issue(struct io_kiocb *req, bool *locked);
int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr);
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin);
void io_free_batch_list(struct io_ring_ctx *ctx, struct io_wq_work_node *node);
int io_req_prep_async(struct io_kiocb *req);

struct io_wq_work *io_wq_free_work(struct io_wq_work *work);
void io_wq_submit_work(struct io_wq_work *work);

void io_free_req(struct io_kiocb *req);
void io_queue_next(struct io_kiocb *req);
void __io_put_task(struct task_struct *task, int nr);
void io_task_refs_refill(struct io_uring_task *tctx);
bool __io_alloc_req_refill(struct io_ring_ctx *ctx);

bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
			bool cancel_all);

#define io_lockdep_assert_cq_locked(ctx)				\
	do {								\
		if (ctx->flags & IORING_SETUP_IOPOLL) {			\
			lockdep_assert_held(&ctx->uring_lock);		\
		} else if (!ctx->task_complete) {			\
			lockdep_assert_held(&ctx->completion_lock);	\
		} else if (ctx->submitter_task->flags & PF_EXITING) {	\
			lockdep_assert(current_work());			\
		} else {						\
			lockdep_assert(current == ctx->submitter_task);	\
		}							\
	} while (0)

static inline void io_req_task_work_add(struct io_kiocb *req)
{
	__io_req_task_work_add(req, true);
}

#define io_for_each_link(pos, head) \
	for (pos = (head); pos; pos = pos->link)

void io_cq_unlock_post(struct io_ring_ctx *ctx);

static inline struct io_uring_cqe *io_get_cqe_overflow(struct io_ring_ctx *ctx,
						       bool overflow)
{
	io_lockdep_assert_cq_locked(ctx);

	if (likely(ctx->cqe_cached < ctx->cqe_sentinel)) {
		struct io_uring_cqe *cqe = ctx->cqe_cached;

		ctx->cached_cq_tail++;
		ctx->cqe_cached++;
		if (ctx->flags & IORING_SETUP_CQE32)
			ctx->cqe_cached++;
		return cqe;
	}

	return __io_get_cqe(ctx, overflow);
}
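/*
 * The fast path above hands out slots from an already-mapped window
 * (cqe_cached up to cqe_sentinel), so the common case is plain pointer
 * arithmetic. On IORING_SETUP_CQE32 rings a logical CQE spans two
 * struct io_uring_cqe slots, hence cqe_cached advances twice while
 * cached_cq_tail (counted in logical CQEs) advances once. Only when the
 * window is exhausted does __io_get_cqe() recompute from the ring and,
 * if need be, take the overflow path.
 */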
static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
{
	return io_get_cqe_overflow(ctx, false);
}

static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx,
				     struct io_kiocb *req)
{
	struct io_uring_cqe *cqe;

	/*
	 * If we can't get a cq entry, userspace overflowed the
	 * submission (by quite a lot). Increment the overflow count in
	 * the ring.
	 */
	cqe = io_get_cqe(ctx);
	if (unlikely(!cqe))
		return false;

	trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
				req->cqe.res, req->cqe.flags,
				(req->flags & REQ_F_CQE32_INIT) ? req->extra1 : 0,
				(req->flags & REQ_F_CQE32_INIT) ? req->extra2 : 0);

	memcpy(cqe, &req->cqe, sizeof(*cqe));

	if (ctx->flags & IORING_SETUP_CQE32) {
		u64 extra1 = 0, extra2 = 0;

		if (req->flags & REQ_F_CQE32_INIT) {
			extra1 = req->extra1;
			extra2 = req->extra2;
		}

		WRITE_ONCE(cqe->big_cqe[0], extra1);
		WRITE_ONCE(cqe->big_cqe[1], extra2);
	}
	return true;
}

static inline bool io_fill_cqe_req(struct io_ring_ctx *ctx,
				   struct io_kiocb *req)
{
	if (likely(__io_fill_cqe_req(ctx, req)))
		return true;
	return io_req_cqe_overflow(req);
}

static inline void req_set_fail(struct io_kiocb *req)
{
	req->flags |= REQ_F_FAIL;
	if (req->flags & REQ_F_CQE_SKIP) {
		req->flags &= ~REQ_F_CQE_SKIP;
		req->flags |= REQ_F_SKIP_LINK_CQES;
	}
}

static inline void io_req_set_res(struct io_kiocb *req, s32 res, u32 cflags)
{
	req->cqe.res = res;
	req->cqe.flags = cflags;
}

static inline bool req_has_async_data(struct io_kiocb *req)
{
	return req->flags & REQ_F_ASYNC_DATA;
}

static inline void io_put_file(struct file *file)
{
	if (file)
		fput(file);
}

static inline void io_ring_submit_unlock(struct io_ring_ctx *ctx,
					 unsigned issue_flags)
{
	lockdep_assert_held(&ctx->uring_lock);
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_unlock(&ctx->uring_lock);
}

static inline void io_ring_submit_lock(struct io_ring_ctx *ctx,
				       unsigned issue_flags)
{
	/*
	 * "Normal" inline submissions always hold the uring_lock, since we
	 * grab it from the system call. Same is true for the SQPOLL offload.
	 * The only exception is when we've detached the request and issue it
	 * from an async worker thread; grab the lock for that case.
	 */
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_lock(&ctx->uring_lock);
	lockdep_assert_held(&ctx->uring_lock);
}
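/*
 * A minimal usage sketch (the body is made up, the pattern is the
 * point): helpers that may run either inline, with uring_lock already
 * held, or from an unlocked async context bracket shared ring state
 * with the pair above:
 *
 *	io_ring_submit_lock(ctx, issue_flags);
 *	// ... look up / update ctx->file_table or buffer state ...
 *	io_ring_submit_unlock(ctx, issue_flags);
 *
 * IO_URING_F_UNLOCKED is what records which of the two contexts the
 * request is being issued from.
 */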
static inline void io_commit_cqring(struct io_ring_ctx *ctx)
{
	/* order cqe stores with ring update */
	smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
}

/* requires smp_mb() prior, see wq_has_sleeper() */
static inline void __io_cqring_wake(struct io_ring_ctx *ctx)
{
	/*
	 * Trigger waitqueue handler on all waiters on our waitqueue. This
	 * won't necessarily wake up all the tasks, io_should_wake() will make
	 * that decision.
	 *
	 * Pass in EPOLLIN|EPOLL_URING_WAKE as the poll wakeup key. The latter
	 * is set in the mask so that if we recurse back into our own poll
	 * waitqueue handlers, we know we have a dependency on eventfd or
	 * epoll and should terminate multishot poll at that point.
	 */
	if (waitqueue_active(&ctx->cq_wait))
		__wake_up(&ctx->cq_wait, TASK_NORMAL, 0,
			  poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
}

static inline void io_cqring_wake(struct io_ring_ctx *ctx)
{
	smp_mb();
	__io_cqring_wake(ctx);
}

static inline bool io_sqring_full(struct io_ring_ctx *ctx)
{
	struct io_rings *r = ctx->rings;

	return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
}

static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;

	/* make sure SQ entry isn't read before tail */
	return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
}
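/*
 * Worked example with made-up values: if userspace has published
 * sq.tail == 130 and the kernel has consumed cached_sq_head == 128,
 * io_sqring_entries() returns 2; io_sqring_full() reports true once the
 * difference reaches sq_entries. The unsigned subtraction is wrap-safe,
 * and the acquire load of the tail pairs with userspace's release store
 * so SQE contents are visible before they are consumed.
 */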
static inline int io_run_task_work(void)
{
	/*
	 * Always check-and-clear the task_work notification signal. With how
	 * signaling works for task_work, we can find it set with nothing to
	 * run. We need to clear it for that case, like get_signal() does.
	 */
	if (test_thread_flag(TIF_NOTIFY_SIGNAL))
		clear_notify_signal();
	if (task_work_pending(current)) {
		__set_current_state(TASK_RUNNING);
		task_work_run();
		return 1;
	}

	return 0;
}

static inline bool io_task_work_pending(struct io_ring_ctx *ctx)
{
	return task_work_pending(current) || !wq_list_empty(&ctx->work_llist);
}

static inline int io_run_local_work_locked(struct io_ring_ctx *ctx)
{
	bool locked;
	int ret;

	if (llist_empty(&ctx->work_llist))
		return 0;

	locked = true;
	ret = __io_run_local_work(ctx, &locked);
	/* shouldn't happen! */
	if (WARN_ON_ONCE(!locked))
		mutex_lock(&ctx->uring_lock);
	return ret;
}

static inline void io_tw_lock(struct io_ring_ctx *ctx, bool *locked)
{
	if (!*locked) {
		mutex_lock(&ctx->uring_lock);
		*locked = true;
	}
}
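/*
 * Sketch of the calling convention only (io_foo_tw() is hypothetical):
 * task_work callbacks share one "locked" token so that a whole batch
 * can take uring_lock once and pass it along:
 *
 *	static void io_foo_tw(struct io_kiocb *req, bool *locked)
 *	{
 *		io_tw_lock(req->ctx, locked);
 *		// ... complete or requeue req under uring_lock ...
 *	}
 *
 * The runner (see tctx_task_work()) is then responsible for dropping
 * the mutex once after the last callback has run.
 */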
/*
 * Don't complete immediately but use deferred completion infrastructure.
 * Protected by ->uring_lock and can only be used either with
 * IO_URING_F_COMPLETE_DEFER or inside a tw handler holding the mutex.
 */
static inline void io_req_complete_defer(struct io_kiocb *req)
	__must_hold(&req->ctx->uring_lock)
{
	struct io_submit_state *state = &req->ctx->submit_state;

	lockdep_assert_held(&req->ctx->uring_lock);

	wq_list_add_tail(&req->comp_list, &state->compl_reqs);
}

static inline void io_commit_cqring_flush(struct io_ring_ctx *ctx)
{
	if (unlikely(ctx->off_timeout_used || ctx->drain_active || ctx->has_evfd))
		__io_commit_cqring_flush(ctx);
}

/* must be called shortly after putting a request */
static inline void io_put_task(struct task_struct *task, int nr)
{
	if (likely(task == current))
		task->io_uring->cached_refs += nr;
	else
		__io_put_task(task, nr);
}

static inline void io_get_task_refs(int nr)
{
	struct io_uring_task *tctx = current->io_uring;

	tctx->cached_refs -= nr;
	if (unlikely(tctx->cached_refs < 0))
		io_task_refs_refill(tctx);
}

static inline bool io_req_cache_empty(struct io_ring_ctx *ctx)
{
	return !ctx->submit_state.free_list.next;
}

static inline bool io_alloc_req_refill(struct io_ring_ctx *ctx)
{
	if (unlikely(io_req_cache_empty(ctx)))
		return __io_alloc_req_refill(ctx);
	return true;
}

static inline struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
{
	struct io_wq_work_node *node;

	node = wq_stack_extract(&ctx->submit_state.free_list);
	return container_of(node, struct io_kiocb, comp_list);
}

static inline bool io_allowed_defer_tw_run(struct io_ring_ctx *ctx)
{
	return likely(ctx->submitter_task == current);
}

static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
{
	return likely(!(ctx->flags & IORING_SETUP_DEFER_TASKRUN) ||
		      ctx->submitter_task == current);
}
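/*
 * For IORING_SETUP_DEFER_TASKRUN rings, deferred work may only be run
 * by the submitter task recorded at ring setup, which is what the two
 * helpers above gate on. Wait paths typically bail out early when
 * called from the wrong task, e.g.:
 *
 *	if (!io_allowed_run_tw(ctx))
 *		return -EEXIST;
 */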
static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res)
{
	io_req_set_res(req, res, 0);
	req->io_task_work.func = io_req_task_complete;
	io_req_task_work_add(req);
}

#endif