#ifndef IOU_CORE_H
#define IOU_CORE_H

#include <linux/errno.h>
#include <linux/lockdep.h>
#include <linux/resume_user_mode.h>
#include <linux/kasan.h>
#include <linux/io_uring_types.h>
#include <uapi/linux/eventpoll.h>
#include "io-wq.h"
#include "slist.h"
#include "filetable.h"

#ifndef CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>
#endif

enum {
	/*
	 * A hint to not wake right away but delay until there are enough
	 * tw's queued to match the number of CQEs the task is waiting for.
	 *
	 * Must not be used with requests generating more than one CQE.
	 * It's also ignored unless IORING_SETUP_DEFER_TASKRUN is set.
	 */
	IOU_F_TWQ_LAZY_WAKE		= 1,
};

enum {
	IOU_OK				= 0,
	IOU_ISSUE_SKIP_COMPLETE		= -EIOCBQUEUED,

	/*
	 * Intended only when IO_URING_F_MULTISHOT is passed, to indicate to
	 * the poll runner that multishot should be removed and the result
	 * set on req->cqe.res.
	 */
	IOU_STOP_MULTISHOT		= -ECANCELED,
};
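/*
 * Illustrative use of the return codes above: an opcode's ->issue() handler
 * normally fills in the result with io_req_set_res() and returns IOU_OK so
 * that the core posts the completion, or returns IOU_ISSUE_SKIP_COMPLETE
 * when the completion will be generated later (e.g. via poll retry or
 * task_work). io_foo_issue() and do_foo() are hypothetical, for illustration
 * only:
 *
 *	static int io_foo_issue(struct io_kiocb *req, unsigned int issue_flags)
 *	{
 *		int ret = do_foo(req, issue_flags);
 *
 *		if (ret == -EIOCBQUEUED)
 *			return IOU_ISSUE_SKIP_COMPLETE;
 *		io_req_set_res(req, ret, 0);
 *		return IOU_OK;
 *	}
 */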
bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow);
void io_req_cqe_overflow(struct io_kiocb *req);
int io_run_task_work_sig(struct io_ring_ctx *ctx);
void io_req_defer_failed(struct io_kiocb *req, s32 res);
void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags);
bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
bool io_fill_cqe_req_aux(struct io_kiocb *req, bool defer, s32 res, u32 cflags);
void __io_commit_cqring_flush(struct io_ring_ctx *ctx);

struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages);

struct file *io_file_get_normal(struct io_kiocb *req, int fd);
struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
			       unsigned issue_flags);

void __io_req_task_work_add(struct io_kiocb *req, unsigned flags);
bool io_is_uring_fops(struct file *file);
bool io_alloc_async_data(struct io_kiocb *req);
void io_req_task_queue(struct io_kiocb *req);
void io_queue_iowq(struct io_kiocb *req, struct io_tw_state *ts_dont_use);
void io_req_task_complete(struct io_kiocb *req, struct io_tw_state *ts);
void io_req_task_queue_fail(struct io_kiocb *req, int ret);
void io_req_task_submit(struct io_kiocb *req, struct io_tw_state *ts);
void tctx_task_work(struct callback_head *cb);
__cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
int io_uring_alloc_task_context(struct task_struct *task,
				struct io_ring_ctx *ctx);

int io_ring_add_registered_file(struct io_uring_task *tctx, struct file *file,
				int start, int end);

int io_poll_issue(struct io_kiocb *req, struct io_tw_state *ts);
int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr);
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin);
void __io_submit_flush_completions(struct io_ring_ctx *ctx);
int io_req_prep_async(struct io_kiocb *req);

struct io_wq_work *io_wq_free_work(struct io_wq_work *work);
void io_wq_submit_work(struct io_wq_work *work);

void io_free_req(struct io_kiocb *req);
void io_queue_next(struct io_kiocb *req);
void io_task_refs_refill(struct io_uring_task *tctx);
bool __io_alloc_req_refill(struct io_ring_ctx *ctx);

bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
			bool cancel_all);

void *io_mem_alloc(size_t size);
void io_mem_free(void *ptr);
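/*
 * Sketch of how the task_work hooks above are driven (illustrative only):
 * most callers go through io_req_task_work_add() below, which is
 * __io_req_task_work_add(req, 0). A request that generates a single CQE on
 * a ring set up with IORING_SETUP_DEFER_TASKRUN may pass IOU_F_TWQ_LAZY_WAKE
 * to defer the wakeup until enough completions are queued:
 *
 *	req->io_task_work.func = io_req_task_complete;
 *	__io_req_task_work_add(req, IOU_F_TWQ_LAZY_WAKE);
 */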
#if defined(CONFIG_PROVE_LOCKING)
static inline void io_lockdep_assert_cq_locked(struct io_ring_ctx *ctx)
{
	lockdep_assert(in_task());

	if (ctx->flags & IORING_SETUP_IOPOLL) {
		lockdep_assert_held(&ctx->uring_lock);
	} else if (!ctx->task_complete) {
		lockdep_assert_held(&ctx->completion_lock);
	} else if (ctx->submitter_task) {
		/*
		 * ->submitter_task may be NULL and we can still post a CQE,
		 * if the ring has been set up with IORING_SETUP_R_DISABLED.
		 * Not from an SQE, as those cannot be submitted, but via
		 * updating tagged resources.
		 */
		if (ctx->submitter_task->flags & PF_EXITING)
			lockdep_assert(current_work());
		else
			lockdep_assert(current == ctx->submitter_task);
	}
}
#else
static inline void io_lockdep_assert_cq_locked(struct io_ring_ctx *ctx)
{
}
#endif

static inline void io_req_task_work_add(struct io_kiocb *req)
{
	__io_req_task_work_add(req, 0);
}

#define io_for_each_link(pos, head) \
	for (pos = (head); pos; pos = pos->link)

static inline bool io_get_cqe_overflow(struct io_ring_ctx *ctx,
				       struct io_uring_cqe **ret,
				       bool overflow)
{
	io_lockdep_assert_cq_locked(ctx);

	if (unlikely(ctx->cqe_cached >= ctx->cqe_sentinel)) {
		if (unlikely(!io_cqe_cache_refill(ctx, overflow)))
			return false;
	}
	*ret = ctx->cqe_cached;
	ctx->cached_cq_tail++;
	ctx->cqe_cached++;
	if (ctx->flags & IORING_SETUP_CQE32)
		ctx->cqe_cached++;
	return true;
}

static inline bool io_get_cqe(struct io_ring_ctx *ctx, struct io_uring_cqe **ret)
{
	return io_get_cqe_overflow(ctx, ret, false);
}
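/*
 * Rough shape of the CQE cache used by io_get_cqe_overflow() above:
 * ctx->cqe_cached points at the next unused CQE in a contiguous run of the
 * CQ ring and ctx->cqe_sentinel at the end of that run. io_cqe_cache_refill()
 * recomputes the run and returns false when no free CQEs are available, in
 * which case the caller falls back to the CQ overflow path. On
 * IORING_SETUP_CQE32 rings each completion spans two 16-byte slots, hence
 * cqe_cached is bumped twice per CQE.
 */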
static __always_inline bool io_fill_cqe_req(struct io_ring_ctx *ctx,
					    struct io_kiocb *req)
{
	struct io_uring_cqe *cqe;

	/*
	 * If we can't get a cq entry, userspace overflowed the
	 * submission (by quite a lot). Increment the overflow count in
	 * the ring.
	 */
	if (unlikely(!io_get_cqe(ctx, &cqe)))
		return false;

	if (trace_io_uring_complete_enabled())
		trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
					req->cqe.res, req->cqe.flags,
					req->big_cqe.extra1, req->big_cqe.extra2);

	memcpy(cqe, &req->cqe, sizeof(*cqe));
	if (ctx->flags & IORING_SETUP_CQE32) {
		memcpy(cqe->big_cqe, &req->big_cqe, sizeof(*cqe));
		memset(&req->big_cqe, 0, sizeof(req->big_cqe));
	}
	return true;
}

static inline void req_set_fail(struct io_kiocb *req)
{
	req->flags |= REQ_F_FAIL;
	if (req->flags & REQ_F_CQE_SKIP) {
		req->flags &= ~REQ_F_CQE_SKIP;
		req->flags |= REQ_F_SKIP_LINK_CQES;
	}
}

static inline void io_req_set_res(struct io_kiocb *req, s32 res, u32 cflags)
{
	req->cqe.res = res;
	req->cqe.flags = cflags;
}

static inline bool req_has_async_data(struct io_kiocb *req)
{
	return req->flags & REQ_F_ASYNC_DATA;
}

static inline void io_put_file(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_FIXED_FILE) && req->file)
		fput(req->file);
}

static inline void io_ring_submit_unlock(struct io_ring_ctx *ctx,
					 unsigned issue_flags)
{
	lockdep_assert_held(&ctx->uring_lock);
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_unlock(&ctx->uring_lock);
}

static inline void io_ring_submit_lock(struct io_ring_ctx *ctx,
				       unsigned issue_flags)
{
	/*
	 * "Normal" inline submissions always hold the uring_lock, since we
	 * grab it from the system call. Same is true for the SQPOLL offload.
	 * The only exception is when we've detached the request and issue it
	 * from an async worker thread; grab the lock for that case.
	 */
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_lock(&ctx->uring_lock);
	lockdep_assert_held(&ctx->uring_lock);
}
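/*
 * Typical pairing of the two helpers above, from a handler that may run
 * either inline (uring_lock already held) or from io-wq with
 * IO_URING_F_UNLOCKED set in its issue_flags:
 *
 *	io_ring_submit_lock(ctx, issue_flags);
 *	... touch state protected by ctx->uring_lock ...
 *	io_ring_submit_unlock(ctx, issue_flags);
 *
 * When the lock is already held both calls reduce to lockdep assertions, so
 * the same pattern works regardless of how the request was issued.
 */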
static inline void io_commit_cqring(struct io_ring_ctx *ctx)
{
	/* order cqe stores with ring update */
	smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
}

static inline void io_poll_wq_wake(struct io_ring_ctx *ctx)
{
	if (wq_has_sleeper(&ctx->poll_wq))
		__wake_up(&ctx->poll_wq, TASK_NORMAL, 0,
				poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
}

static inline void io_cqring_wake(struct io_ring_ctx *ctx)
{
	/*
	 * Trigger waitqueue handler on all waiters on our waitqueue. This
	 * won't necessarily wake up all the tasks; io_should_wake() will make
	 * that decision.
	 *
	 * Pass in EPOLLIN|EPOLL_URING_WAKE as the poll wakeup key. The latter
	 * is set in the mask so that if we recurse back into our own poll
	 * waitqueue handlers, we know we have a dependency between eventfd or
	 * epoll and should terminate multishot poll at that point.
	 */
	if (wq_has_sleeper(&ctx->cq_wait))
		__wake_up(&ctx->cq_wait, TASK_NORMAL, 0,
				poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
}

static inline bool io_sqring_full(struct io_ring_ctx *ctx)
{
	struct io_rings *r = ctx->rings;

	return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
}

static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;
	unsigned int entries;

	/* make sure SQ entry isn't read before tail */
	entries = smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
	return min(entries, ctx->sq_entries);
}
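/*
 * Ordering note for the ring accessors above: the kernel publishes CQEs by
 * updating cq.tail with smp_store_release() in io_commit_cqring(), which
 * pairs with the acquire load userspace performs on cq.tail before reading
 * CQEs. Symmetrically, userspace publishes SQEs with a release store to
 * sq.tail, which is why io_sqring_entries() reads it with smp_load_acquire()
 * before any SQE contents are read.
 */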
static inline int io_run_task_work(void)
{
	/*
	 * Always check-and-clear the task_work notification signal. With how
	 * signaling works for task_work, we can find it set with nothing to
	 * run. We need to clear it for that case, like get_signal() does.
	 */
	if (test_thread_flag(TIF_NOTIFY_SIGNAL))
		clear_notify_signal();
	/*
	 * PF_IO_WORKER never returns to userspace, so check here if we have
	 * notify work that needs processing.
	 */
	if (current->flags & PF_IO_WORKER &&
	    test_thread_flag(TIF_NOTIFY_RESUME)) {
		__set_current_state(TASK_RUNNING);
		resume_user_mode_work(NULL);
	}
	if (task_work_pending(current)) {
		__set_current_state(TASK_RUNNING);
		task_work_run();
		return 1;
	}

	return 0;
}

static inline bool io_task_work_pending(struct io_ring_ctx *ctx)
{
	return task_work_pending(current) || !wq_list_empty(&ctx->work_llist);
}

static inline void io_tw_lock(struct io_ring_ctx *ctx, struct io_tw_state *ts)
{
	if (!ts->locked) {
		mutex_lock(&ctx->uring_lock);
		ts->locked = true;
	}
}

/*
 * Don't complete immediately but use deferred completion infrastructure.
 * Protected by ->uring_lock and can only be used either with
 * IO_URING_F_COMPLETE_DEFER or inside a tw handler holding the mutex.
 */
static inline void io_req_complete_defer(struct io_kiocb *req)
	__must_hold(&req->ctx->uring_lock)
{
	struct io_submit_state *state = &req->ctx->submit_state;

	lockdep_assert_held(&req->ctx->uring_lock);

	wq_list_add_tail(&req->comp_list, &state->compl_reqs);
}

static inline void io_commit_cqring_flush(struct io_ring_ctx *ctx)
{
	if (unlikely(ctx->off_timeout_used || ctx->drain_active ||
		     ctx->has_evfd || ctx->poll_activated))
		__io_commit_cqring_flush(ctx);
}

static inline void io_get_task_refs(int nr)
{
	struct io_uring_task *tctx = current->io_uring;

	tctx->cached_refs -= nr;
	if (unlikely(tctx->cached_refs < 0))
		io_task_refs_refill(tctx);
}

static inline bool io_req_cache_empty(struct io_ring_ctx *ctx)
{
	return !ctx->submit_state.free_list.next;
}

extern struct kmem_cache *req_cachep;
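/*
 * Request allocation sketch: io_alloc_req() below pops a request from the
 * per-ctx free_list, refilling it in bulk from req_cachep via
 * __io_alloc_req_refill() when it runs dry. The free_list lives in
 * ctx->submit_state, so this is expected to run with ctx->uring_lock held,
 * as the SQE submission path does. Error handling in the snippet is
 * illustrative only:
 *
 *	struct io_kiocb *req;
 *
 *	if (unlikely(!io_alloc_req(ctx, &req)))
 *		return -ENOMEM;
 */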
static inline struct io_kiocb *io_extract_req(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req;

	req = container_of(ctx->submit_state.free_list.next, struct io_kiocb,
				comp_list);
	wq_stack_extract(&ctx->submit_state.free_list);
	return req;
}

static inline bool io_alloc_req(struct io_ring_ctx *ctx, struct io_kiocb **req)
{
	if (unlikely(io_req_cache_empty(ctx))) {
		if (!__io_alloc_req_refill(ctx))
			return false;
	}
	*req = io_extract_req(ctx);
	return true;
}

static inline bool io_allowed_defer_tw_run(struct io_ring_ctx *ctx)
{
	return likely(ctx->submitter_task == current);
}

static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
{
	return likely(!(ctx->flags & IORING_SETUP_DEFER_TASKRUN) ||
		      ctx->submitter_task == current);
}

static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res)
{
	io_req_set_res(req, res, 0);
	req->io_task_work.func = io_req_task_complete;
	io_req_task_work_add(req);
}

/*
 * IORING_SETUP_SQE128 contexts allocate twice the normal SQE size for each
 * slot.
 */
static inline size_t uring_sqe_size(struct io_ring_ctx *ctx)
{
	if (ctx->flags & IORING_SETUP_SQE128)
		return 2 * sizeof(struct io_uring_sqe);
	return sizeof(struct io_uring_sqe);
}
#endif