#ifndef IOU_CORE_H
#define IOU_CORE_H

#include <linux/errno.h>
#include <linux/lockdep.h>
#include <linux/resume_user_mode.h>
#include <linux/kasan.h>
#include <linux/io_uring_types.h>
#include <uapi/linux/eventpoll.h>
#include "io-wq.h"
#include "slist.h"
#include "filetable.h"

#ifndef CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>
#endif

enum {
	/* don't use deferred task_work */
	IOU_F_TWQ_FORCE_NORMAL		= 1,

	/*
	 * A hint not to wake right away but to delay until enough task_work
	 * items are queued to match the number of CQEs the task is waiting
	 * for.
	 *
	 * Must not be used with requests generating more than one CQE.
	 * It's also ignored unless IORING_SETUP_DEFER_TASKRUN is set.
	 */
	IOU_F_TWQ_LAZY_WAKE		= 2,
};

enum {
	IOU_OK			= 0,
	IOU_ISSUE_SKIP_COMPLETE	= -EIOCBQUEUED,

	/*
	 * Intended only when IO_URING_F_MULTISHOT is passed, to indicate
	 * to the poll runner that multishot should be removed and the
	 * result set on req->cqe.res.
	 */
	IOU_STOP_MULTISHOT	= -ECANCELED,
};
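/*
 * Illustrative sketch (hypothetical caller, assuming a request that posts
 * exactly one CQE on an IORING_SETUP_DEFER_TASKRUN ring): the lazy-wake
 * hint is passed when queueing the completion task_work,
 *
 *	__io_req_task_work_add(req, IOU_F_TWQ_LAZY_WAKE);
 *
 * whereas plain io_req_task_work_add() below passes no flags and wakes
 * the waiting task right away.
 */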
struct io_uring_cqe *__io_get_cqe(struct io_ring_ctx *ctx, bool overflow);
bool io_req_cqe_overflow(struct io_kiocb *req);
int io_run_task_work_sig(struct io_ring_ctx *ctx);
void io_req_defer_failed(struct io_kiocb *req, s32 res);
void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags);
bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
bool io_aux_cqe(struct io_ring_ctx *ctx, bool defer, u64 user_data, s32 res, u32 cflags,
		bool allow_overflow);
void __io_commit_cqring_flush(struct io_ring_ctx *ctx);

struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages);

struct file *io_file_get_normal(struct io_kiocb *req, int fd);
struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
			       unsigned issue_flags);

static inline bool io_req_ffs_set(struct io_kiocb *req)
{
	return req->flags & REQ_F_FIXED_FILE;
}

void __io_req_task_work_add(struct io_kiocb *req, unsigned flags);
bool io_is_uring_fops(struct file *file);
bool io_alloc_async_data(struct io_kiocb *req);
void io_req_task_queue(struct io_kiocb *req);
void io_queue_iowq(struct io_kiocb *req, struct io_tw_state *ts_dont_use);
void io_req_task_complete(struct io_kiocb *req, struct io_tw_state *ts);
void io_req_task_queue_fail(struct io_kiocb *req, int ret);
void io_req_task_submit(struct io_kiocb *req, struct io_tw_state *ts);
void tctx_task_work(struct callback_head *cb);
__cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
int io_uring_alloc_task_context(struct task_struct *task,
				struct io_ring_ctx *ctx);

int io_ring_add_registered_file(struct io_uring_task *tctx, struct file *file,
				int start, int end);

int io_poll_issue(struct io_kiocb *req, struct io_tw_state *ts);
int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr);
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin);
void io_free_batch_list(struct io_ring_ctx *ctx, struct io_wq_work_node *node);
int io_req_prep_async(struct io_kiocb *req);

struct io_wq_work *io_wq_free_work(struct io_wq_work *work);
void io_wq_submit_work(struct io_wq_work *work);

void io_free_req(struct io_kiocb *req);
void io_queue_next(struct io_kiocb *req);
void io_task_refs_refill(struct io_uring_task *tctx);
bool __io_alloc_req_refill(struct io_ring_ctx *ctx);

bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
			bool cancel_all);

#define io_lockdep_assert_cq_locked(ctx)				\
	do {								\
		lockdep_assert(in_task());				\
									\
		if (ctx->flags & IORING_SETUP_IOPOLL) {			\
			lockdep_assert_held(&ctx->uring_lock);		\
		} else if (!ctx->task_complete) {			\
			lockdep_assert_held(&ctx->completion_lock);	\
		} else if (ctx->submitter_task->flags & PF_EXITING) {	\
			lockdep_assert(current_work());			\
		} else {						\
			lockdep_assert(current == ctx->submitter_task);	\
		}							\
	} while (0)
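/*
 * Illustrative sketch (hypothetical helper, not part of this file): any
 * path about to write CQEs directly should assert CQ ownership first:
 *
 *	static void my_cqe_posting_path(struct io_ring_ctx *ctx)
 *	{
 *		io_lockdep_assert_cq_locked(ctx);
 *		... touch ctx->cqe_cached / ctx->cached_cq_tail ...
 *	}
 *
 * What "locked" means depends on the ring flavour: the uring_lock for
 * IOPOLL, the completion_lock when completions aren't task-private, and
 * submitter-task identity (or exit-time fallback work) otherwise.
 */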
static inline void io_req_task_work_add(struct io_kiocb *req)
{
	__io_req_task_work_add(req, 0);
}

#define io_for_each_link(pos, head) \
	for (pos = (head); pos; pos = pos->link)

void io_cq_unlock_post(struct io_ring_ctx *ctx);

static inline struct io_uring_cqe *io_get_cqe_overflow(struct io_ring_ctx *ctx,
							bool overflow)
{
	io_lockdep_assert_cq_locked(ctx);

	if (likely(ctx->cqe_cached < ctx->cqe_sentinel)) {
		struct io_uring_cqe *cqe = ctx->cqe_cached;

		ctx->cached_cq_tail++;
		ctx->cqe_cached++;
		if (ctx->flags & IORING_SETUP_CQE32)
			ctx->cqe_cached++;
		return cqe;
	}

	return __io_get_cqe(ctx, overflow);
}

static inline struct io_uring_cqe *io_get_cqe(struct io_ring_ctx *ctx)
{
	return io_get_cqe_overflow(ctx, false);
}

static inline bool __io_fill_cqe_req(struct io_ring_ctx *ctx,
				     struct io_kiocb *req)
{
	struct io_uring_cqe *cqe;

	/*
	 * If we can't get a cq entry, userspace overflowed the
	 * submission (by quite a lot). Return false so the caller can
	 * fall back to the CQE overflow handling.
	 */
	cqe = io_get_cqe(ctx);
	if (unlikely(!cqe))
		return false;

	trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
				req->cqe.res, req->cqe.flags,
				(req->flags & REQ_F_CQE32_INIT) ? req->extra1 : 0,
				(req->flags & REQ_F_CQE32_INIT) ? req->extra2 : 0);

	memcpy(cqe, &req->cqe, sizeof(*cqe));

	if (ctx->flags & IORING_SETUP_CQE32) {
		u64 extra1 = 0, extra2 = 0;

		if (req->flags & REQ_F_CQE32_INIT) {
			extra1 = req->extra1;
			extra2 = req->extra2;
		}

		WRITE_ONCE(cqe->big_cqe[0], extra1);
		WRITE_ONCE(cqe->big_cqe[1], extra2);
	}
	return true;
}

static inline bool io_fill_cqe_req(struct io_ring_ctx *ctx,
				   struct io_kiocb *req)
{
	if (likely(__io_fill_cqe_req(ctx, req)))
		return true;
	return io_req_cqe_overflow(req);
}
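/*
 * Illustrative flow (a sketch, not a helper defined here): with the CQ
 * "lock" held as per io_lockdep_assert_cq_locked(), a completion is
 * typically published as
 *
 *	io_fill_cqe_req(ctx, req);	// falls back to overflow on failure
 *	io_commit_cqring(ctx);		// release-store of the CQ tail, below
 *	io_cqring_wake(ctx);		// wake CQ waiters, below
 *
 * io_cq_unlock_post() bundles the commit/wake steps for the common case.
 */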
static inline void req_set_fail(struct io_kiocb *req)
{
	req->flags |= REQ_F_FAIL;
	if (req->flags & REQ_F_CQE_SKIP) {
		req->flags &= ~REQ_F_CQE_SKIP;
		req->flags |= REQ_F_SKIP_LINK_CQES;
	}
}

static inline void io_req_set_res(struct io_kiocb *req, s32 res, u32 cflags)
{
	req->cqe.res = res;
	req->cqe.flags = cflags;
}

static inline bool req_has_async_data(struct io_kiocb *req)
{
	return req->flags & REQ_F_ASYNC_DATA;
}

static inline void io_put_file(struct file *file)
{
	if (file)
		fput(file);
}

static inline void io_ring_submit_unlock(struct io_ring_ctx *ctx,
					 unsigned issue_flags)
{
	lockdep_assert_held(&ctx->uring_lock);
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_unlock(&ctx->uring_lock);
}

static inline void io_ring_submit_lock(struct io_ring_ctx *ctx,
				       unsigned issue_flags)
{
	/*
	 * "Normal" inline submissions always hold the uring_lock, since we
	 * grab it from the system call. Same is true for the SQPOLL offload.
	 * The only exception is when we've detached the request and issue it
	 * from an async worker thread; grab the lock for that case.
	 */
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_lock(&ctx->uring_lock);
	lockdep_assert_held(&ctx->uring_lock);
}
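/*
 * Illustrative sketch (hypothetical opcode handler): state protected by
 * the uring_lock is accessed under the conditional pair,
 *
 *	io_ring_submit_lock(ctx, issue_flags);
 *	... mutate uring_lock-protected ctx state ...
 *	io_ring_submit_unlock(ctx, issue_flags);
 *
 * which is a no-op (bar the lockdep assertion) for inline submission and
 * only takes the mutex when invoked with IO_URING_F_UNLOCKED from io-wq.
 */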
static inline void io_commit_cqring(struct io_ring_ctx *ctx)
{
	/* order cqe stores with ring update */
	smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
}

static inline void io_poll_wq_wake(struct io_ring_ctx *ctx)
{
	if (wq_has_sleeper(&ctx->poll_wq))
		__wake_up(&ctx->poll_wq, TASK_NORMAL, 0,
				poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
}

static inline void io_cqring_wake(struct io_ring_ctx *ctx)
{
	/*
	 * Trigger waitqueue handler on all waiters on our waitqueue. This
	 * won't necessarily wake up all the tasks, io_should_wake() will make
	 * that decision.
	 *
	 * Pass in EPOLLIN|EPOLL_URING_WAKE as the poll wakeup key. The latter
	 * is set in the mask so that if we recurse back into our own poll
	 * waitqueue handlers, we know we have a dependency between eventfd or
	 * epoll and should terminate multishot poll at that point.
	 */
	if (wq_has_sleeper(&ctx->cq_wait))
		__wake_up(&ctx->cq_wait, TASK_NORMAL, 0,
				poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
}

static inline bool io_sqring_full(struct io_ring_ctx *ctx)
{
	struct io_rings *r = ctx->rings;

	return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
}

static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;
	unsigned int entries;

	/* make sure SQ entry isn't read before tail */
	entries = smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
	return min(entries, ctx->sq_entries);
}

static inline int io_run_task_work(void)
{
	/*
	 * Always check-and-clear the task_work notification signal. With how
	 * signaling works for task_work, we can find it set with nothing to
	 * run. We need to clear it for that case, like get_signal() does.
	 */
	if (test_thread_flag(TIF_NOTIFY_SIGNAL))
		clear_notify_signal();
	/*
	 * PF_IO_WORKER never returns to userspace, so check here if we have
	 * notify work that needs processing.
	 */
	if (current->flags & PF_IO_WORKER &&
	    test_thread_flag(TIF_NOTIFY_RESUME)) {
		__set_current_state(TASK_RUNNING);
		resume_user_mode_work(NULL);
	}
	if (task_work_pending(current)) {
		__set_current_state(TASK_RUNNING);
		task_work_run();
		return 1;
	}

	return 0;
}
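/*
 * Illustrative wait-loop sketch (hypothetical caller): waiters typically
 * drain pending task_work before going back to sleep, e.g.
 *
 *	do {
 *		... check wake condition, sleep or poll ...
 *	} while (io_run_task_work());
 *
 * A return of 1 means some task_work ran and the CQ tail may have moved,
 * so the wake condition should be rechecked before sleeping again.
 */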
static inline bool io_task_work_pending(struct io_ring_ctx *ctx)
{
	return task_work_pending(current) || !wq_list_empty(&ctx->work_llist);
}

static inline void io_tw_lock(struct io_ring_ctx *ctx, struct io_tw_state *ts)
{
	if (!ts->locked) {
		mutex_lock(&ctx->uring_lock);
		ts->locked = true;
	}
}

/*
 * Don't complete immediately but use deferred completion infrastructure.
 * Protected by ->uring_lock and can only be used either with
 * IO_URING_F_COMPLETE_DEFER or inside a tw handler holding the mutex.
 */
static inline void io_req_complete_defer(struct io_kiocb *req)
	__must_hold(&req->ctx->uring_lock)
{
	struct io_submit_state *state = &req->ctx->submit_state;

	lockdep_assert_held(&req->ctx->uring_lock);

	wq_list_add_tail(&req->comp_list, &state->compl_reqs);
}

static inline void io_commit_cqring_flush(struct io_ring_ctx *ctx)
{
	if (unlikely(ctx->off_timeout_used || ctx->drain_active ||
		     ctx->has_evfd || ctx->poll_activated))
		__io_commit_cqring_flush(ctx);
}

static inline void io_get_task_refs(int nr)
{
	struct io_uring_task *tctx = current->io_uring;

	tctx->cached_refs -= nr;
	if (unlikely(tctx->cached_refs < 0))
		io_task_refs_refill(tctx);
}

static inline bool io_req_cache_empty(struct io_ring_ctx *ctx)
{
	return !ctx->submit_state.free_list.next;
}

extern struct kmem_cache *req_cachep;
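/*
 * Illustrative sketch (hypothetical issue-path caller): requests come out
 * of a per-ring cache rather than straight from the slab,
 *
 *	struct io_kiocb *req;
 *
 *	if (unlikely(!io_alloc_req(ctx, &req)))
 *		return -EAGAIN;		// cache empty and refill failed
 *
 * io_extract_req() below is the fast path that assumes a non-empty cache;
 * io_alloc_req() refills it via __io_alloc_req_refill() first if needed.
 */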
static inline struct io_kiocb *io_extract_req(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req;

	req = container_of(ctx->submit_state.free_list.next, struct io_kiocb,
			   comp_list);
	kasan_unpoison_object_data(req_cachep, req);
	wq_stack_extract(&ctx->submit_state.free_list);
	return req;
}

static inline bool io_alloc_req(struct io_ring_ctx *ctx, struct io_kiocb **req)
{
	if (unlikely(io_req_cache_empty(ctx))) {
		if (!__io_alloc_req_refill(ctx))
			return false;
	}
	*req = io_extract_req(ctx);
	return true;
}

static inline bool io_allowed_defer_tw_run(struct io_ring_ctx *ctx)
{
	return likely(ctx->submitter_task == current);
}

static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
{
	return likely(!(ctx->flags & IORING_SETUP_DEFER_TASKRUN) ||
		      ctx->submitter_task == current);
}

static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res)
{
	io_req_set_res(req, res, 0);
	req->io_task_work.func = io_req_task_complete;
	io_req_task_work_add(req);
}

/*
 * IORING_SETUP_SQE128 contexts allocate twice the normal SQE size for each
 * slot.
 */
static inline size_t uring_sqe_size(struct io_ring_ctx *ctx)
{
	if (ctx->flags & IORING_SETUP_SQE128)
		return 2 * sizeof(struct io_uring_sqe);
	return sizeof(struct io_uring_sqe);
}
#endif