#ifndef IOU_CORE_H
#define IOU_CORE_H

#include <linux/errno.h>
#include <linux/lockdep.h>
#include <linux/resume_user_mode.h>
#include <linux/kasan.h>
#include <linux/io_uring_types.h>
#include <uapi/linux/eventpoll.h>
#include "io-wq.h"
#include "slist.h"
#include "filetable.h"

#ifndef CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>
#endif

enum {
	/*
	 * A hint to not wake right away but delay until there are enough
	 * task_work items queued to match the number of CQEs the task is
	 * waiting for.
	 *
	 * Must not be used with requests generating more than one CQE.
	 * It's also ignored unless IORING_SETUP_DEFER_TASKRUN is set.
	 */
	IOU_F_TWQ_LAZY_WAKE		= 1,
};

enum {
	IOU_OK				= 0,
	IOU_ISSUE_SKIP_COMPLETE		= -EIOCBQUEUED,

	/*
	 * Requeue the task_work to restart operations on this request. The
	 * actual value isn't important, it just must not collide with an
	 * otherwise used error code, while still fitting within the
	 * -MAX_ERRNO..-1 range so it can be passed around internally.
	 */
	IOU_REQUEUE			= -3072,

	/*
	 * Intended only when IO_URING_F_MULTISHOT is passed, to indicate
	 * to the poll runner that the multishot arm should be removed;
	 * the result is set on req->cqe.res.
	 */
	IOU_STOP_MULTISHOT		= -ECANCELED,
};

bool io_cqe_cache_refill(struct io_ring_ctx *ctx, bool overflow);
void io_req_cqe_overflow(struct io_kiocb *req);
int io_run_task_work_sig(struct io_ring_ctx *ctx);
void io_req_defer_failed(struct io_kiocb *req, s32 res);
void io_req_complete_post(struct io_kiocb *req, unsigned issue_flags);
bool io_post_aux_cqe(struct io_ring_ctx *ctx, u64 user_data, s32 res, u32 cflags);
bool io_fill_cqe_req_aux(struct io_kiocb *req, bool defer, s32 res, u32 cflags);
void __io_commit_cqring_flush(struct io_ring_ctx *ctx);

struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages);

struct file *io_file_get_normal(struct io_kiocb *req, int fd);
struct file *io_file_get_fixed(struct io_kiocb *req, int fd,
			       unsigned issue_flags);

void __io_req_task_work_add(struct io_kiocb *req, unsigned flags);
bool io_is_uring_fops(struct file *file);
bool io_alloc_async_data(struct io_kiocb *req);
void io_req_task_queue(struct io_kiocb *req);
void io_queue_iowq(struct io_kiocb *req, struct io_tw_state *ts_dont_use);
void io_req_task_complete(struct io_kiocb *req, struct io_tw_state *ts);
void io_req_task_queue_fail(struct io_kiocb *req, int ret);
void io_req_task_submit(struct io_kiocb *req, struct io_tw_state *ts);
void tctx_task_work(struct callback_head *cb);
__cold void io_uring_cancel_generic(bool cancel_all, struct io_sq_data *sqd);
int io_uring_alloc_task_context(struct task_struct *task,
				struct io_ring_ctx *ctx);

int io_ring_add_registered_file(struct io_uring_task *tctx, struct file *file,
				int start, int end);

int io_poll_issue(struct io_kiocb *req, struct io_tw_state *ts);
int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr);
int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin);
void __io_submit_flush_completions(struct io_ring_ctx *ctx);
int io_req_prep_async(struct io_kiocb *req);

struct io_wq_work *io_wq_free_work(struct io_wq_work *work);
void io_wq_submit_work(struct io_wq_work *work);

void io_free_req(struct io_kiocb *req);
void io_queue_next(struct io_kiocb *req);
void io_task_refs_refill(struct io_uring_task *tctx);
bool __io_alloc_req_refill(struct io_ring_ctx *ctx);
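
/*
 * Cancellation matching helper (see io_uring.c for the full rules):
 * returns true if @head should be cancelled on behalf of @task. A
 * mismatched task never matches; with @cancel_all set, everything
 * belonging to the task matches; otherwise only link chains carrying
 * REQ_F_INFLIGHT state do.
 */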
bool io_match_task_safe(struct io_kiocb *head, struct task_struct *task,
			bool cancel_all);

void *io_mem_alloc(size_t size);
void io_mem_free(void *ptr);

#if defined(CONFIG_PROVE_LOCKING)
static inline void io_lockdep_assert_cq_locked(struct io_ring_ctx *ctx)
{
	lockdep_assert(in_task());

	if (ctx->flags & IORING_SETUP_IOPOLL) {
		lockdep_assert_held(&ctx->uring_lock);
	} else if (!ctx->task_complete) {
		lockdep_assert_held(&ctx->completion_lock);
	} else if (ctx->submitter_task) {
		/*
		 * ->submitter_task may be NULL and we can still post a CQE,
		 * if the ring has been setup with IORING_SETUP_R_DISABLED.
		 * Not from an SQE, as those cannot be submitted, but via
		 * updating tagged resources.
		 */
		if (ctx->submitter_task->flags & PF_EXITING)
			lockdep_assert(current_work());
		else
			lockdep_assert(current == ctx->submitter_task);
	}
}
#else
static inline void io_lockdep_assert_cq_locked(struct io_ring_ctx *ctx)
{
}
#endif

static inline void io_req_task_work_add(struct io_kiocb *req)
{
	__io_req_task_work_add(req, 0);
}

#define io_for_each_link(pos, head) \
	for (pos = (head); pos; pos = pos->link)

static inline bool io_get_cqe_overflow(struct io_ring_ctx *ctx,
				       struct io_uring_cqe **ret,
				       bool overflow)
{
	io_lockdep_assert_cq_locked(ctx);

	if (unlikely(ctx->cqe_cached >= ctx->cqe_sentinel)) {
		if (unlikely(!io_cqe_cache_refill(ctx, overflow)))
			return false;
	}
	*ret = ctx->cqe_cached;
	ctx->cached_cq_tail++;
	ctx->cqe_cached++;
	if (ctx->flags & IORING_SETUP_CQE32)
		ctx->cqe_cached++;
	return true;
}

static inline bool io_get_cqe(struct io_ring_ctx *ctx, struct io_uring_cqe **ret)
{
	return io_get_cqe_overflow(ctx, ret, false);
}
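
/*
 * For illustration, a simplified sketch of how a CQE is posted with the
 * helpers above (mirroring the aux-CQE posting path in io_uring.c); the
 * caller must hold the CQ lock for the ctx:
 *
 *	struct io_uring_cqe *cqe;
 *
 *	if (io_get_cqe(ctx, &cqe)) {
 *		WRITE_ONCE(cqe->user_data, user_data);
 *		WRITE_ONCE(cqe->res, res);
 *		WRITE_ONCE(cqe->flags, cflags);
 *	}
 *	io_commit_cqring(ctx);
 *	io_cqring_wake(ctx);
 */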
static __always_inline bool io_fill_cqe_req(struct io_ring_ctx *ctx,
					    struct io_kiocb *req)
{
	struct io_uring_cqe *cqe;

	/*
	 * If we can't get a cq entry, userspace overflowed the
	 * submission (by quite a lot). Increment the overflow count in
	 * the ring.
	 */
	if (unlikely(!io_get_cqe(ctx, &cqe)))
		return false;

	if (trace_io_uring_complete_enabled())
		trace_io_uring_complete(req->ctx, req, req->cqe.user_data,
					req->cqe.res, req->cqe.flags,
					req->big_cqe.extra1, req->big_cqe.extra2);

	memcpy(cqe, &req->cqe, sizeof(*cqe));
	if (ctx->flags & IORING_SETUP_CQE32) {
		memcpy(cqe->big_cqe, &req->big_cqe, sizeof(req->big_cqe));
		memset(&req->big_cqe, 0, sizeof(req->big_cqe));
	}
	return true;
}

static inline void req_set_fail(struct io_kiocb *req)
{
	req->flags |= REQ_F_FAIL;
	if (req->flags & REQ_F_CQE_SKIP) {
		req->flags &= ~REQ_F_CQE_SKIP;
		req->flags |= REQ_F_SKIP_LINK_CQES;
	}
}

static inline void io_req_set_res(struct io_kiocb *req, s32 res, u32 cflags)
{
	req->cqe.res = res;
	req->cqe.flags = cflags;
}

static inline bool req_has_async_data(struct io_kiocb *req)
{
	return req->flags & REQ_F_ASYNC_DATA;
}

static inline void io_put_file(struct io_kiocb *req)
{
	if (!(req->flags & REQ_F_FIXED_FILE) && req->file)
		fput(req->file);
}

static inline void io_ring_submit_unlock(struct io_ring_ctx *ctx,
					 unsigned issue_flags)
{
	lockdep_assert_held(&ctx->uring_lock);
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_unlock(&ctx->uring_lock);
}

static inline void io_ring_submit_lock(struct io_ring_ctx *ctx,
				       unsigned issue_flags)
{
	/*
	 * "Normal" inline submissions always hold the uring_lock, since we
	 * grab it from the system call. Same is true for the SQPOLL offload.
	 * The only exception is when we've detached the request and issue it
	 * from an async worker thread, grab the lock for that case.
	 */
	if (issue_flags & IO_URING_F_UNLOCKED)
		mutex_lock(&ctx->uring_lock);
	lockdep_assert_held(&ctx->uring_lock);
}
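
/*
 * Sketch of the intended pairing (illustrative only, foo_op_issue() is
 * a made-up opcode handler): an issue path that may run either inline
 * or from io-wq brackets uring_lock-protected state like so:
 *
 *	static int foo_op_issue(struct io_kiocb *req, unsigned issue_flags)
 *	{
 *		struct io_ring_ctx *ctx = req->ctx;
 *
 *		io_ring_submit_lock(ctx, issue_flags);
 *		// ... touch state protected by ctx->uring_lock ...
 *		io_ring_submit_unlock(ctx, issue_flags);
 *		return IOU_OK;
 *	}
 */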
static inline void io_commit_cqring(struct io_ring_ctx *ctx)
{
	/* order cqe stores with ring update */
	smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail);
}

static inline void io_poll_wq_wake(struct io_ring_ctx *ctx)
{
	if (wq_has_sleeper(&ctx->poll_wq))
		__wake_up(&ctx->poll_wq, TASK_NORMAL, 0,
				poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
}

static inline void io_cqring_wake(struct io_ring_ctx *ctx)
{
	/*
	 * Trigger waitqueue handler on all waiters on our waitqueue. This
	 * won't necessarily wake up all the tasks, io_should_wake() will make
	 * that decision.
	 *
	 * Pass in EPOLLIN|EPOLL_URING_WAKE as the poll wakeup key. The latter
	 * is set in the mask so that if we recurse back into our own poll
	 * waitqueue handlers, we know we have a dependency between eventfd or
	 * epoll and should terminate multishot poll at that point.
	 */
	if (wq_has_sleeper(&ctx->cq_wait))
		__wake_up(&ctx->cq_wait, TASK_NORMAL, 0,
				poll_to_key(EPOLL_URING_WAKE | EPOLLIN));
}

static inline bool io_sqring_full(struct io_ring_ctx *ctx)
{
	struct io_rings *r = ctx->rings;

	return READ_ONCE(r->sq.tail) - ctx->cached_sq_head == ctx->sq_entries;
}

static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
{
	struct io_rings *rings = ctx->rings;
	unsigned int entries;

	/* make sure SQ entry isn't read before tail */
	entries = smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
	return min(entries, ctx->sq_entries);
}

static inline int io_run_task_work(void)
{
	/*
	 * Always check-and-clear the task_work notification signal. With how
	 * signaling works for task_work, we can find it set with nothing to
	 * run. We need to clear it for that case, like get_signal() does.
	 */
	if (test_thread_flag(TIF_NOTIFY_SIGNAL))
		clear_notify_signal();
	/*
	 * PF_IO_WORKER never returns to userspace, so check here if we have
	 * notify work that needs processing.
	 */
	if (current->flags & PF_IO_WORKER &&
	    test_thread_flag(TIF_NOTIFY_RESUME)) {
		__set_current_state(TASK_RUNNING);
		resume_user_mode_work(NULL);
	}
	if (task_work_pending(current)) {
		__set_current_state(TASK_RUNNING);
		task_work_run();
		return 1;
	}

	return 0;
}

static inline bool io_task_work_pending(struct io_ring_ctx *ctx)
{
	return task_work_pending(current) || !llist_empty(&ctx->work_llist);
}

static inline void io_tw_lock(struct io_ring_ctx *ctx, struct io_tw_state *ts)
{
	if (!ts->locked) {
		mutex_lock(&ctx->uring_lock);
		ts->locked = true;
	}
}
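
/*
 * Sketch of how a task_work handler cooperates with io_tw_state
 * (illustrative only, foo_tw_complete() is a made-up handler):
 *
 *	static void foo_tw_complete(struct io_kiocb *req, struct io_tw_state *ts)
 *	{
 *		io_tw_lock(req->ctx, ts);
 *		// ... work requiring ->uring_lock ...
 *	}
 *
 * tctx_task_work() runs a batch of such handlers and drops the mutex
 * once at the end if ts->locked got set, amortising the locking cost
 * over the whole batch.
 */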
/*
 * Don't complete immediately but use deferred completion infrastructure.
 * Protected by ->uring_lock and can only be used either with
 * IO_URING_F_COMPLETE_DEFER or inside a tw handler holding the mutex.
 */
static inline void io_req_complete_defer(struct io_kiocb *req)
	__must_hold(&req->ctx->uring_lock)
{
	struct io_submit_state *state = &req->ctx->submit_state;

	lockdep_assert_held(&req->ctx->uring_lock);

	wq_list_add_tail(&req->comp_list, &state->compl_reqs);
}

static inline void io_commit_cqring_flush(struct io_ring_ctx *ctx)
{
	if (unlikely(ctx->off_timeout_used || ctx->drain_active ||
		     ctx->has_evfd || ctx->poll_activated))
		__io_commit_cqring_flush(ctx);
}

static inline void io_get_task_refs(int nr)
{
	struct io_uring_task *tctx = current->io_uring;

	tctx->cached_refs -= nr;
	if (unlikely(tctx->cached_refs < 0))
		io_task_refs_refill(tctx);
}

static inline bool io_req_cache_empty(struct io_ring_ctx *ctx)
{
	return !ctx->submit_state.free_list.next;
}

extern struct kmem_cache *req_cachep;

static inline struct io_kiocb *io_extract_req(struct io_ring_ctx *ctx)
{
	struct io_kiocb *req;

	req = container_of(ctx->submit_state.free_list.next, struct io_kiocb, comp_list);
	wq_stack_extract(&ctx->submit_state.free_list);
	return req;
}

static inline bool io_alloc_req(struct io_ring_ctx *ctx, struct io_kiocb **req)
{
	if (unlikely(io_req_cache_empty(ctx))) {
		if (!__io_alloc_req_refill(ctx))
			return false;
	}
	*req = io_extract_req(ctx);
	return true;
}

static inline bool io_allowed_defer_tw_run(struct io_ring_ctx *ctx)
{
	return likely(ctx->submitter_task == current);
}

static inline bool io_allowed_run_tw(struct io_ring_ctx *ctx)
{
	return likely(!(ctx->flags & IORING_SETUP_DEFER_TASKRUN) ||
		      ctx->submitter_task == current);
}

static inline void io_req_queue_tw_complete(struct io_kiocb *req, s32 res)
{
	io_req_set_res(req, res, 0);
	req->io_task_work.func = io_req_task_complete;
	io_req_task_work_add(req);
}

/*
 * IORING_SETUP_SQE128 contexts allocate twice the normal SQE size for each
 * slot.
 */
static inline size_t uring_sqe_size(struct io_ring_ctx *ctx)
{
	if (ctx->flags & IORING_SETUP_SQE128)
		return 2 * sizeof(struct io_uring_sqe);
	return sizeof(struct io_uring_sqe);
}
#endif