// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/io_uring.h>

#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "refs.h"
#include "cancel.h"
#include "timeout.h"

struct io_timeout {
	struct file			*file;
	u32				off;
	u32				target_seq;
	struct list_head		list;
	/* head of the link, used by linked timeouts only */
	struct io_kiocb			*head;
	/* for linked completions */
	struct io_kiocb			*prev;
};

struct io_timeout_rem {
	struct file			*file;
	u64				addr;

	/* timeout update */
	struct timespec64		ts;
	u32				flags;
	bool				ltimeout;
};

static inline bool io_is_timeout_noseq(struct io_kiocb *req)
{
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);

	return !timeout->off;
}

static inline void io_put_req(struct io_kiocb *req)
{
	if (req_ref_put_and_test(req)) {
		io_queue_next(req);
		io_free_req(req);
	}
}

static bool io_kill_timeout(struct io_kiocb *req, int status)
	__must_hold(&req->ctx->timeout_lock)
{
	struct io_timeout_data *io = req->async_data;

	if (hrtimer_try_to_cancel(&io->timer) != -1) {
		struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);

		if (status)
			req_set_fail(req);
		atomic_set(&req->ctx->cq_timeouts,
			atomic_read(&req->ctx->cq_timeouts) + 1);
		list_del_init(&timeout->list);
		io_req_queue_tw_complete(req, status);
		return true;
	}
	return false;
}
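/*
 * Flush offset-based timeouts whose target CQ sequence has been reached.
 * The timeout list is kept sorted, so scanning can stop at the first entry
 * that still waits for more completions.
 */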
__cold void io_flush_timeouts(struct io_ring_ctx *ctx)
{
	u32 seq;
	struct io_timeout *timeout, *tmp;

	spin_lock_irq(&ctx->timeout_lock);
	seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);

	list_for_each_entry_safe(timeout, tmp, &ctx->timeout_list, list) {
		struct io_kiocb *req = cmd_to_io_kiocb(timeout);
		u32 events_needed, events_got;

		if (io_is_timeout_noseq(req))
			break;

		/*
		 * Since seq can easily wrap around over time, subtract
		 * the last seq at which timeouts were flushed before comparing.
		 * Assuming not more than 2^31-1 events have happened since,
		 * these subtractions won't have wrapped, so we can check if
		 * target is in [last_seq, current_seq] by comparing the two.
		 */
		events_needed = timeout->target_seq - ctx->cq_last_tm_flush;
		events_got = seq - ctx->cq_last_tm_flush;
		if (events_got < events_needed)
			break;

		io_kill_timeout(req, 0);
	}
	ctx->cq_last_tm_flush = seq;
	spin_unlock_irq(&ctx->timeout_lock);
}

static void io_req_tw_fail_links(struct io_kiocb *link, bool *locked)
{
	io_tw_lock(link->ctx, locked);
	while (link) {
		struct io_kiocb *nxt = link->link;
		long res = -ECANCELED;

		if (link->flags & REQ_F_FAIL)
			res = link->cqe.res;
		link->link = NULL;
		io_req_set_res(link, res, 0);
		io_req_task_complete(link, locked);
		link = nxt;
	}
}

static void io_fail_links(struct io_kiocb *req)
	__must_hold(&req->ctx->completion_lock)
{
	struct io_kiocb *link = req->link;
	bool ignore_cqes = req->flags & REQ_F_SKIP_LINK_CQES;

	if (!link)
		return;

	while (link) {
		if (ignore_cqes)
			link->flags |= REQ_F_CQE_SKIP;
		else
			link->flags &= ~REQ_F_CQE_SKIP;
		trace_io_uring_fail_link(req, link);
		link = link->link;
	}

	link = req->link;
	link->io_task_work.func = io_req_tw_fail_links;
	io_req_task_work_add(link);
	req->link = NULL;
}

static inline void io_remove_next_linked(struct io_kiocb *req)
{
	struct io_kiocb *nxt = req->link;

	req->link = nxt->link;
	nxt->link = NULL;
}

void io_disarm_next(struct io_kiocb *req)
	__must_hold(&req->ctx->completion_lock)
{
	struct io_kiocb *link = NULL;

	if (req->flags & REQ_F_ARM_LTIMEOUT) {
		link = req->link;
		req->flags &= ~REQ_F_ARM_LTIMEOUT;
		if (link && link->opcode == IORING_OP_LINK_TIMEOUT) {
			io_remove_next_linked(req);
			io_req_queue_tw_complete(link, -ECANCELED);
		}
	} else if (req->flags & REQ_F_LINK_TIMEOUT) {
		struct io_ring_ctx *ctx = req->ctx;

		spin_lock_irq(&ctx->timeout_lock);
		link = io_disarm_linked_timeout(req);
		spin_unlock_irq(&ctx->timeout_lock);
		if (link)
			io_req_queue_tw_complete(link, -ECANCELED);
	}
	if (unlikely((req->flags & REQ_F_FAIL) &&
		     !(req->flags & REQ_F_HARDLINK)))
		io_fail_links(req);
}

struct io_kiocb *__io_disarm_linked_timeout(struct io_kiocb *req,
					    struct io_kiocb *link)
	__must_hold(&req->ctx->completion_lock)
	__must_hold(&req->ctx->timeout_lock)
{
	struct io_timeout_data *io = link->async_data;
	struct io_timeout *timeout = io_kiocb_to_cmd(link, struct io_timeout);

	io_remove_next_linked(req);
	timeout->head = NULL;
	if (hrtimer_try_to_cancel(&io->timer) != -1) {
		list_del(&timeout->list);
		return link;
	}

	return NULL;
}

static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
{
	struct io_timeout_data *data = container_of(timer,
						struct io_timeout_data, timer);
	struct io_kiocb *req = data->req;
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_ring_ctx *ctx = req->ctx;
	unsigned long flags;

	spin_lock_irqsave(&ctx->timeout_lock, flags);
	list_del_init(&timeout->list);
	atomic_set(&req->ctx->cq_timeouts,
		atomic_read(&req->ctx->cq_timeouts) + 1);
	spin_unlock_irqrestore(&ctx->timeout_lock, flags);

	if (!(data->flags & IORING_TIMEOUT_ETIME_SUCCESS))
		req_set_fail(req);

	io_req_set_res(req, -ETIME, 0);
	req->io_task_work.func = io_req_task_complete;
	io_req_task_work_add(req);
	return HRTIMER_NORESTART;
}
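/*
 * Look up a queued timeout matching @cd and try to stop its hrtimer.
 * Returns the request with its timer cancelled and removed from the timeout
 * list, or an ERR_PTR if no match was found (-ENOENT) or the timer callback
 * is already running (-EALREADY).
 */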
static struct io_kiocb *io_timeout_extract(struct io_ring_ctx *ctx,
					   struct io_cancel_data *cd)
	__must_hold(&ctx->timeout_lock)
{
	struct io_timeout *timeout;
	struct io_timeout_data *io;
	struct io_kiocb *req = NULL;

	list_for_each_entry(timeout, &ctx->timeout_list, list) {
		struct io_kiocb *tmp = cmd_to_io_kiocb(timeout);

		if (!(cd->flags & IORING_ASYNC_CANCEL_ANY) &&
		    cd->data != tmp->cqe.user_data)
			continue;
		if (cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY)) {
			if (cd->seq == tmp->work.cancel_seq)
				continue;
			tmp->work.cancel_seq = cd->seq;
		}
		req = tmp;
		break;
	}
	if (!req)
		return ERR_PTR(-ENOENT);

	io = req->async_data;
	if (hrtimer_try_to_cancel(&io->timer) == -1)
		return ERR_PTR(-EALREADY);
	timeout = io_kiocb_to_cmd(req, struct io_timeout);
	list_del_init(&timeout->list);
	return req;
}

int io_timeout_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
	__must_hold(&ctx->completion_lock)
{
	struct io_kiocb *req;

	spin_lock_irq(&ctx->timeout_lock);
	req = io_timeout_extract(ctx, cd);
	spin_unlock_irq(&ctx->timeout_lock);

	if (IS_ERR(req))
		return PTR_ERR(req);
	io_req_task_queue_fail(req, -ECANCELED);
	return 0;
}

static void io_req_task_link_timeout(struct io_kiocb *req, bool *locked)
{
	unsigned issue_flags = *locked ? 0 : IO_URING_F_UNLOCKED;
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_kiocb *prev = timeout->prev;
	int ret = -ENOENT;

	if (prev) {
		if (!(req->task->flags & PF_EXITING)) {
			struct io_cancel_data cd = {
				.ctx		= req->ctx,
				.data		= prev->cqe.user_data,
			};

			ret = io_try_cancel(req->task->io_uring, &cd, issue_flags);
		}
		io_req_set_res(req, ret ?: -ETIME, 0);
		io_req_task_complete(req, locked);
		io_put_req(prev);
	} else {
		io_req_set_res(req, -ETIME, 0);
		io_req_task_complete(req, locked);
	}
}
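/*
 * hrtimer callback for a linked timeout: detach the timeout from the request
 * it is guarding and punt cancelation of that request to task work.
 */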
static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
{
	struct io_timeout_data *data = container_of(timer,
						struct io_timeout_data, timer);
	struct io_kiocb *prev, *req = data->req;
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_ring_ctx *ctx = req->ctx;
	unsigned long flags;

	spin_lock_irqsave(&ctx->timeout_lock, flags);
	prev = timeout->head;
	timeout->head = NULL;

	/*
	 * We don't expect the list to be empty, that will only happen if we
	 * race with the completion of the linked work.
	 */
	if (prev) {
		io_remove_next_linked(prev);
		if (!req_ref_inc_not_zero(prev))
			prev = NULL;
	}
	list_del(&timeout->list);
	timeout->prev = prev;
	spin_unlock_irqrestore(&ctx->timeout_lock, flags);

	req->io_task_work.func = io_req_task_link_timeout;
	io_req_task_work_add(req);
	return HRTIMER_NORESTART;
}

static clockid_t io_timeout_get_clock(struct io_timeout_data *data)
{
	switch (data->flags & IORING_TIMEOUT_CLOCK_MASK) {
	case IORING_TIMEOUT_BOOTTIME:
		return CLOCK_BOOTTIME;
	case IORING_TIMEOUT_REALTIME:
		return CLOCK_REALTIME;
	default:
		/* can't happen, vetted at prep time */
		WARN_ON_ONCE(1);
		fallthrough;
	case 0:
		return CLOCK_MONOTONIC;
	}
}

static int io_linked_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
				    struct timespec64 *ts, enum hrtimer_mode mode)
	__must_hold(&ctx->timeout_lock)
{
	struct io_timeout_data *io;
	struct io_timeout *timeout;
	struct io_kiocb *req = NULL;

	list_for_each_entry(timeout, &ctx->ltimeout_list, list) {
		struct io_kiocb *tmp = cmd_to_io_kiocb(timeout);

		if (user_data == tmp->cqe.user_data) {
			req = tmp;
			break;
		}
	}
	if (!req)
		return -ENOENT;

	io = req->async_data;
	if (hrtimer_try_to_cancel(&io->timer) == -1)
		return -EALREADY;
	hrtimer_init(&io->timer, io_timeout_get_clock(io), mode);
	io->timer.function = io_link_timeout_fn;
	hrtimer_start(&io->timer, timespec64_to_ktime(*ts), mode);
	return 0;
}

static int io_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
			     struct timespec64 *ts, enum hrtimer_mode mode)
	__must_hold(&ctx->timeout_lock)
{
	struct io_cancel_data cd = { .data = user_data, };
	struct io_kiocb *req = io_timeout_extract(ctx, &cd);
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_timeout_data *data;

	if (IS_ERR(req))
		return PTR_ERR(req);

	timeout->off = 0; /* noseq */
	data = req->async_data;
	list_add_tail(&timeout->list, &ctx->timeout_list);
	hrtimer_init(&data->timer, io_timeout_get_clock(data), mode);
	data->timer.function = io_timeout_fn;
	hrtimer_start(&data->timer, timespec64_to_ktime(*ts), mode);
	return 0;
}
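/*
 * Validate an IORING_OP_TIMEOUT_REMOVE SQE. Plain removal takes no flags;
 * IORING_TIMEOUT_UPDATE (or IORING_LINK_TIMEOUT_UPDATE) carries a new
 * timespec in ->addr2 and may additionally set IORING_TIMEOUT_ABS.
 */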
int io_timeout_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_timeout_rem *tr = io_kiocb_to_cmd(req, struct io_timeout_rem);

	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
		return -EINVAL;
	if (sqe->buf_index || sqe->len || sqe->splice_fd_in)
		return -EINVAL;

	tr->ltimeout = false;
	tr->addr = READ_ONCE(sqe->addr);
	tr->flags = READ_ONCE(sqe->timeout_flags);
	if (tr->flags & IORING_TIMEOUT_UPDATE_MASK) {
		if (hweight32(tr->flags & IORING_TIMEOUT_CLOCK_MASK) > 1)
			return -EINVAL;
		if (tr->flags & IORING_LINK_TIMEOUT_UPDATE)
			tr->ltimeout = true;
		if (tr->flags & ~(IORING_TIMEOUT_UPDATE_MASK|IORING_TIMEOUT_ABS))
			return -EINVAL;
		if (get_timespec64(&tr->ts, u64_to_user_ptr(sqe->addr2)))
			return -EFAULT;
		if (tr->ts.tv_sec < 0 || tr->ts.tv_nsec < 0)
			return -EINVAL;
	} else if (tr->flags) {
		/* timeout removal doesn't support flags */
		return -EINVAL;
	}

	return 0;
}

static inline enum hrtimer_mode io_translate_timeout_mode(unsigned int flags)
{
	return (flags & IORING_TIMEOUT_ABS) ? HRTIMER_MODE_ABS
					    : HRTIMER_MODE_REL;
}

/*
 * Remove or update an existing timeout command
 */
int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_timeout_rem *tr = io_kiocb_to_cmd(req, struct io_timeout_rem);
	struct io_ring_ctx *ctx = req->ctx;
	int ret;

	if (!(tr->flags & IORING_TIMEOUT_UPDATE)) {
		struct io_cancel_data cd = { .data = tr->addr, };

		spin_lock(&ctx->completion_lock);
		ret = io_timeout_cancel(ctx, &cd);
		spin_unlock(&ctx->completion_lock);
	} else {
		enum hrtimer_mode mode = io_translate_timeout_mode(tr->flags);

		spin_lock_irq(&ctx->timeout_lock);
		if (tr->ltimeout)
			ret = io_linked_timeout_update(ctx, tr->addr, &tr->ts, mode);
		else
			ret = io_timeout_update(ctx, tr->addr, &tr->ts, mode);
		spin_unlock_irq(&ctx->timeout_lock);
	}

	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

static int __io_timeout_prep(struct io_kiocb *req,
			     const struct io_uring_sqe *sqe,
			     bool is_timeout_link)
{
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_timeout_data *data;
	unsigned flags;
	u32 off = READ_ONCE(sqe->off);

	if (sqe->buf_index || sqe->len != 1 || sqe->splice_fd_in)
		return -EINVAL;
	if (off && is_timeout_link)
		return -EINVAL;
	flags = READ_ONCE(sqe->timeout_flags);
	if (flags & ~(IORING_TIMEOUT_ABS | IORING_TIMEOUT_CLOCK_MASK |
		      IORING_TIMEOUT_ETIME_SUCCESS))
		return -EINVAL;
	/* more than one clock specified is invalid, obviously */
	if (hweight32(flags & IORING_TIMEOUT_CLOCK_MASK) > 1)
		return -EINVAL;

	INIT_LIST_HEAD(&timeout->list);
	timeout->off = off;
	if (unlikely(off && !req->ctx->off_timeout_used))
		req->ctx->off_timeout_used = true;

	if (WARN_ON_ONCE(req_has_async_data(req)))
		return -EFAULT;
	if (io_alloc_async_data(req))
		return -ENOMEM;

	data = req->async_data;
	data->req = req;
	data->flags = flags;

	if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
		return -EFAULT;

	if (data->ts.tv_sec < 0 || data->ts.tv_nsec < 0)
		return -EINVAL;

	INIT_LIST_HEAD(&timeout->list);
	data->mode = io_translate_timeout_mode(flags);
	hrtimer_init(&data->timer, io_timeout_get_clock(data), data->mode);

	if (is_timeout_link) {
		struct io_submit_link *link = &req->ctx->submit_state.link;

		if (!link->head)
			return -EINVAL;
		if (link->last->opcode == IORING_OP_LINK_TIMEOUT)
			return -EINVAL;
		timeout->head = link->last;
		link->last->flags |= REQ_F_ARM_LTIMEOUT;
	}
	return 0;
}

int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return __io_timeout_prep(req, sqe, false);
}

int io_link_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return __io_timeout_prep(req, sqe, true);
}
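/*
 * Arm an IORING_OP_TIMEOUT request: compute its target CQ sequence (unless
 * it is a pure timeout without an offset), insert it into the sorted timeout
 * list and start the hrtimer.
 */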
int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_timeout_data *data = req->async_data;
	struct list_head *entry;
	u32 tail, off = timeout->off;

	spin_lock_irq(&ctx->timeout_lock);

	/*
	 * sqe->off holds how many events need to occur for this timeout
	 * event to be satisfied. If it isn't set, then this is a pure
	 * timeout request, sequence isn't used.
	 */
	if (io_is_timeout_noseq(req)) {
		entry = ctx->timeout_list.prev;
		goto add;
	}

	tail = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);
	timeout->target_seq = tail + off;

	/* Update the last seq here in case io_flush_timeouts() hasn't.
	 * This is safe because ->completion_lock is held, and submissions
	 * and completions are never mixed in the same ->completion_lock section.
	 */
	ctx->cq_last_tm_flush = tail;

	/*
	 * Insertion sort, ensuring the first entry in the list is always
	 * the one we need first.
	 */
	list_for_each_prev(entry, &ctx->timeout_list) {
		struct io_timeout *nextt = list_entry(entry, struct io_timeout, list);
		struct io_kiocb *nxt = cmd_to_io_kiocb(nextt);

		if (io_is_timeout_noseq(nxt))
			continue;
		/* nxt.seq is behind @tail, otherwise would've been completed */
		if (off >= nextt->target_seq - tail)
			break;
	}
add:
	list_add(&timeout->list, entry);
	data->timer.function = io_timeout_fn;
	hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
	spin_unlock_irq(&ctx->timeout_lock);
	return IOU_ISSUE_SKIP_COMPLETE;
}

void io_queue_linked_timeout(struct io_kiocb *req)
{
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_ring_ctx *ctx = req->ctx;

	spin_lock_irq(&ctx->timeout_lock);
	/*
	 * If the back reference is NULL, then our linked request finished
	 * before we got a chance to set up the timer
	 */
	if (timeout->head) {
		struct io_timeout_data *data = req->async_data;

		data->timer.function = io_link_timeout_fn;
		hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
				data->mode);
		list_add_tail(&timeout->list, &ctx->ltimeout_list);
	}
	spin_unlock_irq(&ctx->timeout_lock);
	/* drop submission reference */
	io_put_req(req);
}

static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
			  bool cancel_all)
	__must_hold(&req->ctx->timeout_lock)
{
	struct io_kiocb *req;

	if (task && head->task != task)
		return false;
	if (cancel_all)
		return true;

	io_for_each_link(req, head) {
		if (req->flags & REQ_F_INFLIGHT)
			return true;
	}
	return false;
}

/* Returns true if we found and killed one or more timeouts */
__cold bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
			     bool cancel_all)
{
	struct io_timeout *timeout, *tmp;
	int canceled = 0;

	/*
	 * completion_lock is needed for io_match_task(). Take it before
	 * timeout_lock to keep the locking ordering.
	 */
	spin_lock(&ctx->completion_lock);
	spin_lock_irq(&ctx->timeout_lock);
	list_for_each_entry_safe(timeout, tmp, &ctx->timeout_list, list) {
		struct io_kiocb *req = cmd_to_io_kiocb(timeout);

		if (io_match_task(req, tsk, cancel_all) &&
		    io_kill_timeout(req, -ECANCELED))
			canceled++;
	}
	spin_unlock_irq(&ctx->timeout_lock);
	spin_unlock(&ctx->completion_lock);
	return canceled != 0;
}