// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/io_uring.h>

#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "refs.h"
#include "cancel.h"
#include "timeout.h"

struct io_timeout {
	struct file			*file;
	u32				off;
	u32				target_seq;
	u32				repeats;
	struct list_head		list;
	/* head of the link, used by linked timeouts only */
	struct io_kiocb			*head;
	/* for linked completions */
	struct io_kiocb			*prev;
};

struct io_timeout_rem {
	struct file			*file;
	u64				addr;

	/* timeout update */
	struct timespec64		ts;
	u32				flags;
	bool				ltimeout;
};

static inline bool io_is_timeout_noseq(struct io_kiocb *req)
{
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_timeout_data *data = req->async_data;

	return !timeout->off || data->flags & IORING_TIMEOUT_MULTISHOT;
}

static inline void io_put_req(struct io_kiocb *req)
{
	if (req_ref_put_and_test(req)) {
		io_queue_next(req);
		io_free_req(req);
	}
}

static inline bool io_timeout_finish(struct io_timeout *timeout,
				     struct io_timeout_data *data)
{
	if (!(data->flags & IORING_TIMEOUT_MULTISHOT))
		return true;

	if (!timeout->off || (timeout->repeats && --timeout->repeats))
		return false;

	return true;
}

static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer);

static void io_timeout_complete(struct io_kiocb *req, struct io_tw_state *ts)
{
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_timeout_data *data = req->async_data;
	struct io_ring_ctx *ctx = req->ctx;

	if (!io_timeout_finish(timeout, data)) {
		bool filled;
		filled = io_aux_cqe(req, ts->locked, -ETIME, IORING_CQE_F_MORE,
				    false);
		if (filled) {
			/* re-arm timer */
			spin_lock_irq(&ctx->timeout_lock);
			list_add(&timeout->list, ctx->timeout_list.prev);
			data->timer.function = io_timeout_fn;
			hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
			spin_unlock_irq(&ctx->timeout_lock);
			return;
		}
	}

	io_req_task_complete(req, ts);
}

static bool io_kill_timeout(struct io_kiocb *req, int status)
	__must_hold(&req->ctx->timeout_lock)
{
	struct io_timeout_data *io = req->async_data;

	if (hrtimer_try_to_cancel(&io->timer) != -1) {
		struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);

		if (status)
			req_set_fail(req);
		atomic_set(&req->ctx->cq_timeouts,
			atomic_read(&req->ctx->cq_timeouts) + 1);
		list_del_init(&timeout->list);
		io_req_queue_tw_complete(req, status);
		return true;
	}
	return false;
}

__cold void io_flush_timeouts(struct io_ring_ctx *ctx)
{
	u32 seq;
	struct io_timeout *timeout, *tmp;

	spin_lock_irq(&ctx->timeout_lock);
	seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);

	list_for_each_entry_safe(timeout, tmp, &ctx->timeout_list, list) {
		struct io_kiocb *req = cmd_to_io_kiocb(timeout);
		u32 events_needed, events_got;

		if (io_is_timeout_noseq(req))
			break;

		/*
		 * Since seq can easily wrap around over time, subtract
		 * the last seq at which timeouts were flushed before comparing.
		 * Assuming not more than 2^31-1 events have happened since,
		 * these subtractions won't have wrapped, so we can check if
		 * target is in [last_seq, current_seq] by comparing the two.
		 */
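		/*
		 * Worked example (illustration only): with cq_last_tm_flush
		 * == 0xfffffff0, a timeout armed for 0x20 more events has
		 * target_seq == 0x10 after wrapping.  If seq is currently
		 * 0xfffffff8, then events_needed == 0x20 and events_got ==
		 * 0x8, so the timeout correctly stays pending, whereas a raw
		 * "seq >= target_seq" comparison would have fired it early.
		 */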
		events_needed = timeout->target_seq - ctx->cq_last_tm_flush;
		events_got = seq - ctx->cq_last_tm_flush;
		if (events_got < events_needed)
			break;

		io_kill_timeout(req, 0);
	}
	ctx->cq_last_tm_flush = seq;
	spin_unlock_irq(&ctx->timeout_lock);
}

static void io_req_tw_fail_links(struct io_kiocb *link, struct io_tw_state *ts)
{
	io_tw_lock(link->ctx, ts);
	while (link) {
		struct io_kiocb *nxt = link->link;
		long res = -ECANCELED;

		if (link->flags & REQ_F_FAIL)
			res = link->cqe.res;
		link->link = NULL;
		io_req_set_res(link, res, 0);
		io_req_task_complete(link, ts);
		link = nxt;
	}
}

static void io_fail_links(struct io_kiocb *req)
	__must_hold(&req->ctx->completion_lock)
{
	struct io_kiocb *link = req->link;
	bool ignore_cqes = req->flags & REQ_F_SKIP_LINK_CQES;

	if (!link)
		return;

	while (link) {
		if (ignore_cqes)
			link->flags |= REQ_F_CQE_SKIP;
		else
			link->flags &= ~REQ_F_CQE_SKIP;
		trace_io_uring_fail_link(req, link);
		link = link->link;
	}

	link = req->link;
	link->io_task_work.func = io_req_tw_fail_links;
	io_req_task_work_add(link);
	req->link = NULL;
}

static inline void io_remove_next_linked(struct io_kiocb *req)
{
	struct io_kiocb *nxt = req->link;

	req->link = nxt->link;
	nxt->link = NULL;
}

void io_disarm_next(struct io_kiocb *req)
	__must_hold(&req->ctx->completion_lock)
{
	struct io_kiocb *link = NULL;

	if (req->flags & REQ_F_ARM_LTIMEOUT) {
		link = req->link;
		req->flags &= ~REQ_F_ARM_LTIMEOUT;
		if (link && link->opcode == IORING_OP_LINK_TIMEOUT) {
			io_remove_next_linked(req);
			io_req_queue_tw_complete(link, -ECANCELED);
		}
	} else if (req->flags & REQ_F_LINK_TIMEOUT) {
		struct io_ring_ctx *ctx = req->ctx;

		spin_lock_irq(&ctx->timeout_lock);
		link = io_disarm_linked_timeout(req);
		spin_unlock_irq(&ctx->timeout_lock);
		if (link)
			io_req_queue_tw_complete(link, -ECANCELED);
	}
	if (unlikely((req->flags & REQ_F_FAIL) &&
		     !(req->flags & REQ_F_HARDLINK)))
		io_fail_links(req);
}

struct io_kiocb *__io_disarm_linked_timeout(struct io_kiocb *req,
					    struct io_kiocb *link)
	__must_hold(&req->ctx->completion_lock)
	__must_hold(&req->ctx->timeout_lock)
{
	struct io_timeout_data *io = link->async_data;
	struct io_timeout *timeout = io_kiocb_to_cmd(link, struct io_timeout);

	io_remove_next_linked(req);
	timeout->head = NULL;
	if (hrtimer_try_to_cancel(&io->timer) != -1) {
		list_del(&timeout->list);
		return link;
	}

	return NULL;
}

static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
{
	struct io_timeout_data *data = container_of(timer,
						struct io_timeout_data, timer);
	struct io_kiocb *req = data->req;
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_ring_ctx *ctx = req->ctx;
	unsigned long flags;

	spin_lock_irqsave(&ctx->timeout_lock, flags);
	list_del_init(&timeout->list);
	atomic_set(&req->ctx->cq_timeouts,
		atomic_read(&req->ctx->cq_timeouts) + 1);
	spin_unlock_irqrestore(&ctx->timeout_lock, flags);

	if (!(data->flags & IORING_TIMEOUT_ETIME_SUCCESS))
		req_set_fail(req);

	io_req_set_res(req, -ETIME, 0);
	req->io_task_work.func = io_timeout_complete;
	io_req_task_work_add(req);
	return HRTIMER_NORESTART;
}

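/*
 * Find the timeout on ->timeout_list matching @cd and stop its hrtimer.
 * Returns the request with its timer cancelled and removed from the list,
 * ERR_PTR(-ENOENT) if no match was found, or ERR_PTR(-EALREADY) if the
 * timer callback is already running.
 */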
static struct io_kiocb *io_timeout_extract(struct io_ring_ctx *ctx,
					   struct io_cancel_data *cd)
	__must_hold(&ctx->timeout_lock)
{
	struct io_timeout *timeout;
	struct io_timeout_data *io;
	struct io_kiocb *req = NULL;

	list_for_each_entry(timeout, &ctx->timeout_list, list) {
		struct io_kiocb *tmp = cmd_to_io_kiocb(timeout);

		if (!(cd->flags & IORING_ASYNC_CANCEL_ANY) &&
		    cd->data != tmp->cqe.user_data)
			continue;
		if (cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY)) {
			if (cd->seq == tmp->work.cancel_seq)
				continue;
			tmp->work.cancel_seq = cd->seq;
		}
		req = tmp;
		break;
	}
	if (!req)
		return ERR_PTR(-ENOENT);

	io = req->async_data;
	if (hrtimer_try_to_cancel(&io->timer) == -1)
		return ERR_PTR(-EALREADY);
	timeout = io_kiocb_to_cmd(req, struct io_timeout);
	list_del_init(&timeout->list);
	return req;
}

int io_timeout_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
	__must_hold(&ctx->completion_lock)
{
	struct io_kiocb *req;

	spin_lock_irq(&ctx->timeout_lock);
	req = io_timeout_extract(ctx, cd);
	spin_unlock_irq(&ctx->timeout_lock);

	if (IS_ERR(req))
		return PTR_ERR(req);
	io_req_task_queue_fail(req, -ECANCELED);
	return 0;
}

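/*
 * Task-work callback run after a linked timeout has fired: try to cancel
 * the request the timeout was linked to, then post the timeout's own
 * completion, reporting the cancellation error if one occurred and -ETIME
 * otherwise.
 */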
static void io_req_task_link_timeout(struct io_kiocb *req, struct io_tw_state *ts)
{
	unsigned issue_flags = ts->locked ? 0 : IO_URING_F_UNLOCKED;
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_kiocb *prev = timeout->prev;
	int ret = -ENOENT;

	if (prev) {
		if (!(req->task->flags & PF_EXITING)) {
			struct io_cancel_data cd = {
				.ctx		= req->ctx,
				.data		= prev->cqe.user_data,
			};

			ret = io_try_cancel(req->task->io_uring, &cd, issue_flags);
		}
		io_req_set_res(req, ret ?: -ETIME, 0);
		io_req_task_complete(req, ts);
		io_put_req(prev);
	} else {
		io_req_set_res(req, -ETIME, 0);
		io_req_task_complete(req, ts);
	}
}

static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
{
	struct io_timeout_data *data = container_of(timer,
						struct io_timeout_data, timer);
	struct io_kiocb *prev, *req = data->req;
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_ring_ctx *ctx = req->ctx;
	unsigned long flags;

	spin_lock_irqsave(&ctx->timeout_lock, flags);
	prev = timeout->head;
	timeout->head = NULL;

	/*
	 * We don't expect the list to be empty, that will only happen if we
	 * race with the completion of the linked work.
	 */
	if (prev) {
		io_remove_next_linked(prev);
		if (!req_ref_inc_not_zero(prev))
			prev = NULL;
	}
	list_del(&timeout->list);
	timeout->prev = prev;
	spin_unlock_irqrestore(&ctx->timeout_lock, flags);

	req->io_task_work.func = io_req_task_link_timeout;
	io_req_task_work_add(req);
	return HRTIMER_NORESTART;
}

static clockid_t io_timeout_get_clock(struct io_timeout_data *data)
{
	switch (data->flags & IORING_TIMEOUT_CLOCK_MASK) {
	case IORING_TIMEOUT_BOOTTIME:
		return CLOCK_BOOTTIME;
	case IORING_TIMEOUT_REALTIME:
		return CLOCK_REALTIME;
	default:
		/* can't happen, vetted at prep time */
		WARN_ON_ONCE(1);
		fallthrough;
	case 0:
		return CLOCK_MONOTONIC;
	}
}

static int io_linked_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
				    struct timespec64 *ts, enum hrtimer_mode mode)
	__must_hold(&ctx->timeout_lock)
{
	struct io_timeout_data *io;
	struct io_timeout *timeout;
	struct io_kiocb *req = NULL;

	list_for_each_entry(timeout, &ctx->ltimeout_list, list) {
		struct io_kiocb *tmp = cmd_to_io_kiocb(timeout);

		if (user_data == tmp->cqe.user_data) {
			req = tmp;
			break;
		}
	}
	if (!req)
		return -ENOENT;

	io = req->async_data;
	if (hrtimer_try_to_cancel(&io->timer) == -1)
		return -EALREADY;
	hrtimer_init(&io->timer, io_timeout_get_clock(io), mode);
	io->timer.function = io_link_timeout_fn;
	hrtimer_start(&io->timer, timespec64_to_ktime(*ts), mode);
	return 0;
}

static int io_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
			     struct timespec64 *ts, enum hrtimer_mode mode)
	__must_hold(&ctx->timeout_lock)
{
	struct io_cancel_data cd = { .data = user_data, };
	struct io_kiocb *req = io_timeout_extract(ctx, &cd);
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_timeout_data *data;

	if (IS_ERR(req))
		return PTR_ERR(req);

	timeout->off = 0; /* noseq */
	data = req->async_data;
	list_add_tail(&timeout->list, &ctx->timeout_list);
	hrtimer_init(&data->timer, io_timeout_get_clock(data), mode);
	data->timer.function = io_timeout_fn;
	hrtimer_start(&data->timer, timespec64_to_ktime(*ts), mode);
	return 0;
}

int io_timeout_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_timeout_rem *tr = io_kiocb_to_cmd(req, struct io_timeout_rem);

	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
		return -EINVAL;
	if (sqe->buf_index || sqe->len || sqe->splice_fd_in)
		return -EINVAL;

	tr->ltimeout = false;
	tr->addr = READ_ONCE(sqe->addr);
	tr->flags = READ_ONCE(sqe->timeout_flags);
	if (tr->flags & IORING_TIMEOUT_UPDATE_MASK) {
		if (hweight32(tr->flags & IORING_TIMEOUT_CLOCK_MASK) > 1)
			return -EINVAL;
		if (tr->flags & IORING_LINK_TIMEOUT_UPDATE)
			tr->ltimeout = true;
		if (tr->flags & ~(IORING_TIMEOUT_UPDATE_MASK|IORING_TIMEOUT_ABS))
			return -EINVAL;
		if (get_timespec64(&tr->ts, u64_to_user_ptr(sqe->addr2)))
			return -EFAULT;
		if (tr->ts.tv_sec < 0 || tr->ts.tv_nsec < 0)
			return -EINVAL;
	} else if (tr->flags) {
		/* timeout removal doesn't support flags */
		return -EINVAL;
	}

	return 0;
}

static inline enum hrtimer_mode io_translate_timeout_mode(unsigned int flags)
{
	return (flags & IORING_TIMEOUT_ABS) ? HRTIMER_MODE_ABS
					    : HRTIMER_MODE_REL;
}

/*
 * Remove or update an existing timeout command
 */
int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_timeout_rem *tr = io_kiocb_to_cmd(req, struct io_timeout_rem);
	struct io_ring_ctx *ctx = req->ctx;
	int ret;

	if (!(tr->flags & IORING_TIMEOUT_UPDATE)) {
		struct io_cancel_data cd = { .data = tr->addr, };

		spin_lock(&ctx->completion_lock);
		ret = io_timeout_cancel(ctx, &cd);
		spin_unlock(&ctx->completion_lock);
	} else {
		enum hrtimer_mode mode = io_translate_timeout_mode(tr->flags);

		spin_lock_irq(&ctx->timeout_lock);
		if (tr->ltimeout)
			ret = io_linked_timeout_update(ctx, tr->addr, &tr->ts, mode);
		else
			ret = io_timeout_update(ctx, tr->addr, &tr->ts, mode);
		spin_unlock_irq(&ctx->timeout_lock);
	}

	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

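/*
 * Common SQE validation and setup for IORING_OP_TIMEOUT and
 * IORING_OP_LINK_TIMEOUT: check the flags, allocate and fill the
 * io_timeout_data, and for linked timeouts attach to the tail of the
 * current submission chain.
 */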
static int __io_timeout_prep(struct io_kiocb *req,
			     const struct io_uring_sqe *sqe,
			     bool is_timeout_link)
{
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_timeout_data *data;
	unsigned flags;
	u32 off = READ_ONCE(sqe->off);

	if (sqe->buf_index || sqe->len != 1 || sqe->splice_fd_in)
		return -EINVAL;
	if (off && is_timeout_link)
		return -EINVAL;
	flags = READ_ONCE(sqe->timeout_flags);
	if (flags & ~(IORING_TIMEOUT_ABS | IORING_TIMEOUT_CLOCK_MASK |
		      IORING_TIMEOUT_ETIME_SUCCESS |
		      IORING_TIMEOUT_MULTISHOT))
		return -EINVAL;
	/* more than one clock specified is invalid, obviously */
	if (hweight32(flags & IORING_TIMEOUT_CLOCK_MASK) > 1)
		return -EINVAL;
	/* multishot requests only make sense with rel values */
	if (!(~flags & (IORING_TIMEOUT_MULTISHOT | IORING_TIMEOUT_ABS)))
		return -EINVAL;

	INIT_LIST_HEAD(&timeout->list);
	timeout->off = off;
	if (unlikely(off && !req->ctx->off_timeout_used))
		req->ctx->off_timeout_used = true;
	/*
	 * for multishot reqs w/ fixed nr of repeats, repeats tracks the
	 * remaining nr
	 */
	timeout->repeats = 0;
	if ((flags & IORING_TIMEOUT_MULTISHOT) && off > 0)
		timeout->repeats = off;

	if (WARN_ON_ONCE(req_has_async_data(req)))
		return -EFAULT;
	if (io_alloc_async_data(req))
		return -ENOMEM;

	data = req->async_data;
	data->req = req;
	data->flags = flags;

	if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
		return -EFAULT;

	if (data->ts.tv_sec < 0 || data->ts.tv_nsec < 0)
		return -EINVAL;

	INIT_LIST_HEAD(&timeout->list);
	data->mode = io_translate_timeout_mode(flags);
	hrtimer_init(&data->timer, io_timeout_get_clock(data), data->mode);

	if (is_timeout_link) {
		struct io_submit_link *link = &req->ctx->submit_state.link;

		if (!link->head)
			return -EINVAL;
		if (link->last->opcode == IORING_OP_LINK_TIMEOUT)
			return -EINVAL;
		timeout->head = link->last;
		link->last->flags |= REQ_F_ARM_LTIMEOUT;
	}
	return 0;
}

int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return __io_timeout_prep(req, sqe, false);
}

int io_link_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	return __io_timeout_prep(req, sqe, true);
}

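/*
 * Issue path for IORING_OP_TIMEOUT: compute the target CQ sequence,
 * insertion-sort the request into ->timeout_list and arm the hrtimer.
 */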
int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_timeout_data *data = req->async_data;
	struct list_head *entry;
	u32 tail, off = timeout->off;

	spin_lock_irq(&ctx->timeout_lock);

	/*
	 * sqe->off holds how many events need to occur for this timeout
	 * event to be satisfied. If it isn't set, then this is a pure
	 * timeout request, sequence isn't used.
	 */
	if (io_is_timeout_noseq(req)) {
		entry = ctx->timeout_list.prev;
		goto add;
	}

	tail = data_race(ctx->cached_cq_tail) - atomic_read(&ctx->cq_timeouts);
	timeout->target_seq = tail + off;

	/* Update the last seq here in case io_flush_timeouts() hasn't.
	 * This is safe because ->completion_lock is held, and submissions
	 * and completions are never mixed in the same ->completion_lock section.
	 */
	ctx->cq_last_tm_flush = tail;

	/*
	 * Insertion sort, ensuring the first entry in the list is always
	 * the one we need first.
	 */
	list_for_each_prev(entry, &ctx->timeout_list) {
		struct io_timeout *nextt = list_entry(entry, struct io_timeout, list);
		struct io_kiocb *nxt = cmd_to_io_kiocb(nextt);

		if (io_is_timeout_noseq(nxt))
			continue;
		/* nxt.seq is behind @tail, otherwise would've been completed */
		if (off >= nextt->target_seq - tail)
			break;
	}
add:
	list_add(&timeout->list, entry);
	data->timer.function = io_timeout_fn;
	hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
	spin_unlock_irq(&ctx->timeout_lock);
	return IOU_ISSUE_SKIP_COMPLETE;
}

void io_queue_linked_timeout(struct io_kiocb *req)
{
	struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
	struct io_ring_ctx *ctx = req->ctx;

	spin_lock_irq(&ctx->timeout_lock);
	/*
	 * If the back reference is NULL, then our linked request finished
	 * before we got a chance to set up the timer
	 */
	if (timeout->head) {
		struct io_timeout_data *data = req->async_data;

		data->timer.function = io_link_timeout_fn;
		hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
				data->mode);
		list_add_tail(&timeout->list, &ctx->ltimeout_list);
	}
	spin_unlock_irq(&ctx->timeout_lock);
	/* drop submission reference */
	io_put_req(req);
}

static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
			  bool cancel_all)
	__must_hold(&req->ctx->timeout_lock)
{
	struct io_kiocb *req;

	if (task && head->task != task)
		return false;
	if (cancel_all)
		return true;

	io_for_each_link(req, head) {
		if (req->flags & REQ_F_INFLIGHT)
			return true;
	}
	return false;
}

/* Returns true if we found and killed one or more timeouts */
__cold bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
			     bool cancel_all)
{
	struct io_timeout *timeout, *tmp;
	int canceled = 0;

	/*
	 * completion_lock is needed for io_match_task(). Take it before
	 * timeout_lock to keep the locking order.
	 */
	spin_lock(&ctx->completion_lock);
	spin_lock_irq(&ctx->timeout_lock);
	list_for_each_entry_safe(timeout, tmp, &ctx->timeout_list, list) {
		struct io_kiocb *req = cmd_to_io_kiocb(timeout);

		if (io_match_task(req, tsk, cancel_all) &&
		    io_kill_timeout(req, -ECANCELED))
			canceled++;
	}
	spin_unlock_irq(&ctx->timeout_lock);
	spin_unlock(&ctx->completion_lock);
	return canceled != 0;
}
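
/*
 * Illustrative userspace sketch (not from this file): arming a pure,
 * relative timeout with liburing exercises io_timeout() above and, once the
 * hrtimer fires, io_timeout_fn()/io_timeout_complete(), which post a CQE
 * with res == -ETIME.  Assumes liburing is installed; error handling is
 * omitted.
 *
 *	struct io_uring ring;
 *	struct io_uring_cqe *cqe;
 *	struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
 *
 *	io_uring_queue_init(8, &ring, 0);
 *	// count == 0, flags == 0: a pure relative timeout, no target_seq
 *	io_uring_prep_timeout(io_uring_get_sqe(&ring), &ts, 0, 0);
 *	io_uring_submit(&ring);
 *	io_uring_wait_cqe(&ring, &cqe);		// cqe->res == -ETIME on expiry
 *	io_uring_cqe_seen(&ring, cqe);
 *	io_uring_queue_exit(&ring);
 */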