// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/io_uring.h>

#include <trace/events/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "refs.h"
#include "cancel.h"
#include "timeout.h"

struct io_timeout {
        struct file *file;
        u32 off;
        u32 target_seq;
        u32 repeats;
        struct list_head list;
        /* head of the link, used by linked timeouts only */
        struct io_kiocb *head;
        /* for linked completions */
        struct io_kiocb *prev;
};

struct io_timeout_rem {
        struct file *file;
        u64 addr;

        /* timeout update */
        struct timespec64 ts;
        u32 flags;
        bool ltimeout;
};

static inline bool io_is_timeout_noseq(struct io_kiocb *req)
{
        struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
        struct io_timeout_data *data = req->async_data;

        return !timeout->off || data->flags & IORING_TIMEOUT_MULTISHOT;
}

static inline void io_put_req(struct io_kiocb *req)
{
        if (req_ref_put_and_test(req)) {
                io_queue_next(req);
                io_free_req(req);
        }
}

static inline bool io_timeout_finish(struct io_timeout *timeout,
                                     struct io_timeout_data *data)
{
        if (!(data->flags & IORING_TIMEOUT_MULTISHOT))
                return true;

        if (!timeout->off || (timeout->repeats && --timeout->repeats))
                return false;

        return true;
}

static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer);

static void io_timeout_complete(struct io_kiocb *req, struct io_tw_state *ts)
{
        struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
        struct io_timeout_data *data = req->async_data;
        struct io_ring_ctx *ctx = req->ctx;

        if (!io_timeout_finish(timeout, data)) {
                bool filled;
                filled = io_fill_cqe_req_aux(req, ts->locked, -ETIME,
                                             IORING_CQE_F_MORE);
                if (filled) {
                        /* re-arm timer */
                        spin_lock_irq(&ctx->timeout_lock);
                        list_add(&timeout->list, ctx->timeout_list.prev);
                        data->timer.function = io_timeout_fn;
                        hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
                        spin_unlock_irq(&ctx->timeout_lock);
                        return;
                }
        }

        io_req_task_complete(req, ts);
}

static bool io_kill_timeout(struct io_kiocb *req, int status)
        __must_hold(&req->ctx->timeout_lock)
{
        struct io_timeout_data *io = req->async_data;

        if (hrtimer_try_to_cancel(&io->timer) != -1) {
                struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);

                if (status)
                        req_set_fail(req);
                atomic_set(&req->ctx->cq_timeouts,
                           atomic_read(&req->ctx->cq_timeouts) + 1);
                list_del_init(&timeout->list);
                io_req_queue_tw_complete(req, status);
                return true;
        }
        return false;
}

__cold void io_flush_timeouts(struct io_ring_ctx *ctx)
{
        u32 seq;
        struct io_timeout *timeout, *tmp;

        spin_lock_irq(&ctx->timeout_lock);
        seq = ctx->cached_cq_tail - atomic_read(&ctx->cq_timeouts);

        list_for_each_entry_safe(timeout, tmp, &ctx->timeout_list, list) {
                struct io_kiocb *req = cmd_to_io_kiocb(timeout);
                u32 events_needed, events_got;

                if (io_is_timeout_noseq(req))
                        break;

                /*
                 * Since seq can easily wrap around over time, subtract
                 * the last seq at which timeouts were flushed before comparing.
                 * Assuming not more than 2^31-1 events have happened since,
                 * these subtractions won't have wrapped, so we can check if
                 * target is in [last_seq, current_seq] by comparing the two.
                 */
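                /*
                 * For example, with u32 arithmetic and last_flush == 0xfffffff0,
                 * target_seq == 0x10 and seq == 0x5: events_needed == 0x20 and
                 * events_got == 0x15, so the timeout is correctly treated as
                 * not yet due even though the raw counters have wrapped.
                 */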
                events_needed = timeout->target_seq - ctx->cq_last_tm_flush;
                events_got = seq - ctx->cq_last_tm_flush;
                if (events_got < events_needed)
                        break;

                io_kill_timeout(req, 0);
        }
        ctx->cq_last_tm_flush = seq;
        spin_unlock_irq(&ctx->timeout_lock);
}

static void io_req_tw_fail_links(struct io_kiocb *link, struct io_tw_state *ts)
{
        io_tw_lock(link->ctx, ts);
        while (link) {
                struct io_kiocb *nxt = link->link;
                long res = -ECANCELED;

                if (link->flags & REQ_F_FAIL)
                        res = link->cqe.res;
                link->link = NULL;
                io_req_set_res(link, res, 0);
                io_req_task_complete(link, ts);
                link = nxt;
        }
}

static void io_fail_links(struct io_kiocb *req)
        __must_hold(&req->ctx->completion_lock)
{
        struct io_kiocb *link = req->link;
        bool ignore_cqes = req->flags & REQ_F_SKIP_LINK_CQES;

        if (!link)
                return;

        while (link) {
                if (ignore_cqes)
                        link->flags |= REQ_F_CQE_SKIP;
                else
                        link->flags &= ~REQ_F_CQE_SKIP;
                trace_io_uring_fail_link(req, link);
                link = link->link;
        }

        link = req->link;
        link->io_task_work.func = io_req_tw_fail_links;
        io_req_task_work_add(link);
        req->link = NULL;
}

static inline void io_remove_next_linked(struct io_kiocb *req)
{
        struct io_kiocb *nxt = req->link;

        req->link = nxt->link;
        nxt->link = NULL;
}

void io_disarm_next(struct io_kiocb *req)
        __must_hold(&req->ctx->completion_lock)
{
        struct io_kiocb *link = NULL;

        if (req->flags & REQ_F_ARM_LTIMEOUT) {
                link = req->link;
                req->flags &= ~REQ_F_ARM_LTIMEOUT;
                if (link && link->opcode == IORING_OP_LINK_TIMEOUT) {
                        io_remove_next_linked(req);
                        io_req_queue_tw_complete(link, -ECANCELED);
                }
        } else if (req->flags & REQ_F_LINK_TIMEOUT) {
                struct io_ring_ctx *ctx = req->ctx;

                spin_lock_irq(&ctx->timeout_lock);
                link = io_disarm_linked_timeout(req);
                spin_unlock_irq(&ctx->timeout_lock);
                if (link)
                        io_req_queue_tw_complete(link, -ECANCELED);
        }
        if (unlikely((req->flags & REQ_F_FAIL) &&
                     !(req->flags & REQ_F_HARDLINK)))
                io_fail_links(req);
}

struct io_kiocb *__io_disarm_linked_timeout(struct io_kiocb *req,
                                            struct io_kiocb *link)
        __must_hold(&req->ctx->completion_lock)
        __must_hold(&req->ctx->timeout_lock)
{
        struct io_timeout_data *io = link->async_data;
        struct io_timeout *timeout = io_kiocb_to_cmd(link, struct io_timeout);

        io_remove_next_linked(req);
        timeout->head = NULL;
        if (hrtimer_try_to_cancel(&io->timer) != -1) {
                list_del(&timeout->list);
                return link;
        }

        return NULL;
}

static enum hrtimer_restart io_timeout_fn(struct hrtimer *timer)
{
        struct io_timeout_data *data = container_of(timer,
                                                struct io_timeout_data, timer);
        struct io_kiocb *req = data->req;
        struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
        struct io_ring_ctx *ctx = req->ctx;
        unsigned long flags;

        spin_lock_irqsave(&ctx->timeout_lock, flags);
        list_del_init(&timeout->list);
        atomic_set(&req->ctx->cq_timeouts,
                   atomic_read(&req->ctx->cq_timeouts) + 1);
        spin_unlock_irqrestore(&ctx->timeout_lock, flags);

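        /*
         * A fired timeout completes with -ETIME; unless the user asked for
         * IORING_TIMEOUT_ETIME_SUCCESS, expiry also marks the request as
         * failed.
         */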
        if (!(data->flags & IORING_TIMEOUT_ETIME_SUCCESS))
                req_set_fail(req);

        io_req_set_res(req, -ETIME, 0);
        req->io_task_work.func = io_timeout_complete;
        io_req_task_work_add(req);
        return HRTIMER_NORESTART;
}

static struct io_kiocb *io_timeout_extract(struct io_ring_ctx *ctx,
                                           struct io_cancel_data *cd)
        __must_hold(&ctx->timeout_lock)
{
        struct io_timeout *timeout;
        struct io_timeout_data *io;
        struct io_kiocb *req = NULL;

        list_for_each_entry(timeout, &ctx->timeout_list, list) {
                struct io_kiocb *tmp = cmd_to_io_kiocb(timeout);

                if (io_cancel_req_match(tmp, cd)) {
                        req = tmp;
                        break;
                }
        }
        if (!req)
                return ERR_PTR(-ENOENT);

        io = req->async_data;
        if (hrtimer_try_to_cancel(&io->timer) == -1)
                return ERR_PTR(-EALREADY);
        timeout = io_kiocb_to_cmd(req, struct io_timeout);
        list_del_init(&timeout->list);
        return req;
}

int io_timeout_cancel(struct io_ring_ctx *ctx, struct io_cancel_data *cd)
        __must_hold(&ctx->completion_lock)
{
        struct io_kiocb *req;

        spin_lock_irq(&ctx->timeout_lock);
        req = io_timeout_extract(ctx, cd);
        spin_unlock_irq(&ctx->timeout_lock);

        if (IS_ERR(req))
                return PTR_ERR(req);
        io_req_task_queue_fail(req, -ECANCELED);
        return 0;
}

static void io_req_task_link_timeout(struct io_kiocb *req, struct io_tw_state *ts)
{
        unsigned issue_flags = ts->locked ? 0 : IO_URING_F_UNLOCKED;
        struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
        struct io_kiocb *prev = timeout->prev;
        int ret = -ENOENT;

        if (prev) {
                if (!(req->task->flags & PF_EXITING)) {
                        struct io_cancel_data cd = {
                                .ctx = req->ctx,
                                .data = prev->cqe.user_data,
                        };

                        ret = io_try_cancel(req->task->io_uring, &cd, issue_flags);
                }
                io_req_set_res(req, ret ?: -ETIME, 0);
                io_req_task_complete(req, ts);
                io_put_req(prev);
        } else {
                io_req_set_res(req, -ETIME, 0);
                io_req_task_complete(req, ts);
        }
}

static enum hrtimer_restart io_link_timeout_fn(struct hrtimer *timer)
{
        struct io_timeout_data *data = container_of(timer,
                                                struct io_timeout_data, timer);
        struct io_kiocb *prev, *req = data->req;
        struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
        struct io_ring_ctx *ctx = req->ctx;
        unsigned long flags;

        spin_lock_irqsave(&ctx->timeout_lock, flags);
        prev = timeout->head;
        timeout->head = NULL;

        /*
         * We don't expect the list to be empty, that will only happen if we
         * race with the completion of the linked work.
         */
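        /*
         * If the linked request is still around, detach it and take a
         * reference so it survives until the cancellation attempt runs in
         * task work; if the reference is already gone, the request is
         * completing and there is nothing left to cancel.
         */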
        if (prev) {
                io_remove_next_linked(prev);
                if (!req_ref_inc_not_zero(prev))
                        prev = NULL;
        }
        list_del(&timeout->list);
        timeout->prev = prev;
        spin_unlock_irqrestore(&ctx->timeout_lock, flags);

        req->io_task_work.func = io_req_task_link_timeout;
        io_req_task_work_add(req);
        return HRTIMER_NORESTART;
}

static clockid_t io_timeout_get_clock(struct io_timeout_data *data)
{
        switch (data->flags & IORING_TIMEOUT_CLOCK_MASK) {
        case IORING_TIMEOUT_BOOTTIME:
                return CLOCK_BOOTTIME;
        case IORING_TIMEOUT_REALTIME:
                return CLOCK_REALTIME;
        default:
                /* can't happen, vetted at prep time */
                WARN_ON_ONCE(1);
                fallthrough;
        case 0:
                return CLOCK_MONOTONIC;
        }
}

static int io_linked_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
                                    struct timespec64 *ts, enum hrtimer_mode mode)
        __must_hold(&ctx->timeout_lock)
{
        struct io_timeout_data *io;
        struct io_timeout *timeout;
        struct io_kiocb *req = NULL;

        list_for_each_entry(timeout, &ctx->ltimeout_list, list) {
                struct io_kiocb *tmp = cmd_to_io_kiocb(timeout);

                if (user_data == tmp->cqe.user_data) {
                        req = tmp;
                        break;
                }
        }
        if (!req)
                return -ENOENT;

        io = req->async_data;
        if (hrtimer_try_to_cancel(&io->timer) == -1)
                return -EALREADY;
        hrtimer_init(&io->timer, io_timeout_get_clock(io), mode);
        io->timer.function = io_link_timeout_fn;
        hrtimer_start(&io->timer, timespec64_to_ktime(*ts), mode);
        return 0;
}

static int io_timeout_update(struct io_ring_ctx *ctx, __u64 user_data,
                             struct timespec64 *ts, enum hrtimer_mode mode)
        __must_hold(&ctx->timeout_lock)
{
        struct io_cancel_data cd = { .ctx = ctx, .data = user_data, };
        struct io_kiocb *req = io_timeout_extract(ctx, &cd);
        struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
        struct io_timeout_data *data;

        if (IS_ERR(req))
                return PTR_ERR(req);

        timeout->off = 0; /* noseq */
        data = req->async_data;
        data->ts = *ts;

        list_add_tail(&timeout->list, &ctx->timeout_list);
        hrtimer_init(&data->timer, io_timeout_get_clock(data), mode);
        data->timer.function = io_timeout_fn;
        hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), mode);
        return 0;
}

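/*
 * Prep for IORING_OP_TIMEOUT_REMOVE: a plain removal takes no flags, while
 * IORING_TIMEOUT_UPDATE additionally carries a new timespec in sqe->addr2
 * and may target a linked timeout via IORING_LINK_TIMEOUT_UPDATE.
 */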
int io_timeout_remove_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        struct io_timeout_rem *tr = io_kiocb_to_cmd(req, struct io_timeout_rem);

        if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
                return -EINVAL;
        if (sqe->buf_index || sqe->len || sqe->splice_fd_in)
                return -EINVAL;

        tr->ltimeout = false;
        tr->addr = READ_ONCE(sqe->addr);
        tr->flags = READ_ONCE(sqe->timeout_flags);
        if (tr->flags & IORING_TIMEOUT_UPDATE_MASK) {
                if (hweight32(tr->flags & IORING_TIMEOUT_CLOCK_MASK) > 1)
                        return -EINVAL;
                if (tr->flags & IORING_LINK_TIMEOUT_UPDATE)
                        tr->ltimeout = true;
                if (tr->flags & ~(IORING_TIMEOUT_UPDATE_MASK|IORING_TIMEOUT_ABS))
                        return -EINVAL;
                if (get_timespec64(&tr->ts, u64_to_user_ptr(sqe->addr2)))
                        return -EFAULT;
                if (tr->ts.tv_sec < 0 || tr->ts.tv_nsec < 0)
                        return -EINVAL;
        } else if (tr->flags) {
                /* timeout removal doesn't support flags */
                return -EINVAL;
        }

        return 0;
}

static inline enum hrtimer_mode io_translate_timeout_mode(unsigned int flags)
{
        return (flags & IORING_TIMEOUT_ABS) ? HRTIMER_MODE_ABS
                                            : HRTIMER_MODE_REL;
}

/*
 * Remove or update an existing timeout command
 */
int io_timeout_remove(struct io_kiocb *req, unsigned int issue_flags)
{
        struct io_timeout_rem *tr = io_kiocb_to_cmd(req, struct io_timeout_rem);
        struct io_ring_ctx *ctx = req->ctx;
        int ret;

        if (!(tr->flags & IORING_TIMEOUT_UPDATE)) {
                struct io_cancel_data cd = { .ctx = ctx, .data = tr->addr, };

                spin_lock(&ctx->completion_lock);
                ret = io_timeout_cancel(ctx, &cd);
                spin_unlock(&ctx->completion_lock);
        } else {
                enum hrtimer_mode mode = io_translate_timeout_mode(tr->flags);

                spin_lock_irq(&ctx->timeout_lock);
                if (tr->ltimeout)
                        ret = io_linked_timeout_update(ctx, tr->addr, &tr->ts, mode);
                else
                        ret = io_timeout_update(ctx, tr->addr, &tr->ts, mode);
                spin_unlock_irq(&ctx->timeout_lock);
        }

        if (ret < 0)
                req_set_fail(req);
        io_req_set_res(req, ret, 0);
        return IOU_OK;
}

static int __io_timeout_prep(struct io_kiocb *req,
                             const struct io_uring_sqe *sqe,
                             bool is_timeout_link)
{
        struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
        struct io_timeout_data *data;
        unsigned flags;
        u32 off = READ_ONCE(sqe->off);

        if (sqe->buf_index || sqe->len != 1 || sqe->splice_fd_in)
                return -EINVAL;
        if (off && is_timeout_link)
                return -EINVAL;
        flags = READ_ONCE(sqe->timeout_flags);
        if (flags & ~(IORING_TIMEOUT_ABS | IORING_TIMEOUT_CLOCK_MASK |
                      IORING_TIMEOUT_ETIME_SUCCESS |
                      IORING_TIMEOUT_MULTISHOT))
                return -EINVAL;
        /* more than one clock specified is invalid, obviously */
        if (hweight32(flags & IORING_TIMEOUT_CLOCK_MASK) > 1)
                return -EINVAL;
        /* multishot requests only make sense with rel values */
        if (!(~flags & (IORING_TIMEOUT_MULTISHOT | IORING_TIMEOUT_ABS)))
                return -EINVAL;

        INIT_LIST_HEAD(&timeout->list);
        timeout->off = off;
        if (unlikely(off && !req->ctx->off_timeout_used))
                req->ctx->off_timeout_used = true;
        /*
         * for multishot reqs w/ fixed nr of repeats, repeats tracks the
         * remaining nr
         */
        timeout->repeats = 0;
        if ((flags & IORING_TIMEOUT_MULTISHOT) && off > 0)
                timeout->repeats = off;

        if (WARN_ON_ONCE(req_has_async_data(req)))
                return -EFAULT;
        if (io_alloc_async_data(req))
                return -ENOMEM;

        data = req->async_data;
        data->req = req;
        data->flags = flags;

        if (get_timespec64(&data->ts, u64_to_user_ptr(sqe->addr)))
                return -EFAULT;

        if (data->ts.tv_sec < 0 || data->ts.tv_nsec < 0)
                return -EINVAL;

        INIT_LIST_HEAD(&timeout->list);
        data->mode = io_translate_timeout_mode(flags);
        hrtimer_init(&data->timer, io_timeout_get_clock(data), data->mode);

        if (is_timeout_link) {
                struct io_submit_link *link = &req->ctx->submit_state.link;

                if (!link->head)
                        return -EINVAL;
                if (link->last->opcode == IORING_OP_LINK_TIMEOUT)
                        return -EINVAL;
                timeout->head = link->last;
                link->last->flags |= REQ_F_ARM_LTIMEOUT;
        }
        return 0;
}

int io_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        return __io_timeout_prep(req, sqe, false);
}

int io_link_timeout_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
        return __io_timeout_prep(req, sqe, true);
}

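/*
 * Arm a timeout request: pure (no-sequence) and multishot timeouts go to the
 * tail of the list, while sequence-based timeouts are sorted by how soon
 * their target sequence comes up relative to the current CQ tail.
 */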
int io_timeout(struct io_kiocb *req, unsigned int issue_flags)
{
        struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
        struct io_ring_ctx *ctx = req->ctx;
        struct io_timeout_data *data = req->async_data;
        struct list_head *entry;
        u32 tail, off = timeout->off;

        spin_lock_irq(&ctx->timeout_lock);

        /*
         * sqe->off holds how many events need to occur for this timeout
         * event to be satisfied. If it isn't set, then this is a pure
         * timeout request; the sequence isn't used.
         */
        if (io_is_timeout_noseq(req)) {
                entry = ctx->timeout_list.prev;
                goto add;
        }

        tail = data_race(ctx->cached_cq_tail) - atomic_read(&ctx->cq_timeouts);
        timeout->target_seq = tail + off;

        /* Update the last seq here in case io_flush_timeouts() hasn't.
         * This is safe because ->completion_lock is held, and submissions
         * and completions are never mixed in the same ->completion_lock section.
         */
        ctx->cq_last_tm_flush = tail;

        /*
         * Insertion sort, ensuring the first entry in the list is always
         * the one we need first.
         */
        list_for_each_prev(entry, &ctx->timeout_list) {
                struct io_timeout *nextt = list_entry(entry, struct io_timeout, list);
                struct io_kiocb *nxt = cmd_to_io_kiocb(nextt);

                if (io_is_timeout_noseq(nxt))
                        continue;
                /* nxt.seq is behind @tail, otherwise would've been completed */
                if (off >= nextt->target_seq - tail)
                        break;
        }
add:
        list_add(&timeout->list, entry);
        data->timer.function = io_timeout_fn;
        hrtimer_start(&data->timer, timespec64_to_ktime(data->ts), data->mode);
        spin_unlock_irq(&ctx->timeout_lock);
        return IOU_ISSUE_SKIP_COMPLETE;
}

void io_queue_linked_timeout(struct io_kiocb *req)
{
        struct io_timeout *timeout = io_kiocb_to_cmd(req, struct io_timeout);
        struct io_ring_ctx *ctx = req->ctx;

        spin_lock_irq(&ctx->timeout_lock);
        /*
         * If the back reference is NULL, then our linked request finished
         * before we got a chance to set up the timer
         */
        if (timeout->head) {
                struct io_timeout_data *data = req->async_data;

                data->timer.function = io_link_timeout_fn;
                hrtimer_start(&data->timer, timespec64_to_ktime(data->ts),
                              data->mode);
                list_add_tail(&timeout->list, &ctx->ltimeout_list);
        }
        spin_unlock_irq(&ctx->timeout_lock);
        /* drop submission reference */
        io_put_req(req);
}

static bool io_match_task(struct io_kiocb *head, struct task_struct *task,
                          bool cancel_all)
        __must_hold(&head->ctx->timeout_lock)
{
        struct io_kiocb *req;

        if (task && head->task != task)
                return false;
        if (cancel_all)
                return true;

        io_for_each_link(req, head) {
                if (req->flags & REQ_F_INFLIGHT)
                        return true;
        }
        return false;
}

/* Returns true if we found and killed one or more timeouts */
__cold bool io_kill_timeouts(struct io_ring_ctx *ctx, struct task_struct *tsk,
                             bool cancel_all)
{
        struct io_timeout *timeout, *tmp;
        int canceled = 0;

        /*
         * completion_lock is needed for io_match_task(). Take it before
         * timeout_lock to keep the locking ordering.
         */
        spin_lock(&ctx->completion_lock);
        spin_lock_irq(&ctx->timeout_lock);
        list_for_each_entry_safe(timeout, tmp, &ctx->timeout_list, list) {
                struct io_kiocb *req = cmd_to_io_kiocb(timeout);

                if (io_match_task(req, tsk, cancel_all) &&
                    io_kill_timeout(req, -ECANCELED))
                        canceled++;
        }
        spin_unlock_irq(&ctx->timeout_lock);
        spin_unlock(&ctx->completion_lock);
        return canceled != 0;
}