// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/hugetlb.h>
#include <linux/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "openclose.h"
#include "rsrc.h"

struct io_rsrc_update {
	struct file			*file;
	u64				arg;
	u32				nr_args;
	u32				offset;
};

static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
				  struct io_mapped_ubuf **pimu,
				  struct page **last_hpage);

#define IO_RSRC_REF_BATCH	100

/* only define max */
#define IORING_MAX_FIXED_FILES	(1U << 20)
#define IORING_MAX_REG_BUFFERS	(1U << 14)

void io_rsrc_refs_drop(struct io_ring_ctx *ctx)
	__must_hold(&ctx->uring_lock)
{
	if (ctx->rsrc_cached_refs) {
		io_rsrc_put_node(ctx->rsrc_node, ctx->rsrc_cached_refs);
		ctx->rsrc_cached_refs = 0;
	}
}

int __io_account_mem(struct user_struct *user, unsigned long nr_pages)
{
	unsigned long page_limit, cur_pages, new_pages;

	if (!nr_pages)
		return 0;

	/* Don't allow more pages than we can safely lock */
	page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	cur_pages = atomic_long_read(&user->locked_vm);
	do {
		new_pages = cur_pages + nr_pages;
		if (new_pages > page_limit)
			return -ENOMEM;
	} while (!atomic_long_try_cmpxchg(&user->locked_vm,
					  &cur_pages, new_pages));
	return 0;
}

static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
{
	if (ctx->user)
		__io_unaccount_mem(ctx->user, nr_pages);

	if (ctx->mm_account)
		atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
}

static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
{
	int ret;

	if (ctx->user) {
		ret = __io_account_mem(ctx->user, nr_pages);
		if (ret)
			return ret;
	}

	if (ctx->mm_account)
		atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);

	return 0;
}

static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
		       void __user *arg, unsigned index)
{
	struct iovec __user *src;

#ifdef CONFIG_COMPAT
	if (ctx->compat) {
		struct compat_iovec __user *ciovs;
		struct compat_iovec ciov;

		ciovs = (struct compat_iovec __user *) arg;
		if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
			return -EFAULT;

		dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
		dst->iov_len = ciov.iov_len;
		return 0;
	}
#endif
	src = (struct iovec __user *) arg;
	if (copy_from_user(dst, &src[index], sizeof(*dst)))
		return -EFAULT;
	return 0;
}

static int io_buffer_validate(struct iovec *iov)
{
	unsigned long tmp, acct_len = iov->iov_len + (PAGE_SIZE - 1);

	/*
	 * Don't impose further limits on the size and buffer
	 * constraints here, we'll -EINVAL later when IO is
	 * submitted if they are wrong.
	 */
	if (!iov->iov_base)
		return iov->iov_len ? -EFAULT : 0;
	if (!iov->iov_len)
		return -EFAULT;

	/* arbitrary limit, but we need something */
	if (iov->iov_len > SZ_1G)
		return -EFAULT;

	if (check_add_overflow((unsigned long)iov->iov_base, acct_len, &tmp))
		return -EOVERFLOW;

	return 0;
}

static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf **slot)
{
	struct io_mapped_ubuf *imu = *slot;
	unsigned int i;

	if (imu != ctx->dummy_ubuf) {
		for (i = 0; i < imu->nr_bvecs; i++)
			unpin_user_page(imu->bvec[i].bv_page);
		if (imu->acct_pages)
			io_unaccount_mem(ctx, imu->acct_pages);
		kvfree(imu);
	}
	*slot = NULL;
}

void io_rsrc_refs_refill(struct io_ring_ctx *ctx)
	__must_hold(&ctx->uring_lock)
{
	ctx->rsrc_cached_refs += IO_RSRC_REF_BATCH;
	percpu_ref_get_many(&ctx->rsrc_node->refs, IO_RSRC_REF_BATCH);
}

static void __io_rsrc_put_work(struct io_rsrc_node *ref_node)
{
	struct io_rsrc_data *rsrc_data = ref_node->rsrc_data;
	struct io_ring_ctx *ctx = rsrc_data->ctx;
	struct io_rsrc_put *prsrc, *tmp;

	list_for_each_entry_safe(prsrc, tmp, &ref_node->rsrc_list, list) {
		list_del(&prsrc->list);

		if (prsrc->tag) {
			if (ctx->flags & IORING_SETUP_IOPOLL) {
				mutex_lock(&ctx->uring_lock);
				io_post_aux_cqe(ctx, prsrc->tag, 0, 0);
				mutex_unlock(&ctx->uring_lock);
			} else {
				io_post_aux_cqe(ctx, prsrc->tag, 0, 0);
			}
		}

		rsrc_data->do_put(ctx, prsrc);
		kfree(prsrc);
	}

	io_rsrc_node_destroy(ref_node);
	if (atomic_dec_and_test(&rsrc_data->refs))
		complete(&rsrc_data->done);
}

void io_rsrc_put_work(struct work_struct *work)
{
	struct io_ring_ctx *ctx;
	struct llist_node *node;

	ctx = container_of(work, struct io_ring_ctx, rsrc_put_work.work);
	node = llist_del_all(&ctx->rsrc_put_llist);

	while (node) {
		struct io_rsrc_node *ref_node;
		struct llist_node *next = node->next;

		ref_node = llist_entry(node, struct io_rsrc_node, llist);
		__io_rsrc_put_work(ref_node);
		node = next;
	}
}

void io_rsrc_put_tw(struct callback_head *cb)
{
	struct io_ring_ctx *ctx = container_of(cb, struct io_ring_ctx,
					       rsrc_put_tw);

	io_rsrc_put_work(&ctx->rsrc_put_work.work);
}

void io_wait_rsrc_data(struct io_rsrc_data *data)
{
	if (data && !atomic_dec_and_test(&data->refs))
		wait_for_completion(&data->done);
}

void io_rsrc_node_destroy(struct io_rsrc_node *ref_node)
{
	percpu_ref_exit(&ref_node->refs);
	kfree(ref_node);
}
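
/*
 * Rough lifecycle of an rsrc node: io_rsrc_node_switch() parks the current
 * node on ->rsrc_ref_list and kills its percpu ref. Once the last reference
 * goes away, io_rsrc_node_ref_zero() below marks the node done and moves all
 * completed nodes, in order, onto ->rsrc_put_llist. The deferred puts then
 * run from io_rsrc_put_work() / io_rsrc_put_tw() above, which post a CQE for
 * each tagged resource and hand it back via the data's ->do_put() callback.
 */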
static __cold void io_rsrc_node_ref_zero(struct percpu_ref *ref)
{
	struct io_rsrc_node *node = container_of(ref, struct io_rsrc_node, refs);
	struct io_ring_ctx *ctx = node->rsrc_data->ctx;
	unsigned long flags;
	bool first_add = false;
	unsigned long delay = HZ;

	spin_lock_irqsave(&ctx->rsrc_ref_lock, flags);
	node->done = true;

	/* if we are mid-quiesce then do not delay */
	if (node->rsrc_data->quiesce)
		delay = 0;

	while (!list_empty(&ctx->rsrc_ref_list)) {
		node = list_first_entry(&ctx->rsrc_ref_list,
					struct io_rsrc_node, node);
		/* recycle ref nodes in order */
		if (!node->done)
			break;
		list_del(&node->node);
		first_add |= llist_add(&node->llist, &ctx->rsrc_put_llist);
	}
	spin_unlock_irqrestore(&ctx->rsrc_ref_lock, flags);

	if (!first_add)
		return;

	if (ctx->submitter_task) {
		if (!task_work_add(ctx->submitter_task, &ctx->rsrc_put_tw,
				   ctx->notify_method))
			return;
	}
	mod_delayed_work(system_wq, &ctx->rsrc_put_work, delay);
}

static struct io_rsrc_node *io_rsrc_node_alloc(void)
{
	struct io_rsrc_node *ref_node;

	ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
	if (!ref_node)
		return NULL;

	if (percpu_ref_init(&ref_node->refs, io_rsrc_node_ref_zero,
			    0, GFP_KERNEL)) {
		kfree(ref_node);
		return NULL;
	}
	INIT_LIST_HEAD(&ref_node->node);
	INIT_LIST_HEAD(&ref_node->rsrc_list);
	ref_node->done = false;
	return ref_node;
}

void io_rsrc_node_switch(struct io_ring_ctx *ctx,
			 struct io_rsrc_data *data_to_kill)
	__must_hold(&ctx->uring_lock)
{
	WARN_ON_ONCE(!ctx->rsrc_backup_node);
	WARN_ON_ONCE(data_to_kill && !ctx->rsrc_node);

	io_rsrc_refs_drop(ctx);

	if (data_to_kill) {
		struct io_rsrc_node *rsrc_node = ctx->rsrc_node;

		rsrc_node->rsrc_data = data_to_kill;
		spin_lock_irq(&ctx->rsrc_ref_lock);
		list_add_tail(&rsrc_node->node, &ctx->rsrc_ref_list);
		spin_unlock_irq(&ctx->rsrc_ref_lock);

		atomic_inc(&data_to_kill->refs);
		percpu_ref_kill(&rsrc_node->refs);
		ctx->rsrc_node = NULL;
	}

	if (!ctx->rsrc_node) {
		ctx->rsrc_node = ctx->rsrc_backup_node;
		ctx->rsrc_backup_node = NULL;
	}
}

int io_rsrc_node_switch_start(struct io_ring_ctx *ctx)
{
	if (ctx->rsrc_backup_node)
		return 0;
	ctx->rsrc_backup_node = io_rsrc_node_alloc();
	return ctx->rsrc_backup_node ? 0 : -ENOMEM;
}

__cold static int io_rsrc_ref_quiesce(struct io_rsrc_data *data,
				      struct io_ring_ctx *ctx)
{
	int ret;

	/* As we may drop ->uring_lock, other task may have started quiesce */
	if (data->quiesce)
		return -ENXIO;
	ret = io_rsrc_node_switch_start(ctx);
	if (ret)
		return ret;
	io_rsrc_node_switch(ctx, data);

	/* kill initial ref, already quiesced if zero */
	if (atomic_dec_and_test(&data->refs))
		return 0;

	data->quiesce = true;
	mutex_unlock(&ctx->uring_lock);
	do {
		ret = io_run_task_work_sig(ctx);
		if (ret < 0) {
			atomic_inc(&data->refs);
			/* wait for all works potentially completing data->done */
			flush_delayed_work(&ctx->rsrc_put_work);
			reinit_completion(&data->done);
			mutex_lock(&ctx->uring_lock);
			break;
		}

		flush_delayed_work(&ctx->rsrc_put_work);
		ret = wait_for_completion_interruptible(&data->done);
		if (!ret) {
			mutex_lock(&ctx->uring_lock);
			if (atomic_read(&data->refs) <= 0)
				break;
			/*
			 * it has been revived by another thread while
			 * we were unlocked
			 */
			mutex_unlock(&ctx->uring_lock);
		}
	} while (1);
	data->quiesce = false;

	return ret;
}

static void io_free_page_table(void **table, size_t size)
{
	unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);

	for (i = 0; i < nr_tables; i++)
		kfree(table[i]);
	kfree(table);
}

static void io_rsrc_data_free(struct io_rsrc_data *data)
{
	size_t size = data->nr * sizeof(data->tags[0][0]);

	if (data->tags)
		io_free_page_table((void **)data->tags, size);
	kfree(data);
}

static __cold void **io_alloc_page_table(size_t size)
{
	unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);
	size_t init_size = size;
	void **table;

	table = kcalloc(nr_tables, sizeof(*table), GFP_KERNEL_ACCOUNT);
	if (!table)
		return NULL;

	for (i = 0; i < nr_tables; i++) {
		unsigned int this_size = min_t(size_t, size, PAGE_SIZE);

		table[i] = kzalloc(this_size, GFP_KERNEL_ACCOUNT);
		if (!table[i]) {
			io_free_page_table(table, init_size);
			return NULL;
		}
		size -= this_size;
	}
	return table;
}
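
/*
 * Resource tags live in a two-level table: io_alloc_page_table() above splits
 * the nr * sizeof(u64) tag array into page-sized chunks, so with 4K pages one
 * chunk holds 512 tags and e.g. 1000 tags need two chunks. io_get_tag_slot()
 * maps a resource index back to its u64 slot in that table.
 */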
__cold static int io_rsrc_data_alloc(struct io_ring_ctx *ctx,
				     rsrc_put_fn *do_put, u64 __user *utags,
				     unsigned nr, struct io_rsrc_data **pdata)
{
	struct io_rsrc_data *data;
	int ret = 0;
	unsigned i;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	data->tags = (u64 **)io_alloc_page_table(nr * sizeof(data->tags[0][0]));
	if (!data->tags) {
		kfree(data);
		return -ENOMEM;
	}

	data->nr = nr;
	data->ctx = ctx;
	data->do_put = do_put;
	if (utags) {
		ret = -EFAULT;
		for (i = 0; i < nr; i++) {
			u64 *tag_slot = io_get_tag_slot(data, i);

			if (copy_from_user(tag_slot, &utags[i],
					   sizeof(*tag_slot)))
				goto fail;
		}
	}

	atomic_set(&data->refs, 1);
	init_completion(&data->done);
	*pdata = data;
	return 0;
fail:
	io_rsrc_data_free(data);
	return ret;
}
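
/*
 * Updating an occupied file slot never drops the old file inline: it is
 * queued onto the current rsrc node via io_queue_rsrc_removal(), so requests
 * that still reference it keep working until the node's references are gone.
 * needs_switch below makes us retire that node once the loop is done so the
 * queued puts can actually complete.
 */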
static int __io_sqe_files_update(struct io_ring_ctx *ctx,
				 struct io_uring_rsrc_update2 *up,
				 unsigned nr_args)
{
	u64 __user *tags = u64_to_user_ptr(up->tags);
	__s32 __user *fds = u64_to_user_ptr(up->data);
	struct io_rsrc_data *data = ctx->file_data;
	struct io_fixed_file *file_slot;
	struct file *file;
	int fd, i, err = 0;
	unsigned int done;
	bool needs_switch = false;

	if (!ctx->file_data)
		return -ENXIO;
	if (up->offset + nr_args > ctx->nr_user_files)
		return -EINVAL;

	for (done = 0; done < nr_args; done++) {
		u64 tag = 0;

		if ((tags && copy_from_user(&tag, &tags[done], sizeof(tag))) ||
		    copy_from_user(&fd, &fds[done], sizeof(fd))) {
			err = -EFAULT;
			break;
		}
		if ((fd == IORING_REGISTER_FILES_SKIP || fd == -1) && tag) {
			err = -EINVAL;
			break;
		}
		if (fd == IORING_REGISTER_FILES_SKIP)
			continue;

		i = array_index_nospec(up->offset + done, ctx->nr_user_files);
		file_slot = io_fixed_file_slot(&ctx->file_table, i);

		if (file_slot->file_ptr) {
			file = (struct file *)(file_slot->file_ptr & FFS_MASK);
			err = io_queue_rsrc_removal(data, i, ctx->rsrc_node, file);
			if (err)
				break;
			file_slot->file_ptr = 0;
			io_file_bitmap_clear(&ctx->file_table, i);
			needs_switch = true;
		}
		if (fd != -1) {
			file = fget(fd);
			if (!file) {
				err = -EBADF;
				break;
			}
			/*
			 * Don't allow io_uring instances to be registered. If
			 * UNIX isn't enabled, then this causes a reference
			 * cycle and this instance can never get freed. If UNIX
			 * is enabled we'll handle it just fine, but there's
			 * still no point in allowing a ring fd as it doesn't
			 * support regular read/write anyway.
			 */
			if (io_is_uring_fops(file)) {
				fput(file);
				err = -EBADF;
				break;
			}
			err = io_scm_file_account(ctx, file);
			if (err) {
				fput(file);
				break;
			}
			*io_get_tag_slot(data, i) = tag;
			io_fixed_file_set(file_slot, file);
			io_file_bitmap_set(&ctx->file_table, i);
		}
	}

	if (needs_switch)
		io_rsrc_node_switch(ctx, data);
	return done ? done : err;
}

static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
				   struct io_uring_rsrc_update2 *up,
				   unsigned int nr_args)
{
	u64 __user *tags = u64_to_user_ptr(up->tags);
	struct iovec iov, __user *iovs = u64_to_user_ptr(up->data);
	struct page *last_hpage = NULL;
	bool needs_switch = false;
	__u32 done;
	int i, err;

	if (!ctx->buf_data)
		return -ENXIO;
	if (up->offset + nr_args > ctx->nr_user_bufs)
		return -EINVAL;

	for (done = 0; done < nr_args; done++) {
		struct io_mapped_ubuf *imu;
		int offset = up->offset + done;
		u64 tag = 0;

		err = io_copy_iov(ctx, &iov, iovs, done);
		if (err)
			break;
		if (tags && copy_from_user(&tag, &tags[done], sizeof(tag))) {
			err = -EFAULT;
			break;
		}
		err = io_buffer_validate(&iov);
		if (err)
			break;
		if (!iov.iov_base && tag) {
			err = -EINVAL;
			break;
		}
		err = io_sqe_buffer_register(ctx, &iov, &imu, &last_hpage);
		if (err)
			break;

		i = array_index_nospec(offset, ctx->nr_user_bufs);
		if (ctx->user_bufs[i] != ctx->dummy_ubuf) {
			err = io_queue_rsrc_removal(ctx->buf_data, i,
						    ctx->rsrc_node, ctx->user_bufs[i]);
			if (unlikely(err)) {
				io_buffer_unmap(ctx, &imu);
				break;
			}
			ctx->user_bufs[i] = ctx->dummy_ubuf;
			needs_switch = true;
		}

		ctx->user_bufs[i] = imu;
		*io_get_tag_slot(ctx->buf_data, offset) = tag;
	}

	if (needs_switch)
		io_rsrc_node_switch(ctx, ctx->buf_data);
	return done ? done : err;
}

static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
				     struct io_uring_rsrc_update2 *up,
				     unsigned nr_args)
{
	__u32 tmp;
	int err;

	if (check_add_overflow(up->offset, nr_args, &tmp))
		return -EOVERFLOW;
	err = io_rsrc_node_switch_start(ctx);
	if (err)
		return err;

	switch (type) {
	case IORING_RSRC_FILE:
		return __io_sqe_files_update(ctx, up, nr_args);
	case IORING_RSRC_BUFFER:
		return __io_sqe_buffers_update(ctx, up, nr_args);
	}
	return -EINVAL;
}

int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
			     unsigned nr_args)
{
	struct io_uring_rsrc_update2 up;

	if (!nr_args)
		return -EINVAL;
	memset(&up, 0, sizeof(up));
	if (copy_from_user(&up, arg, sizeof(struct io_uring_rsrc_update)))
		return -EFAULT;
	if (up.resv || up.resv2)
		return -EINVAL;
	return __io_register_rsrc_update(ctx, IORING_RSRC_FILE, &up, nr_args);
}

int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned size, unsigned type)
{
	struct io_uring_rsrc_update2 up;

	if (size != sizeof(up))
		return -EINVAL;
	if (copy_from_user(&up, arg, sizeof(up)))
		return -EFAULT;
	if (!up.nr || up.resv || up.resv2)
		return -EINVAL;
	return __io_register_rsrc_update(ctx, type, &up, up.nr);
}

__cold int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned int size, unsigned int type)
{
	struct io_uring_rsrc_register rr;

	/* keep it extendible */
	if (size != sizeof(rr))
		return -EINVAL;

	memset(&rr, 0, sizeof(rr));
	if (copy_from_user(&rr, arg, size))
		return -EFAULT;
	if (!rr.nr || rr.resv2)
		return -EINVAL;
	if (rr.flags & ~IORING_RSRC_REGISTER_SPARSE)
		return -EINVAL;

	switch (type) {
	case IORING_RSRC_FILE:
		if (rr.flags & IORING_RSRC_REGISTER_SPARSE && rr.data)
			break;
		return io_sqe_files_register(ctx, u64_to_user_ptr(rr.data),
					     rr.nr, u64_to_user_ptr(rr.tags));
	case IORING_RSRC_BUFFER:
		if (rr.flags & IORING_RSRC_REGISTER_SPARSE && rr.data)
			break;
		return io_sqe_buffers_register(ctx, u64_to_user_ptr(rr.data),
					       rr.nr, u64_to_user_ptr(rr.tags));
	}
	return -EINVAL;
}

int io_files_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);

	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
		return -EINVAL;
	if (sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	up->offset = READ_ONCE(sqe->off);
	up->nr_args = READ_ONCE(sqe->len);
	if (!up->nr_args)
		return -EINVAL;
	up->arg = READ_ONCE(sqe->addr);
	return 0;
}

static int io_files_update_with_index_alloc(struct io_kiocb *req,
					    unsigned int issue_flags)
{
	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
	__s32 __user *fds = u64_to_user_ptr(up->arg);
	unsigned int done;
	struct file *file;
	int ret, fd;

	if (!req->ctx->file_data)
		return -ENXIO;

	for (done = 0; done < up->nr_args; done++) {
		if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
			ret = -EFAULT;
			break;
		}

		file = fget(fd);
		if (!file) {
			ret = -EBADF;
			break;
		}
		ret = io_fixed_fd_install(req, issue_flags, file,
					  IORING_FILE_INDEX_ALLOC);
		if (ret < 0)
			break;
		if (copy_to_user(&fds[done], &ret, sizeof(ret))) {
			__io_close_fixed(req->ctx, issue_flags, ret);
			ret = -EFAULT;
			break;
		}
	}

	if (done)
		return done;
	return ret;
}

int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_uring_rsrc_update2 up2;
	int ret;

	up2.offset = up->offset;
	up2.data = up->arg;
	up2.nr = 0;
	up2.tags = 0;
	up2.resv = 0;
	up2.resv2 = 0;

	if (up->offset == IORING_FILE_INDEX_ALLOC) {
		ret = io_files_update_with_index_alloc(req, issue_flags);
	} else {
		io_ring_submit_lock(ctx, issue_flags);
		ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE,
						&up2, up->nr_args);
		io_ring_submit_unlock(ctx, issue_flags);
	}

	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
			  struct io_rsrc_node *node, void *rsrc)
{
	u64 *tag_slot = io_get_tag_slot(data, idx);
	struct io_rsrc_put *prsrc;

	prsrc = kzalloc(sizeof(*prsrc), GFP_KERNEL);
	if (!prsrc)
		return -ENOMEM;

	prsrc->tag = *tag_slot;
	*tag_slot = 0;
	prsrc->rsrc = rsrc;
	list_add(&prsrc->list, &node->rsrc_list);
	return 0;
}

void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
{
	int i;

	for (i = 0; i < ctx->nr_user_files; i++) {
		struct file *file = io_file_from_index(&ctx->file_table, i);

		/* skip scm accounted files, they'll be freed by ->ring_sock */
		if (!file || io_file_need_scm(file))
			continue;
		io_file_bitmap_clear(&ctx->file_table, i);
		fput(file);
	}

#if defined(CONFIG_UNIX)
	if (ctx->ring_sock) {
		struct sock *sock = ctx->ring_sock->sk;
		struct sk_buff *skb;

		while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
			kfree_skb(skb);
	}
#endif
	io_free_file_tables(&ctx->file_table);
	io_file_table_set_alloc_range(ctx, 0, 0);
	io_rsrc_data_free(ctx->file_data);
	ctx->file_data = NULL;
	ctx->nr_user_files = 0;
}

int io_sqe_files_unregister(struct io_ring_ctx *ctx)
{
	unsigned nr = ctx->nr_user_files;
	int ret;

	if (!ctx->file_data)
		return -ENXIO;

	/*
	 * Quiesce may unlock ->uring_lock, and while it's not held
	 * prevent new requests using the table.
	 */
	ctx->nr_user_files = 0;
	ret = io_rsrc_ref_quiesce(ctx->file_data, ctx);
	ctx->nr_user_files = nr;
	if (!ret)
		__io_sqe_files_unregister(ctx);
	return ret;
}

/*
 * Ensure the UNIX gc is aware of our file set, so we are certain that
 * the io_uring can be safely unregistered on process exit, even if we have
 * loops in the file referencing. We account only files that can hold other
 * files because otherwise they can't form a loop and so are not interesting
 * for GC.
 */
int __io_scm_file_account(struct io_ring_ctx *ctx, struct file *file)
{
#if defined(CONFIG_UNIX)
	struct sock *sk = ctx->ring_sock->sk;
	struct sk_buff_head *head = &sk->sk_receive_queue;
	struct scm_fp_list *fpl;
	struct sk_buff *skb;

	if (likely(!io_file_need_scm(file)))
		return 0;

	/*
	 * See if we can merge this file into an existing skb SCM_RIGHTS
	 * file set. If there's no room, fall back to allocating a new skb
	 * and filling it in.
	 */
	spin_lock_irq(&head->lock);
	skb = skb_peek(head);
	if (skb && UNIXCB(skb).fp->count < SCM_MAX_FD)
		__skb_unlink(skb, head);
	else
		skb = NULL;
	spin_unlock_irq(&head->lock);

	if (!skb) {
		fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
		if (!fpl)
			return -ENOMEM;

		skb = alloc_skb(0, GFP_KERNEL);
		if (!skb) {
			kfree(fpl);
			return -ENOMEM;
		}

		fpl->user = get_uid(current_user());
		fpl->max = SCM_MAX_FD;
		fpl->count = 0;

		UNIXCB(skb).fp = fpl;
		skb->sk = sk;
		skb->scm_io_uring = 1;
		skb->destructor = unix_destruct_scm;
		refcount_add(skb->truesize, &sk->sk_wmem_alloc);
	}

	fpl = UNIXCB(skb).fp;
	fpl->fp[fpl->count++] = get_file(file);
	unix_inflight(fpl->user, file);
	skb_queue_head(head, skb);
	fput(file);
#endif
	return 0;
}
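
/*
 * Tear-down counterpart of io_scm_file_account(): when a registered file is
 * finally put, find the SCM_RIGHTS skb that holds it, drop it from that skb's
 * scm_fp_list (freeing the skb if it was the last entry) and fput() the file.
 * Without CONFIG_UNIX this is a plain fput().
 */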
static void io_rsrc_file_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
{
	struct file *file = prsrc->file;
#if defined(CONFIG_UNIX)
	struct sock *sock = ctx->ring_sock->sk;
	struct sk_buff_head list, *head = &sock->sk_receive_queue;
	struct sk_buff *skb;
	int i;

	if (!io_file_need_scm(file)) {
		fput(file);
		return;
	}

	__skb_queue_head_init(&list);

	/*
	 * Find the skb that holds this file in its SCM_RIGHTS. When found,
	 * remove this entry and rearrange the file array.
	 */
	skb = skb_dequeue(head);
	while (skb) {
		struct scm_fp_list *fp;

		fp = UNIXCB(skb).fp;
		for (i = 0; i < fp->count; i++) {
			int left;

			if (fp->fp[i] != file)
				continue;

			unix_notinflight(fp->user, fp->fp[i]);
			left = fp->count - 1 - i;
			if (left) {
				memmove(&fp->fp[i], &fp->fp[i + 1],
					left * sizeof(struct file *));
			}
			fp->count--;
			if (!fp->count) {
				kfree_skb(skb);
				skb = NULL;
			} else {
				__skb_queue_tail(&list, skb);
			}
			fput(file);
			file = NULL;
			break;
		}

		if (!file)
			break;

		__skb_queue_tail(&list, skb);

		skb = skb_dequeue(head);
	}

	if (skb_peek(&list)) {
		spin_lock_irq(&head->lock);
		while ((skb = __skb_dequeue(&list)) != NULL)
			__skb_queue_tail(head, skb);
		spin_unlock_irq(&head->lock);
	}
#else
	fput(file);
#endif
}
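
/*
 * Register an array of file descriptors as fixed files. Sparse entries
 * (fd == -1, or no fd array at all) are allowed so slots can be filled in
 * later, but a sparse slot must not carry a tag.
 */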
int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
			  unsigned nr_args, u64 __user *tags)
{
	__s32 __user *fds = (__s32 __user *) arg;
	struct file *file;
	int fd, ret;
	unsigned i;

	if (ctx->file_data)
		return -EBUSY;
	if (!nr_args)
		return -EINVAL;
	if (nr_args > IORING_MAX_FIXED_FILES)
		return -EMFILE;
	if (nr_args > rlimit(RLIMIT_NOFILE))
		return -EMFILE;
	ret = io_rsrc_node_switch_start(ctx);
	if (ret)
		return ret;
	ret = io_rsrc_data_alloc(ctx, io_rsrc_file_put, tags, nr_args,
				 &ctx->file_data);
	if (ret)
		return ret;

	if (!io_alloc_file_tables(&ctx->file_table, nr_args)) {
		io_rsrc_data_free(ctx->file_data);
		ctx->file_data = NULL;
		return -ENOMEM;
	}

	for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
		struct io_fixed_file *file_slot;

		if (fds && copy_from_user(&fd, &fds[i], sizeof(fd))) {
			ret = -EFAULT;
			goto fail;
		}
		/* allow sparse sets */
		if (!fds || fd == -1) {
			ret = -EINVAL;
			if (unlikely(*io_get_tag_slot(ctx->file_data, i)))
				goto fail;
			continue;
		}

		file = fget(fd);
		ret = -EBADF;
		if (unlikely(!file))
			goto fail;

		/*
		 * Don't allow io_uring instances to be registered. If UNIX
		 * isn't enabled, then this causes a reference cycle and this
		 * instance can never get freed. If UNIX is enabled we'll
		 * handle it just fine, but there's still no point in allowing
		 * a ring fd as it doesn't support regular read/write anyway.
		 */
		if (io_is_uring_fops(file)) {
			fput(file);
			goto fail;
		}
		ret = io_scm_file_account(ctx, file);
		if (ret) {
			fput(file);
			goto fail;
		}
		file_slot = io_fixed_file_slot(&ctx->file_table, i);
		io_fixed_file_set(file_slot, file);
		io_file_bitmap_set(&ctx->file_table, i);
	}

	/* default it to the whole table */
	io_file_table_set_alloc_range(ctx, 0, ctx->nr_user_files);
	io_rsrc_node_switch(ctx, NULL);
	return 0;
fail:
	__io_sqe_files_unregister(ctx);
	return ret;
}

static void io_rsrc_buf_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
{
	io_buffer_unmap(ctx, &prsrc->buf);
	prsrc->buf = NULL;
}

void __io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
{
	unsigned int i;

	for (i = 0; i < ctx->nr_user_bufs; i++)
		io_buffer_unmap(ctx, &ctx->user_bufs[i]);
	kfree(ctx->user_bufs);
	io_rsrc_data_free(ctx->buf_data);
	ctx->user_bufs = NULL;
	ctx->buf_data = NULL;
	ctx->nr_user_bufs = 0;
}

int io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
{
	unsigned nr = ctx->nr_user_bufs;
	int ret;

	if (!ctx->buf_data)
		return -ENXIO;

	/*
	 * Quiesce may unlock ->uring_lock, and while it's not held
	 * prevent new requests using the table.
	 */
	ctx->nr_user_bufs = 0;
	ret = io_rsrc_ref_quiesce(ctx->buf_data, ctx);
	ctx->nr_user_bufs = nr;
	if (!ret)
		__io_sqe_buffers_unregister(ctx);
	return ret;
}

/*
 * Not super efficient, but this only happens at registration time. And we do
 * cache the last compound head, so generally we'll only do a full search if
 * we don't match that one.
 *
 * We check if the given compound head page has already been accounted, to
 * avoid double accounting it. This allows us to account the full size of the
 * page, not just the constituent pages of a huge page.
 */
static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
				  int nr_pages, struct page *hpage)
{
	int i, j;

	/* check current page array */
	for (i = 0; i < nr_pages; i++) {
		if (!PageCompound(pages[i]))
			continue;
		if (compound_head(pages[i]) == hpage)
			return true;
	}

	/* check previously registered pages */
	for (i = 0; i < ctx->nr_user_bufs; i++) {
		struct io_mapped_ubuf *imu = ctx->user_bufs[i];

		for (j = 0; j < imu->nr_bvecs; j++) {
			if (!PageCompound(imu->bvec[j].bv_page))
				continue;
			if (compound_head(imu->bvec[j].bv_page) == hpage)
				return true;
		}
	}

	return false;
}

static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
				 int nr_pages, struct io_mapped_ubuf *imu,
				 struct page **last_hpage)
{
	int i, ret;

	imu->acct_pages = 0;
	for (i = 0; i < nr_pages; i++) {
		if (!PageCompound(pages[i])) {
			imu->acct_pages++;
		} else {
			struct page *hpage;

			hpage = compound_head(pages[i]);
			if (hpage == *last_hpage)
				continue;
			*last_hpage = hpage;
			if (headpage_already_acct(ctx, pages, i, hpage))
				continue;
			imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;
		}
	}

	if (!imu->acct_pages)
		return 0;

	ret = io_account_mem(ctx, imu->acct_pages);
	if (ret)
		imu->acct_pages = 0;
	return ret;
}
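
/*
 * Pin the user pages backing [ubuf, ubuf + len). Pages are pinned with
 * FOLL_WRITE | FOLL_LONGTERM; file-backed mappings are rejected unless they
 * are shmem or hugetlb, and all vmas must map the same file.
 */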
struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages)
{
	unsigned long start, end, nr_pages;
	struct vm_area_struct **vmas = NULL;
	struct page **pages = NULL;
	int i, pret, ret = -ENOMEM;

	end = (ubuf + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	start = ubuf >> PAGE_SHIFT;
	nr_pages = end - start;

	pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		goto done;

	vmas = kvmalloc_array(nr_pages, sizeof(struct vm_area_struct *),
			      GFP_KERNEL);
	if (!vmas)
		goto done;

	ret = 0;
	mmap_read_lock(current->mm);
	pret = pin_user_pages(ubuf, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
			      pages, vmas);
	if (pret == nr_pages) {
		struct file *file = vmas[0]->vm_file;

		/* don't support file backed memory */
		for (i = 0; i < nr_pages; i++) {
			if (vmas[i]->vm_file != file) {
				ret = -EINVAL;
				break;
			}
			if (!file)
				continue;
			if (!vma_is_shmem(vmas[i]) && !is_file_hugepages(file)) {
				ret = -EOPNOTSUPP;
				break;
			}
		}
		*npages = nr_pages;
	} else {
		ret = pret < 0 ? pret : -EFAULT;
	}
	mmap_read_unlock(current->mm);
	if (ret) {
		/*
		 * if we did partial map, or found file backed vmas,
		 * release any pages we did get
		 */
		if (pret > 0)
			unpin_user_pages(pages, pret);
		goto done;
	}
	ret = 0;
done:
	kvfree(vmas);
	if (ret < 0) {
		kvfree(pages);
		pages = ERR_PTR(ret);
	}
	return pages;
}
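
/*
 * Map a single user buffer: pin its pages, coalesce them into one bvec entry
 * if they all belong to the same folio (e.g. a huge page), account the pinned
 * memory and fill in a freshly allocated io_mapped_ubuf. A NULL iov_base
 * registers the dummy buffer for a sparse slot.
 */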
static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
				  struct io_mapped_ubuf **pimu,
				  struct page **last_hpage)
{
	struct io_mapped_ubuf *imu = NULL;
	struct page **pages = NULL;
	unsigned long off;
	size_t size;
	int ret, nr_pages, i;
	struct folio *folio = NULL;

	*pimu = ctx->dummy_ubuf;
	if (!iov->iov_base)
		return 0;

	ret = -ENOMEM;
	pages = io_pin_pages((unsigned long) iov->iov_base, iov->iov_len,
			     &nr_pages);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		pages = NULL;
		goto done;
	}

	/* If it's a huge page, try to coalesce them into a single bvec entry */
	if (nr_pages > 1) {
		folio = page_folio(pages[0]);
		for (i = 1; i < nr_pages; i++) {
			if (page_folio(pages[i]) != folio) {
				folio = NULL;
				break;
			}
		}
		if (folio) {
			/*
			 * The pages are bound to the folio, it doesn't
			 * actually unpin them but drops all but one reference,
			 * which is usually put down by io_buffer_unmap().
			 * Note, needs a better helper.
			 */
			unpin_user_pages(&pages[1], nr_pages - 1);
			nr_pages = 1;
		}
	}

	imu = kvmalloc(struct_size(imu, bvec, nr_pages), GFP_KERNEL);
	if (!imu)
		goto done;

	ret = io_buffer_account_pin(ctx, pages, nr_pages, imu, last_hpage);
	if (ret) {
		unpin_user_pages(pages, nr_pages);
		goto done;
	}

	off = (unsigned long) iov->iov_base & ~PAGE_MASK;
	size = iov->iov_len;
	/* store original address for later verification */
	imu->ubuf = (unsigned long) iov->iov_base;
	imu->ubuf_end = imu->ubuf + iov->iov_len;
	imu->nr_bvecs = nr_pages;
	*pimu = imu;
	ret = 0;

	if (folio) {
		bvec_set_page(&imu->bvec[0], pages[0], size, off);
		goto done;
	}
	for (i = 0; i < nr_pages; i++) {
		size_t vec_len;

		vec_len = min_t(size_t, size, PAGE_SIZE - off);
		bvec_set_page(&imu->bvec[i], pages[i], vec_len, off);
		off = 0;
		size -= vec_len;
	}
done:
	if (ret)
		kvfree(imu);
	kvfree(pages);
	return ret;
}

static int io_buffers_map_alloc(struct io_ring_ctx *ctx, unsigned int nr_args)
{
	ctx->user_bufs = kcalloc(nr_args, sizeof(*ctx->user_bufs), GFP_KERNEL);
	return ctx->user_bufs ? 0 : -ENOMEM;
}

int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned int nr_args, u64 __user *tags)
{
	struct page *last_hpage = NULL;
	struct io_rsrc_data *data;
	int i, ret;
	struct iovec iov;

	BUILD_BUG_ON(IORING_MAX_REG_BUFFERS >= (1u << 16));

	if (ctx->user_bufs)
		return -EBUSY;
	if (!nr_args || nr_args > IORING_MAX_REG_BUFFERS)
		return -EINVAL;
	ret = io_rsrc_node_switch_start(ctx);
	if (ret)
		return ret;
	ret = io_rsrc_data_alloc(ctx, io_rsrc_buf_put, tags, nr_args, &data);
	if (ret)
		return ret;
	ret = io_buffers_map_alloc(ctx, nr_args);
	if (ret) {
		io_rsrc_data_free(data);
		return ret;
	}

	for (i = 0; i < nr_args; i++, ctx->nr_user_bufs++) {
		if (arg) {
			ret = io_copy_iov(ctx, &iov, arg, i);
			if (ret)
				break;
			ret = io_buffer_validate(&iov);
			if (ret)
				break;
		} else {
			memset(&iov, 0, sizeof(iov));
		}

		if (!iov.iov_base && *io_get_tag_slot(data, i)) {
			ret = -EINVAL;
			break;
		}

		ret = io_sqe_buffer_register(ctx, &iov, &ctx->user_bufs[i],
					     &last_hpage);
		if (ret)
			break;
	}

	WARN_ON_ONCE(ctx->buf_data);

	ctx->buf_data = data;
	if (ret)
		__io_sqe_buffers_unregister(ctx);
	else
		io_rsrc_node_switch(ctx, NULL);
	return ret;
}

int io_import_fixed(int ddir, struct iov_iter *iter,
		    struct io_mapped_ubuf *imu,
		    u64 buf_addr, size_t len)
{
	u64 buf_end;
	size_t offset;

	if (WARN_ON_ONCE(!imu))
		return -EFAULT;
	if (unlikely(check_add_overflow(buf_addr, (u64)len, &buf_end)))
		return -EFAULT;
	/* not inside the mapped region */
	if (unlikely(buf_addr < imu->ubuf || buf_end > imu->ubuf_end))
		return -EFAULT;

	/*
	 * Might not be a start of buffer, set size appropriately
	 * and advance us to the beginning.
	 */
	offset = buf_addr - imu->ubuf;
	iov_iter_bvec(iter, ddir, imu->bvec, imu->nr_bvecs, offset + len);

	if (offset) {
		/*
		 * Don't use iov_iter_advance() here, as it's really slow for
		 * using the latter parts of a big fixed buffer - it iterates
		 * over each segment manually. We can cheat a bit here, because
		 * we know that:
		 *
		 * 1) it's a BVEC iter, we set it up
		 * 2) all bvecs are PAGE_SIZE in size, except potentially the
		 *    first and last bvec
		 *
		 * So just find our index, and adjust the iterator afterwards.
		 * If the offset is within the first bvec (or the whole first
		 * bvec), just use iov_iter_advance(). This makes it easier
		 * since we can just skip the first segment, which may not
		 * be PAGE_SIZE aligned.
		 */
		const struct bio_vec *bvec = imu->bvec;

		if (offset <= bvec->bv_len) {
			/*
			 * Note, huge page buffers consist of one large
			 * bvec entry and should always go this way. The other
			 * branch doesn't expect non PAGE_SIZE'd chunks.
			 */
			iter->bvec = bvec;
			iter->nr_segs = bvec->bv_len;
			iter->count -= offset;
			iter->iov_offset = offset;
		} else {
			unsigned long seg_skip;

			/* skip first vec */
			offset -= bvec->bv_len;
			seg_skip = 1 + (offset >> PAGE_SHIFT);

			iter->bvec = bvec + seg_skip;
			iter->nr_segs -= seg_skip;
			iter->count -= bvec->bv_len + offset;
			iter->iov_offset = offset & ~PAGE_MASK;
		}
	}

	return 0;
}