// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/hugetlb.h>
#include <linux/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "openclose.h"
#include "rsrc.h"

struct io_rsrc_update {
	struct file			*file;
	u64				arg;
	u32				nr_args;
	u32				offset;
};

static void io_rsrc_buf_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc);
static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
				  struct io_mapped_ubuf **pimu,
				  struct page **last_hpage);

/* only define max */
#define IORING_MAX_FIXED_FILES	(1U << 20)
#define IORING_MAX_REG_BUFFERS	(1U << 14)

static const struct io_mapped_ubuf dummy_ubuf = {
	/* set invalid range, so io_import_fixed() fails meeting it */
	.ubuf = -1UL,
	.ubuf_end = 0,
};

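/*
 * Charge @nr_pages against the user's RLIMIT_MEMLOCK budget. The
 * cmpxchg loop keeps this lockless: on a concurrent update we simply
 * re-read ->locked_vm and re-check the limit.
 */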
int __io_account_mem(struct user_struct *user, unsigned long nr_pages)
{
	unsigned long page_limit, cur_pages, new_pages;

	if (!nr_pages)
		return 0;

	/* Don't allow more pages than we can safely lock */
	page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	cur_pages = atomic_long_read(&user->locked_vm);
	do {
		new_pages = cur_pages + nr_pages;
		if (new_pages > page_limit)
			return -ENOMEM;
	} while (!atomic_long_try_cmpxchg(&user->locked_vm,
					  &cur_pages, new_pages));
	return 0;
}

static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
{
	if (ctx->user)
		__io_unaccount_mem(ctx->user, nr_pages);

	if (ctx->mm_account)
		atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
}

static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
{
	int ret;

	if (ctx->user) {
		ret = __io_account_mem(ctx->user, nr_pages);
		if (ret)
			return ret;
	}

	if (ctx->mm_account)
		atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);

	return 0;
}

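/*
 * Copy the iovec at @index from userspace into @dst, converting from
 * the 32-bit compat layout when the ring was set up by a compat task.
 */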
static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
		       void __user *arg, unsigned index)
{
	struct iovec __user *src;

#ifdef CONFIG_COMPAT
	if (ctx->compat) {
		struct compat_iovec __user *ciovs;
		struct compat_iovec ciov;

		ciovs = (struct compat_iovec __user *) arg;
		if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
			return -EFAULT;

		dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
		dst->iov_len = ciov.iov_len;
		return 0;
	}
#endif
	src = (struct iovec __user *) arg;
	if (copy_from_user(dst, &src[index], sizeof(*dst)))
		return -EFAULT;
	return 0;
}

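/*
 * Sanity check one buffer registration entry: a NULL base is only
 * accepted for a zero-length (sparse) entry, the length is capped at
 * 1G, and base plus the page-rounded length must not wrap around.
 */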
static int io_buffer_validate(struct iovec *iov)
{
	unsigned long tmp, acct_len = iov->iov_len + (PAGE_SIZE - 1);

	/*
	 * Don't impose further limits on the size and buffer
	 * constraints here, we'll -EINVAL later when IO is
	 * submitted if they are wrong.
	 */
	if (!iov->iov_base)
		return iov->iov_len ? -EFAULT : 0;
	if (!iov->iov_len)
		return -EFAULT;

	/* arbitrary limit, but we need something */
	if (iov->iov_len > SZ_1G)
		return -EFAULT;

	if (check_add_overflow((unsigned long)iov->iov_base, acct_len, &tmp))
		return -EOVERFLOW;

	return 0;
}

static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf **slot)
{
	struct io_mapped_ubuf *imu = *slot;
	unsigned int i;

	if (imu != &dummy_ubuf) {
		for (i = 0; i < imu->nr_bvecs; i++)
			unpin_user_page(imu->bvec[i].bv_page);
		if (imu->acct_pages)
			io_unaccount_mem(ctx, imu->acct_pages);
		kvfree(imu);
	}
	*slot = NULL;
}

static void io_rsrc_put_work(struct io_rsrc_node *node)
{
	struct io_rsrc_put *prsrc = &node->item;

	if (prsrc->tag)
		io_post_aux_cqe(node->ctx, prsrc->tag, 0, 0);

	switch (node->type) {
	case IORING_RSRC_FILE:
		fput(prsrc->file);
		break;
	case IORING_RSRC_BUFFER:
		io_rsrc_buf_put(node->ctx, prsrc);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}
}

void io_rsrc_node_destroy(struct io_ring_ctx *ctx, struct io_rsrc_node *node)
{
	if (!io_alloc_cache_put(&ctx->rsrc_node_cache, &node->cache))
		kfree(node);
}

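/*
 * Called when a node's refcount drops to zero. Nodes are recycled
 * strictly in list order: put every node at the head of ->rsrc_ref_list
 * that has no references left, and stop at the first one still in use.
 */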
void io_rsrc_node_ref_zero(struct io_rsrc_node *node)
	__must_hold(&node->ctx->uring_lock)
{
	struct io_ring_ctx *ctx = node->ctx;

	while (!list_empty(&ctx->rsrc_ref_list)) {
		node = list_first_entry(&ctx->rsrc_ref_list,
					struct io_rsrc_node, node);
		/* recycle ref nodes in order */
		if (node->refs)
			break;
		list_del(&node->node);

		if (likely(!node->empty))
			io_rsrc_put_work(node);
		io_rsrc_node_destroy(ctx, node);
	}
	if (list_empty(&ctx->rsrc_ref_list) && unlikely(ctx->rsrc_quiesce))
		wake_up_all(&ctx->rsrc_quiesce_wq);
}

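/* Grab a node from the ring's alloc cache, or fall back to kzalloc(). */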
struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx)
{
	struct io_rsrc_node *ref_node;
	struct io_cache_entry *entry;

	entry = io_alloc_cache_get(&ctx->rsrc_node_cache);
	if (entry) {
		ref_node = container_of(entry, struct io_rsrc_node, cache);
	} else {
		ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
		if (!ref_node)
			return NULL;
	}

	ref_node->ctx = ctx;
	ref_node->empty = 0;
	ref_node->refs = 1;
	return ref_node;
}

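/*
 * Wait for all previously queued rsrc nodes to be dropped: retire the
 * current node onto ->rsrc_ref_list, install a fresh one, then sleep on
 * ->rsrc_quiesce_wq (dropping ->uring_lock) until the list drains or a
 * signal interrupts the wait.
 */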
__cold static int io_rsrc_ref_quiesce(struct io_rsrc_data *data,
				      struct io_ring_ctx *ctx)
{
	struct io_rsrc_node *backup;
	DEFINE_WAIT(we);
	int ret;

	/* As we may drop ->uring_lock, other task may have started quiesce */
	if (data->quiesce)
		return -ENXIO;

	backup = io_rsrc_node_alloc(ctx);
	if (!backup)
		return -ENOMEM;
	ctx->rsrc_node->empty = true;
	ctx->rsrc_node->type = -1;
	list_add_tail(&ctx->rsrc_node->node, &ctx->rsrc_ref_list);
	io_put_rsrc_node(ctx, ctx->rsrc_node);
	ctx->rsrc_node = backup;

	if (list_empty(&ctx->rsrc_ref_list))
		return 0;

	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
		atomic_set(&ctx->cq_wait_nr, 1);
		smp_mb();
	}

	ctx->rsrc_quiesce++;
	data->quiesce = true;
	do {
		prepare_to_wait(&ctx->rsrc_quiesce_wq, &we, TASK_INTERRUPTIBLE);
		mutex_unlock(&ctx->uring_lock);

		ret = io_run_task_work_sig(ctx);
		if (ret < 0) {
			__set_current_state(TASK_RUNNING);
			mutex_lock(&ctx->uring_lock);
			if (list_empty(&ctx->rsrc_ref_list))
				ret = 0;
			break;
		}

		schedule();
		__set_current_state(TASK_RUNNING);
		mutex_lock(&ctx->uring_lock);
		ret = 0;
	} while (!list_empty(&ctx->rsrc_ref_list));

	finish_wait(&ctx->rsrc_quiesce_wq, &we);
	data->quiesce = false;
	ctx->rsrc_quiesce--;

	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
		atomic_set(&ctx->cq_wait_nr, 0);
		smp_mb();
	}
	return ret;
}

static void io_free_page_table(void **table, size_t size)
{
	unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);

	for (i = 0; i < nr_tables; i++)
		kfree(table[i]);
	kfree(table);
}

static void io_rsrc_data_free(struct io_rsrc_data *data)
{
	size_t size = data->nr * sizeof(data->tags[0][0]);

	if (data->tags)
		io_free_page_table((void **)data->tags, size);
	kfree(data);
}

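/*
 * Allocate an index table for @size bytes worth of entries, split into
 * PAGE_SIZE chunks so a large registration doesn't need one big
 * contiguous allocation.
 */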
static __cold void **io_alloc_page_table(size_t size)
{
	unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);
	size_t init_size = size;
	void **table;

	table = kcalloc(nr_tables, sizeof(*table), GFP_KERNEL_ACCOUNT);
	if (!table)
		return NULL;

	for (i = 0; i < nr_tables; i++) {
		unsigned int this_size = min_t(size_t, size, PAGE_SIZE);

		table[i] = kzalloc(this_size, GFP_KERNEL_ACCOUNT);
		if (!table[i]) {
			io_free_page_table(table, init_size);
			return NULL;
		}
		size -= this_size;
	}
	return table;
}

__cold static int io_rsrc_data_alloc(struct io_ring_ctx *ctx, int type,
				     u64 __user *utags,
				     unsigned nr, struct io_rsrc_data **pdata)
{
	struct io_rsrc_data *data;
	int ret = 0;
	unsigned i;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	data->tags = (u64 **)io_alloc_page_table(nr * sizeof(data->tags[0][0]));
	if (!data->tags) {
		kfree(data);
		return -ENOMEM;
	}

	data->nr = nr;
	data->ctx = ctx;
	data->rsrc_type = type;
	if (utags) {
		ret = -EFAULT;
		for (i = 0; i < nr; i++) {
			u64 *tag_slot = io_get_tag_slot(data, i);

			if (copy_from_user(tag_slot, &utags[i],
					   sizeof(*tag_slot)))
				goto fail;
		}
	}
	*pdata = data;
	return 0;
fail:
	io_rsrc_data_free(data);
	return ret;
}

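/*
 * Update a range of registered file slots. For each entry, any existing
 * file is queued for a deferred put via the rsrc node, and the new fd
 * (unless it's -1 or IORING_REGISTER_FILES_SKIP) is installed in its
 * place. Returns the number of slots processed, or an error if none were.
 */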
static int __io_sqe_files_update(struct io_ring_ctx *ctx,
				 struct io_uring_rsrc_update2 *up,
				 unsigned nr_args)
{
	u64 __user *tags = u64_to_user_ptr(up->tags);
	__s32 __user *fds = u64_to_user_ptr(up->data);
	struct io_rsrc_data *data = ctx->file_data;
	struct io_fixed_file *file_slot;
	int fd, i, err = 0;
	unsigned int done;

	if (!ctx->file_data)
		return -ENXIO;
	if (up->offset + nr_args > ctx->nr_user_files)
		return -EINVAL;

	for (done = 0; done < nr_args; done++) {
		u64 tag = 0;

		if ((tags && copy_from_user(&tag, &tags[done], sizeof(tag))) ||
		    copy_from_user(&fd, &fds[done], sizeof(fd))) {
			err = -EFAULT;
			break;
		}
		if ((fd == IORING_REGISTER_FILES_SKIP || fd == -1) && tag) {
			err = -EINVAL;
			break;
		}
		if (fd == IORING_REGISTER_FILES_SKIP)
			continue;

		i = array_index_nospec(up->offset + done, ctx->nr_user_files);
		file_slot = io_fixed_file_slot(&ctx->file_table, i);

		if (file_slot->file_ptr) {
			err = io_queue_rsrc_removal(data, i,
						    io_slot_file(file_slot));
			if (err)
				break;
			file_slot->file_ptr = 0;
			io_file_bitmap_clear(&ctx->file_table, i);
		}
		if (fd != -1) {
			struct file *file = fget(fd);

			if (!file) {
				err = -EBADF;
				break;
			}
			/*
			 * Don't allow io_uring instances to be registered.
			 */
			if (io_is_uring_fops(file)) {
				fput(file);
				err = -EBADF;
				break;
			}
			*io_get_tag_slot(data, i) = tag;
			io_fixed_file_set(file_slot, file);
			io_file_bitmap_set(&ctx->file_table, i);
		}
	}
	return done ? done : err;
}

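/*
 * The buffer counterpart of __io_sqe_files_update(): validate and pin
 * the new iovec first, then queue the old mapping (if any) for deferred
 * unmapping before installing the new one in the table.
 */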
static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
				   struct io_uring_rsrc_update2 *up,
				   unsigned int nr_args)
{
	u64 __user *tags = u64_to_user_ptr(up->tags);
	struct iovec iov, __user *iovs = u64_to_user_ptr(up->data);
	struct page *last_hpage = NULL;
	__u32 done;
	int i, err;

	if (!ctx->buf_data)
		return -ENXIO;
	if (up->offset + nr_args > ctx->nr_user_bufs)
		return -EINVAL;

	for (done = 0; done < nr_args; done++) {
		struct io_mapped_ubuf *imu;
		u64 tag = 0;

		err = io_copy_iov(ctx, &iov, iovs, done);
		if (err)
			break;
		if (tags && copy_from_user(&tag, &tags[done], sizeof(tag))) {
			err = -EFAULT;
			break;
		}
		err = io_buffer_validate(&iov);
		if (err)
			break;
		if (!iov.iov_base && tag) {
			err = -EINVAL;
			break;
		}
		err = io_sqe_buffer_register(ctx, &iov, &imu, &last_hpage);
		if (err)
			break;

		i = array_index_nospec(up->offset + done, ctx->nr_user_bufs);
		if (ctx->user_bufs[i] != &dummy_ubuf) {
			err = io_queue_rsrc_removal(ctx->buf_data, i,
						    ctx->user_bufs[i]);
			if (unlikely(err)) {
				io_buffer_unmap(ctx, &imu);
				break;
			}
			ctx->user_bufs[i] = (struct io_mapped_ubuf *)&dummy_ubuf;
		}

		ctx->user_bufs[i] = imu;
		*io_get_tag_slot(ctx->buf_data, i) = tag;
	}
	return done ? done : err;
}

static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
				     struct io_uring_rsrc_update2 *up,
				     unsigned nr_args)
{
	__u32 tmp;

	lockdep_assert_held(&ctx->uring_lock);

	if (check_add_overflow(up->offset, nr_args, &tmp))
		return -EOVERFLOW;

	switch (type) {
	case IORING_RSRC_FILE:
		return __io_sqe_files_update(ctx, up, nr_args);
	case IORING_RSRC_BUFFER:
		return __io_sqe_buffers_update(ctx, up, nr_args);
	}
	return -EINVAL;
}

int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
			     unsigned nr_args)
{
	struct io_uring_rsrc_update2 up;

	if (!nr_args)
		return -EINVAL;
	memset(&up, 0, sizeof(up));
	if (copy_from_user(&up, arg, sizeof(struct io_uring_rsrc_update)))
		return -EFAULT;
	if (up.resv || up.resv2)
		return -EINVAL;
	return __io_register_rsrc_update(ctx, IORING_RSRC_FILE, &up, nr_args);
}

int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned size, unsigned type)
{
	struct io_uring_rsrc_update2 up;

	if (size != sizeof(up))
		return -EINVAL;
	if (copy_from_user(&up, arg, sizeof(up)))
		return -EFAULT;
	if (!up.nr || up.resv || up.resv2)
		return -EINVAL;
	return __io_register_rsrc_update(ctx, type, &up, up.nr);
}

__cold int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned int size, unsigned int type)
{
	struct io_uring_rsrc_register rr;

	/* keep it extendible */
	if (size != sizeof(rr))
		return -EINVAL;

	memset(&rr, 0, sizeof(rr));
	if (copy_from_user(&rr, arg, size))
		return -EFAULT;
	if (!rr.nr || rr.resv2)
		return -EINVAL;
	if (rr.flags & ~IORING_RSRC_REGISTER_SPARSE)
		return -EINVAL;

	switch (type) {
	case IORING_RSRC_FILE:
		if (rr.flags & IORING_RSRC_REGISTER_SPARSE && rr.data)
			break;
		return io_sqe_files_register(ctx, u64_to_user_ptr(rr.data),
					     rr.nr, u64_to_user_ptr(rr.tags));
	case IORING_RSRC_BUFFER:
		if (rr.flags & IORING_RSRC_REGISTER_SPARSE && rr.data)
			break;
		return io_sqe_buffers_register(ctx, u64_to_user_ptr(rr.data),
					       rr.nr, u64_to_user_ptr(rr.tags));
	}
	return -EINVAL;
}

int io_files_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);

	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
		return -EINVAL;
	if (sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	up->offset = READ_ONCE(sqe->off);
	up->nr_args = READ_ONCE(sqe->len);
	if (!up->nr_args)
		return -EINVAL;
	up->arg = READ_ONCE(sqe->addr);
	return 0;
}

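/*
 * IORING_OP_FILES_UPDATE with offset == IORING_FILE_INDEX_ALLOC: each
 * fd is installed into a slot chosen by the kernel, and the allocated
 * slot index is written back into the user's fd array.
 */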
static int io_files_update_with_index_alloc(struct io_kiocb *req,
					    unsigned int issue_flags)
{
	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
	__s32 __user *fds = u64_to_user_ptr(up->arg);
	unsigned int done;
	struct file *file;
	int ret, fd;

	if (!req->ctx->file_data)
		return -ENXIO;

	for (done = 0; done < up->nr_args; done++) {
		if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
			ret = -EFAULT;
			break;
		}

		file = fget(fd);
		if (!file) {
			ret = -EBADF;
			break;
		}
		ret = io_fixed_fd_install(req, issue_flags, file,
					  IORING_FILE_INDEX_ALLOC);
		if (ret < 0)
			break;
		if (copy_to_user(&fds[done], &ret, sizeof(ret))) {
			__io_close_fixed(req->ctx, issue_flags, ret);
			ret = -EFAULT;
			break;
		}
	}

	if (done)
		return done;
	return ret;
}

int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_uring_rsrc_update2 up2;
	int ret;

	up2.offset = up->offset;
	up2.data = up->arg;
	up2.nr = 0;
	up2.tags = 0;
	up2.resv = 0;
	up2.resv2 = 0;

	if (up->offset == IORING_FILE_INDEX_ALLOC) {
		ret = io_files_update_with_index_alloc(req, issue_flags);
	} else {
		io_ring_submit_lock(ctx, issue_flags);
		ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE,
						&up2, up->nr_args);
		io_ring_submit_unlock(ctx, issue_flags);
	}

	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

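/*
 * Hand @rsrc to the current rsrc node for a deferred put and swap in a
 * freshly allocated node. The slot's tag moves onto the node so a CQE
 * can be posted once the put actually happens.
 */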
int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx, void *rsrc)
{
	struct io_ring_ctx *ctx = data->ctx;
	struct io_rsrc_node *node = ctx->rsrc_node;
	u64 *tag_slot = io_get_tag_slot(data, idx);

	ctx->rsrc_node = io_rsrc_node_alloc(ctx);
	if (unlikely(!ctx->rsrc_node)) {
		ctx->rsrc_node = node;
		return -ENOMEM;
	}

	node->item.rsrc = rsrc;
	node->type = data->rsrc_type;
	node->item.tag = *tag_slot;
	*tag_slot = 0;
	list_add_tail(&node->node, &ctx->rsrc_ref_list);
	io_put_rsrc_node(ctx, node);
	return 0;
}

void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
{
	int i;

	for (i = 0; i < ctx->nr_user_files; i++) {
		struct file *file = io_file_from_index(&ctx->file_table, i);

		if (!file)
			continue;
		io_file_bitmap_clear(&ctx->file_table, i);
		fput(file);
	}

	io_free_file_tables(&ctx->file_table);
	io_file_table_set_alloc_range(ctx, 0, 0);
	io_rsrc_data_free(ctx->file_data);
	ctx->file_data = NULL;
	ctx->nr_user_files = 0;
}

int io_sqe_files_unregister(struct io_ring_ctx *ctx)
{
	unsigned nr = ctx->nr_user_files;
	int ret;

	if (!ctx->file_data)
		return -ENXIO;

	/*
	 * Quiesce may unlock ->uring_lock, and while it's not held
	 * prevent new requests using the table.
	 */
	ctx->nr_user_files = 0;
	ret = io_rsrc_ref_quiesce(ctx->file_data, ctx);
	ctx->nr_user_files = nr;
	if (!ret)
		__io_sqe_files_unregister(ctx);
	return ret;
}

int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
			  unsigned nr_args, u64 __user *tags)
{
	__s32 __user *fds = (__s32 __user *) arg;
	struct file *file;
	int fd, ret;
	unsigned i;

	if (ctx->file_data)
		return -EBUSY;
	if (!nr_args)
		return -EINVAL;
	if (nr_args > IORING_MAX_FIXED_FILES)
		return -EMFILE;
	if (nr_args > rlimit(RLIMIT_NOFILE))
		return -EMFILE;
	ret = io_rsrc_data_alloc(ctx, IORING_RSRC_FILE, tags, nr_args,
				 &ctx->file_data);
	if (ret)
		return ret;

	if (!io_alloc_file_tables(&ctx->file_table, nr_args)) {
		io_rsrc_data_free(ctx->file_data);
		ctx->file_data = NULL;
		return -ENOMEM;
	}

	for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
		struct io_fixed_file *file_slot;

		if (fds && copy_from_user(&fd, &fds[i], sizeof(fd))) {
			ret = -EFAULT;
			goto fail;
		}
		/* allow sparse sets */
		if (!fds || fd == -1) {
			ret = -EINVAL;
			if (unlikely(*io_get_tag_slot(ctx->file_data, i)))
				goto fail;
			continue;
		}

		file = fget(fd);
		ret = -EBADF;
		if (unlikely(!file))
			goto fail;

		/*
		 * Don't allow io_uring instances to be registered.
		 */
		if (io_is_uring_fops(file)) {
			fput(file);
			goto fail;
		}
		file_slot = io_fixed_file_slot(&ctx->file_table, i);
		io_fixed_file_set(file_slot, file);
		io_file_bitmap_set(&ctx->file_table, i);
	}

	/* default it to the whole table */
	io_file_table_set_alloc_range(ctx, 0, ctx->nr_user_files);
	return 0;
fail:
	__io_sqe_files_unregister(ctx);
	return ret;
}

static void io_rsrc_buf_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
{
	io_buffer_unmap(ctx, &prsrc->buf);
	prsrc->buf = NULL;
}

void __io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
{
	unsigned int i;

	for (i = 0; i < ctx->nr_user_bufs; i++)
		io_buffer_unmap(ctx, &ctx->user_bufs[i]);
	kfree(ctx->user_bufs);
	io_rsrc_data_free(ctx->buf_data);
	ctx->user_bufs = NULL;
	ctx->buf_data = NULL;
	ctx->nr_user_bufs = 0;
}

int io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
{
	unsigned nr = ctx->nr_user_bufs;
	int ret;

	if (!ctx->buf_data)
		return -ENXIO;

	/*
	 * Quiesce may unlock ->uring_lock, and while it's not held
	 * prevent new requests using the table.
	 */
	ctx->nr_user_bufs = 0;
	ret = io_rsrc_ref_quiesce(ctx->buf_data, ctx);
	ctx->nr_user_bufs = nr;
	if (!ret)
		__io_sqe_buffers_unregister(ctx);
	return ret;
}

/*
 * Not super efficient, but this only runs at registration time. And we do
 * cache the last compound head, so generally we'll only do a full search
 * if we don't match that one.
 *
 * We check if the given compound head page has already been accounted, to
 * avoid double accounting it. This allows us to account the full size of the
 * page, not just the constituent pages of a huge page.
 */
static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
				  int nr_pages, struct page *hpage)
{
	int i, j;

	/* check current page array */
	for (i = 0; i < nr_pages; i++) {
		if (!PageCompound(pages[i]))
			continue;
		if (compound_head(pages[i]) == hpage)
			return true;
	}

	/* check previously registered pages */
	for (i = 0; i < ctx->nr_user_bufs; i++) {
		struct io_mapped_ubuf *imu = ctx->user_bufs[i];

		for (j = 0; j < imu->nr_bvecs; j++) {
			if (!PageCompound(imu->bvec[j].bv_page))
				continue;
			if (compound_head(imu->bvec[j].bv_page) == hpage)
				return true;
		}
	}

	return false;
}

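/*
 * Work out how many pages to charge for this buffer. Compound (huge)
 * pages are charged once for their full size, and only if the same head
 * page wasn't already accounted by this or a previous registration.
 */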
static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
				 int nr_pages, struct io_mapped_ubuf *imu,
				 struct page **last_hpage)
{
	int i, ret;

	imu->acct_pages = 0;
	for (i = 0; i < nr_pages; i++) {
		if (!PageCompound(pages[i])) {
			imu->acct_pages++;
		} else {
			struct page *hpage;

			hpage = compound_head(pages[i]);
			if (hpage == *last_hpage)
				continue;
			*last_hpage = hpage;
			if (headpage_already_acct(ctx, pages, i, hpage))
				continue;
			imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;
		}
	}

	if (!imu->acct_pages)
		return 0;

	ret = io_account_mem(ctx, imu->acct_pages);
	if (ret)
		imu->acct_pages = 0;
	return ret;
}

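/*
 * Pin the user pages backing [ubuf, ubuf + len). On success the page
 * array is returned (to be kvfree()'d by the caller) and the count is
 * stored in @npages; on failure an ERR_PTR is returned and any
 * partially pinned pages are released.
 */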
struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages)
{
	unsigned long start, end, nr_pages;
	struct page **pages = NULL;
	int pret, ret = -ENOMEM;

	end = (ubuf + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	start = ubuf >> PAGE_SHIFT;
	nr_pages = end - start;

	pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		goto done;

	ret = 0;
	mmap_read_lock(current->mm);
	pret = pin_user_pages(ubuf, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
			      pages);
	if (pret == nr_pages)
		*npages = nr_pages;
	else
		ret = pret < 0 ? pret : -EFAULT;

	mmap_read_unlock(current->mm);
	if (ret) {
		/* if we did partial map, release any pages we did get */
		if (pret > 0)
			unpin_user_pages(pages, pret);
		goto done;
	}
	ret = 0;
done:
	if (ret < 0) {
		kvfree(pages);
		pages = ERR_PTR(ret);
	}
	return pages;
}

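/*
 * Pin and map one user buffer into an io_mapped_ubuf. A buffer made up
 * of consecutive pages within a single folio is collapsed into one bvec
 * entry, otherwise one bvec per page is used. A NULL iov_base installs
 * the dummy entry for a sparse slot.
 */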
static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
				  struct io_mapped_ubuf **pimu,
				  struct page **last_hpage)
{
	struct io_mapped_ubuf *imu = NULL;
	struct page **pages = NULL;
	unsigned long off;
	size_t size;
	int ret, nr_pages, i;
	struct folio *folio = NULL;

	*pimu = (struct io_mapped_ubuf *)&dummy_ubuf;
	if (!iov->iov_base)
		return 0;

	ret = -ENOMEM;
	pages = io_pin_pages((unsigned long) iov->iov_base, iov->iov_len,
			     &nr_pages);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		pages = NULL;
		goto done;
	}

	/* If it's a huge page, try to coalesce them into a single bvec entry */
	if (nr_pages > 1) {
		folio = page_folio(pages[0]);
		for (i = 1; i < nr_pages; i++) {
			/*
			 * Pages must be consecutive and on the same folio for
			 * this to work
			 */
			if (page_folio(pages[i]) != folio ||
			    pages[i] != pages[i - 1] + 1) {
				folio = NULL;
				break;
			}
		}
		if (folio) {
			/*
			 * The pages are bound to the folio, it doesn't
			 * actually unpin them but drops all but one reference,
			 * which is usually put down by io_buffer_unmap().
			 * Note, needs a better helper.
			 */
			unpin_user_pages(&pages[1], nr_pages - 1);
			nr_pages = 1;
		}
	}

	imu = kvmalloc(struct_size(imu, bvec, nr_pages), GFP_KERNEL);
	if (!imu)
		goto done;

	ret = io_buffer_account_pin(ctx, pages, nr_pages, imu, last_hpage);
	if (ret) {
		unpin_user_pages(pages, nr_pages);
		goto done;
	}

	off = (unsigned long) iov->iov_base & ~PAGE_MASK;
	size = iov->iov_len;
	/* store original address for later verification */
	imu->ubuf = (unsigned long) iov->iov_base;
	imu->ubuf_end = imu->ubuf + iov->iov_len;
	imu->nr_bvecs = nr_pages;
	*pimu = imu;
	ret = 0;

	if (folio) {
		bvec_set_page(&imu->bvec[0], pages[0], size, off);
		goto done;
	}
	for (i = 0; i < nr_pages; i++) {
		size_t vec_len;

		vec_len = min_t(size_t, size, PAGE_SIZE - off);
		bvec_set_page(&imu->bvec[i], pages[i], vec_len, off);
		off = 0;
		size -= vec_len;
	}
done:
	if (ret)
		kvfree(imu);
	kvfree(pages);
	return ret;
}

static int io_buffers_map_alloc(struct io_ring_ctx *ctx, unsigned int nr_args)
{
	ctx->user_bufs = kcalloc(nr_args, sizeof(*ctx->user_bufs), GFP_KERNEL);
	return ctx->user_bufs ? 0 : -ENOMEM;
}

int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned int nr_args, u64 __user *tags)
{
	struct page *last_hpage = NULL;
	struct io_rsrc_data *data;
	int i, ret;
	struct iovec iov;

	BUILD_BUG_ON(IORING_MAX_REG_BUFFERS >= (1u << 16));

	if (ctx->user_bufs)
		return -EBUSY;
	if (!nr_args || nr_args > IORING_MAX_REG_BUFFERS)
		return -EINVAL;
	ret = io_rsrc_data_alloc(ctx, IORING_RSRC_BUFFER, tags, nr_args, &data);
	if (ret)
		return ret;
	ret = io_buffers_map_alloc(ctx, nr_args);
	if (ret) {
		io_rsrc_data_free(data);
		return ret;
	}

	for (i = 0; i < nr_args; i++, ctx->nr_user_bufs++) {
		if (arg) {
			ret = io_copy_iov(ctx, &iov, arg, i);
			if (ret)
				break;
			ret = io_buffer_validate(&iov);
			if (ret)
				break;
		} else {
			memset(&iov, 0, sizeof(iov));
		}

		if (!iov.iov_base && *io_get_tag_slot(data, i)) {
			ret = -EINVAL;
			break;
		}

		ret = io_sqe_buffer_register(ctx, &iov, &ctx->user_bufs[i],
					     &last_hpage);
		if (ret)
			break;
	}

	WARN_ON_ONCE(ctx->buf_data);

	ctx->buf_data = data;
	if (ret)
		__io_sqe_buffers_unregister(ctx);
	return ret;
}

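/*
 * Set up @iter over the registered buffer @imu for [buf_addr, buf_addr + len).
 * The range must lie entirely within the mapped region; any starting
 * offset is applied by indexing straight into the bvec array instead of
 * walking it with iov_iter_advance().
 */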
int io_import_fixed(int ddir, struct iov_iter *iter,
		    struct io_mapped_ubuf *imu,
		    u64 buf_addr, size_t len)
{
	u64 buf_end;
	size_t offset;

	if (WARN_ON_ONCE(!imu))
		return -EFAULT;
	if (unlikely(check_add_overflow(buf_addr, (u64)len, &buf_end)))
		return -EFAULT;
	/* not inside the mapped region */
	if (unlikely(buf_addr < imu->ubuf || buf_end > imu->ubuf_end))
		return -EFAULT;

	/*
	 * Might not be a start of buffer, set size appropriately
	 * and advance us to the beginning.
	 */
	offset = buf_addr - imu->ubuf;
	iov_iter_bvec(iter, ddir, imu->bvec, imu->nr_bvecs, offset + len);

	if (offset) {
		/*
		 * Don't use iov_iter_advance() here, as it's really slow for
		 * using the latter parts of a big fixed buffer - it iterates
		 * over each segment manually. We can cheat a bit here, because
		 * we know that:
		 *
		 * 1) it's a BVEC iter, we set it up
		 * 2) all bvecs are PAGE_SIZE in size, except potentially the
		 *    first and last bvec
		 *
		 * So just find our index, and adjust the iterator afterwards.
		 * If the offset is within the first bvec (or covers the whole
		 * first bvec), just set the iterator offset directly. This
		 * makes it easier since we can just skip the first segment,
		 * which may not be PAGE_SIZE aligned.
		 */
		const struct bio_vec *bvec = imu->bvec;

		if (offset < bvec->bv_len) {
			/*
			 * Note, huge page buffers consist of one large
			 * bvec entry and should always go this way. The other
			 * branch doesn't expect non PAGE_SIZE'd chunks.
			 */
			iter->bvec = bvec;
			iter->count -= offset;
			iter->iov_offset = offset;
		} else {
			unsigned long seg_skip;

			/* skip first vec */
			offset -= bvec->bv_len;
			seg_skip = 1 + (offset >> PAGE_SHIFT);

			iter->bvec = bvec + seg_skip;
			iter->nr_segs -= seg_skip;
			iter->count -= bvec->bv_len + offset;
			iter->iov_offset = offset & ~PAGE_MASK;
		}
	}

	return 0;
}