/* xref: /openbmc/linux/io_uring/rsrc.c (revision 2933ae6e) */
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/hugetlb.h>
#include <linux/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "openclose.h"
#include "rsrc.h"

struct io_rsrc_update {
	struct file			*file;
	u64				arg;
	u32				nr_args;
	u32				offset;
};

static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
				  struct io_mapped_ubuf **pimu,
				  struct page **last_hpage);

/* only define max */
#define IORING_MAX_FIXED_FILES	(1U << 20)
#define IORING_MAX_REG_BUFFERS	(1U << 14)

static inline bool io_put_rsrc_data_ref(struct io_rsrc_data *rsrc_data)
{
	return !--rsrc_data->refs;
}

int __io_account_mem(struct user_struct *user, unsigned long nr_pages)
{
	unsigned long page_limit, cur_pages, new_pages;

	if (!nr_pages)
		return 0;

	/* Don't allow more pages than we can safely lock */
	page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	cur_pages = atomic_long_read(&user->locked_vm);
	do {
		new_pages = cur_pages + nr_pages;
		if (new_pages > page_limit)
			return -ENOMEM;
	} while (!atomic_long_try_cmpxchg(&user->locked_vm,
					  &cur_pages, new_pages));
	return 0;
}
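/*
 * Illustrative sketch (editorial, not kernel source): with
 * RLIMIT_MEMLOCK = 64 KiB and 4 KiB pages, page_limit is 16. If
 * locked_vm currently holds 10 and a caller asks to account 8 pages,
 * new_pages = 18 > 16 and the call fails with -ENOMEM. The
 * try_cmpxchg() loop makes the check-and-add atomic: if another task
 * raced and changed locked_vm, cur_pages is reloaded and the limit is
 * re-checked before retrying.
 */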

static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
{
	if (ctx->user)
		__io_unaccount_mem(ctx->user, nr_pages);

	if (ctx->mm_account)
		atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
}

static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
{
	int ret;

	if (ctx->user) {
		ret = __io_account_mem(ctx->user, nr_pages);
		if (ret)
			return ret;
	}

	if (ctx->mm_account)
		atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);

	return 0;
}

static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
		       void __user *arg, unsigned index)
{
	struct iovec __user *src;

#ifdef CONFIG_COMPAT
	if (ctx->compat) {
		struct compat_iovec __user *ciovs;
		struct compat_iovec ciov;

		ciovs = (struct compat_iovec __user *) arg;
		if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
			return -EFAULT;

		dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
		dst->iov_len = ciov.iov_len;
		return 0;
	}
#endif
	src = (struct iovec __user *) arg;
	if (copy_from_user(dst, &src[index], sizeof(*dst)))
		return -EFAULT;
	return 0;
}
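/*
 * Illustrative note (editorial, not kernel source): a 32-bit task
 * passes an array of compat_iovec, whose iov_base and iov_len fields
 * are 32 bits wide, so indexing with the native 16-byte struct iovec
 * stride would read the wrong element. The compat path therefore
 * copies one compat_iovec and widens its fields, e.g. a compat entry
 * { .iov_base = 0x10000, .iov_len = 0x1000 } becomes a native iovec
 * with the same values zero-extended to 64 bits.
 */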

static int io_buffer_validate(struct iovec *iov)
{
	unsigned long tmp, acct_len = iov->iov_len + (PAGE_SIZE - 1);

	/*
	 * Don't impose further limits on the size and buffer
	 * constraints here; we'll fail with -EINVAL at submission
	 * time if they are wrong.
	 */
	if (!iov->iov_base)
		return iov->iov_len ? -EFAULT : 0;
	if (!iov->iov_len)
		return -EFAULT;

	/* arbitrary limit, but we need something */
	if (iov->iov_len > SZ_1G)
		return -EFAULT;

	if (check_add_overflow((unsigned long)iov->iov_base, acct_len, &tmp))
		return -EOVERFLOW;

	return 0;
}
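/*
 * Illustrative examples (editorial, not kernel source) of how the
 * checks above classify an iovec:
 *
 *	{ .iov_base = NULL, .iov_len = 0 }		-> 0 (sparse slot)
 *	{ .iov_base = NULL, .iov_len = 4096 }		-> -EFAULT
 *	{ .iov_base = p,    .iov_len = 0 }		-> -EFAULT
 *	{ .iov_base = p,    .iov_len = 2ULL << 30 }	-> -EFAULT (> SZ_1G)
 *	{ .iov_base = (void *)-4096, .iov_len = 8192 }	-> -EOVERFLOW
 */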

static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf **slot)
{
	struct io_mapped_ubuf *imu = *slot;
	unsigned int i;

	if (imu != ctx->dummy_ubuf) {
		for (i = 0; i < imu->nr_bvecs; i++)
			unpin_user_page(imu->bvec[i].bv_page);
		if (imu->acct_pages)
			io_unaccount_mem(ctx, imu->acct_pages);
		kvfree(imu);
	}
	*slot = NULL;
}

static void io_rsrc_put_work_one(struct io_rsrc_data *rsrc_data,
				 struct io_rsrc_put *prsrc)
{
	struct io_ring_ctx *ctx = rsrc_data->ctx;

	if (prsrc->tag)
		io_post_aux_cqe(ctx, prsrc->tag, 0, 0);
	rsrc_data->do_put(ctx, prsrc);
}

static void __io_rsrc_put_work(struct io_rsrc_node *ref_node)
{
	struct io_rsrc_data *rsrc_data = ref_node->rsrc_data;
	struct io_rsrc_put *prsrc, *tmp;

	if (ref_node->inline_items)
		io_rsrc_put_work_one(rsrc_data, &ref_node->item);

	list_for_each_entry_safe(prsrc, tmp, &ref_node->item_list, list) {
		list_del(&prsrc->list);
		io_rsrc_put_work_one(rsrc_data, prsrc);
		kfree(prsrc);
	}

	io_rsrc_node_destroy(rsrc_data->ctx, ref_node);
	if (io_put_rsrc_data_ref(rsrc_data))
		complete(&rsrc_data->done);
}

void io_wait_rsrc_data(struct io_rsrc_data *data)
{
	if (data && !io_put_rsrc_data_ref(data))
		wait_for_completion(&data->done);
}

void io_rsrc_node_destroy(struct io_ring_ctx *ctx, struct io_rsrc_node *node)
{
	if (!io_alloc_cache_put(&ctx->rsrc_node_cache, &node->cache))
		kfree(node);
}

void io_rsrc_node_ref_zero(struct io_rsrc_node *node)
	__must_hold(&node->rsrc_data->ctx->uring_lock)
{
	struct io_ring_ctx *ctx = node->rsrc_data->ctx;

	node->done = true;
	while (!list_empty(&ctx->rsrc_ref_list)) {
		node = list_first_entry(&ctx->rsrc_ref_list,
					    struct io_rsrc_node, node);
		/* recycle ref nodes in order */
		if (!node->done)
			break;

		list_del(&node->node);
		__io_rsrc_put_work(node);
	}
}
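/*
 * Illustrative scenario (editorial, not kernel source): suppose
 * rsrc_ref_list holds nodes A -> B -> C in switch order. If B's
 * refcount hits zero first, only B->done is set; the loop stops at A
 * because A->done is false, so nothing is reclaimed yet. When A later
 * drops to zero, both A and B are processed in order, and the loop
 * again stops at C. This keeps resource put work strictly in the
 * order the nodes were retired.
 */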

struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx)
{
	struct io_rsrc_node *ref_node;
	struct io_cache_entry *entry;

	entry = io_alloc_cache_get(&ctx->rsrc_node_cache);
	if (entry) {
		ref_node = container_of(entry, struct io_rsrc_node, cache);
	} else {
		ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
		if (!ref_node)
			return NULL;
	}

	ref_node->rsrc_data = NULL;
	ref_node->refs = 1;
	INIT_LIST_HEAD(&ref_node->node);
	INIT_LIST_HEAD(&ref_node->item_list);
	ref_node->done = false;
	ref_node->inline_items = 0;
	return ref_node;
}

void io_rsrc_node_switch(struct io_ring_ctx *ctx,
			 struct io_rsrc_data *data_to_kill)
	__must_hold(&ctx->uring_lock)
{
	struct io_rsrc_node *node = ctx->rsrc_node;
	struct io_rsrc_node *backup = io_rsrc_node_alloc(ctx);

	if (WARN_ON_ONCE(!backup))
		return;

	data_to_kill->refs++;
	node->rsrc_data = data_to_kill;
	list_add_tail(&node->node, &ctx->rsrc_ref_list);
	/* put master ref */
	io_put_rsrc_node(ctx, node);
	ctx->rsrc_node = backup;
}

int io_rsrc_node_switch_start(struct io_ring_ctx *ctx)
{
	if (io_alloc_cache_empty(&ctx->rsrc_node_cache)) {
		struct io_rsrc_node *node = kzalloc(sizeof(*node), GFP_KERNEL);

		if (!node)
			return -ENOMEM;
		io_alloc_cache_put(&ctx->rsrc_node_cache, &node->cache);
	}
	return 0;
}

__cold static int io_rsrc_ref_quiesce(struct io_rsrc_data *data,
				      struct io_ring_ctx *ctx)
{
	int ret;

	/* As we may drop ->uring_lock, another task may have started a quiesce */
	if (data->quiesce)
		return -ENXIO;
	ret = io_rsrc_node_switch_start(ctx);
	if (ret)
		return ret;
	io_rsrc_node_switch(ctx, data);

	/* kill initial ref */
	if (io_put_rsrc_data_ref(data))
		return 0;

	data->quiesce = true;
	mutex_unlock(&ctx->uring_lock);
	do {
		ret = io_run_task_work_sig(ctx);
		if (ret < 0) {
			mutex_lock(&ctx->uring_lock);
			if (!data->refs) {
				ret = 0;
			} else {
				/* restore the master reference */
				data->refs++;
			}
			break;
		}
		ret = wait_for_completion_interruptible(&data->done);
		if (!ret) {
			mutex_lock(&ctx->uring_lock);
			if (!data->refs)
				break;
			/*
			 * it has been revived by another thread while
			 * we were unlocked
			 */
			mutex_unlock(&ctx->uring_lock);
		}
	} while (1);
	data->quiesce = false;

	return ret;
}
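/*
 * Editorial summary of the quiesce protocol above (a sketch, not
 * kernel source):
 *
 *  1. Retire the current rsrc node so in-flight requests stop taking
 *     new references on @data (io_rsrc_node_switch()).
 *  2. Drop the initial/master reference; if it was the last one, the
 *     table is already idle and we are done.
 *  3. Otherwise drop ->uring_lock and sleep on data->done until the
 *     remaining node references drain, handling signals and the case
 *     where another thread revives the data while we were unlocked.
 */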

static void io_free_page_table(void **table, size_t size)
{
	unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);

	for (i = 0; i < nr_tables; i++)
		kfree(table[i]);
	kfree(table);
}

static void io_rsrc_data_free(struct io_rsrc_data *data)
{
	size_t size = data->nr * sizeof(data->tags[0][0]);

	if (data->tags)
		io_free_page_table((void **)data->tags, size);
	kfree(data);
}

static __cold void **io_alloc_page_table(size_t size)
{
	unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);
	size_t init_size = size;
	void **table;

	table = kcalloc(nr_tables, sizeof(*table), GFP_KERNEL_ACCOUNT);
	if (!table)
		return NULL;

	for (i = 0; i < nr_tables; i++) {
		unsigned int this_size = min_t(size_t, size, PAGE_SIZE);

		table[i] = kzalloc(this_size, GFP_KERNEL_ACCOUNT);
		if (!table[i]) {
			io_free_page_table(table, init_size);
			return NULL;
		}
		size -= this_size;
	}
	return table;
}
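/*
 * Worked example (editorial, assuming 4 KiB pages): registering
 * nr = 1500 resources needs 1500 * sizeof(u64) = 12000 bytes of tag
 * space, so nr_tables = DIV_ROUND_UP(12000, 4096) = 3. The first two
 * sub-tables are a full page each (512 tags apiece) and the third
 * holds the remaining 3808 bytes. A tag is then addressed by its
 * page-sized chunk and the offset within it, which is what
 * io_get_tag_slot() computes.
 */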

__cold static int io_rsrc_data_alloc(struct io_ring_ctx *ctx,
				     rsrc_put_fn *do_put, u64 __user *utags,
				     unsigned nr, struct io_rsrc_data **pdata)
{
	struct io_rsrc_data *data;
	int ret = 0;
	unsigned i;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	data->tags = (u64 **)io_alloc_page_table(nr * sizeof(data->tags[0][0]));
	if (!data->tags) {
		kfree(data);
		return -ENOMEM;
	}

	data->nr = nr;
	data->ctx = ctx;
	data->do_put = do_put;
	data->refs = 1;
	if (utags) {
		ret = -EFAULT;
		for (i = 0; i < nr; i++) {
			u64 *tag_slot = io_get_tag_slot(data, i);

			if (copy_from_user(tag_slot, &utags[i],
					   sizeof(*tag_slot)))
				goto fail;
		}
	}
	init_completion(&data->done);
	*pdata = data;
	return 0;
fail:
	io_rsrc_data_free(data);
	return ret;
}

static int __io_sqe_files_update(struct io_ring_ctx *ctx,
				 struct io_uring_rsrc_update2 *up,
				 unsigned nr_args)
{
	u64 __user *tags = u64_to_user_ptr(up->tags);
	__s32 __user *fds = u64_to_user_ptr(up->data);
	struct io_rsrc_data *data = ctx->file_data;
	struct io_fixed_file *file_slot;
	struct file *file;
	int fd, i, err = 0;
	unsigned int done;
	bool needs_switch = false;

	if (!ctx->file_data)
		return -ENXIO;
	if (up->offset + nr_args > ctx->nr_user_files)
		return -EINVAL;

	for (done = 0; done < nr_args; done++) {
		u64 tag = 0;

		if ((tags && copy_from_user(&tag, &tags[done], sizeof(tag))) ||
		    copy_from_user(&fd, &fds[done], sizeof(fd))) {
			err = -EFAULT;
			break;
		}
		if ((fd == IORING_REGISTER_FILES_SKIP || fd == -1) && tag) {
			err = -EINVAL;
			break;
		}
		if (fd == IORING_REGISTER_FILES_SKIP)
			continue;

		i = array_index_nospec(up->offset + done, ctx->nr_user_files);
		file_slot = io_fixed_file_slot(&ctx->file_table, i);

		if (file_slot->file_ptr) {
			file = (struct file *)(file_slot->file_ptr & FFS_MASK);
			err = io_queue_rsrc_removal(data, i, ctx->rsrc_node, file);
			if (err)
				break;
			file_slot->file_ptr = 0;
			io_file_bitmap_clear(&ctx->file_table, i);
			needs_switch = true;
		}
		if (fd != -1) {
			file = fget(fd);
			if (!file) {
				err = -EBADF;
				break;
			}
			/*
			 * Don't allow io_uring instances to be registered. If
			 * UNIX isn't enabled, then this causes a reference
			 * cycle and this instance can never get freed. If UNIX
			 * is enabled we'll handle it just fine, but there's
			 * still no point in allowing a ring fd as it doesn't
			 * support regular read/write anyway.
			 */
			if (io_is_uring_fops(file)) {
				fput(file);
				err = -EBADF;
				break;
			}
			err = io_scm_file_account(ctx, file);
			if (err) {
				fput(file);
				break;
			}
			*io_get_tag_slot(data, i) = tag;
			io_fixed_file_set(file_slot, file);
			io_file_bitmap_set(&ctx->file_table, i);
		}
	}

	if (needs_switch)
		io_rsrc_node_switch(ctx, data);
	return done ? done : err;
}

static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
				   struct io_uring_rsrc_update2 *up,
				   unsigned int nr_args)
{
	u64 __user *tags = u64_to_user_ptr(up->tags);
	struct iovec iov, __user *iovs = u64_to_user_ptr(up->data);
	struct page *last_hpage = NULL;
	bool needs_switch = false;
	__u32 done;
	int i, err;

	if (!ctx->buf_data)
		return -ENXIO;
	if (up->offset + nr_args > ctx->nr_user_bufs)
		return -EINVAL;

	for (done = 0; done < nr_args; done++) {
		struct io_mapped_ubuf *imu;
		int offset = up->offset + done;
		u64 tag = 0;

		err = io_copy_iov(ctx, &iov, iovs, done);
		if (err)
			break;
		if (tags && copy_from_user(&tag, &tags[done], sizeof(tag))) {
			err = -EFAULT;
			break;
		}
		err = io_buffer_validate(&iov);
		if (err)
			break;
		if (!iov.iov_base && tag) {
			err = -EINVAL;
			break;
		}
		err = io_sqe_buffer_register(ctx, &iov, &imu, &last_hpage);
		if (err)
			break;

		i = array_index_nospec(offset, ctx->nr_user_bufs);
		if (ctx->user_bufs[i] != ctx->dummy_ubuf) {
			err = io_queue_rsrc_removal(ctx->buf_data, i,
						    ctx->rsrc_node, ctx->user_bufs[i]);
			if (unlikely(err)) {
				io_buffer_unmap(ctx, &imu);
				break;
			}
			ctx->user_bufs[i] = ctx->dummy_ubuf;
			needs_switch = true;
		}

		ctx->user_bufs[i] = imu;
		*io_get_tag_slot(ctx->buf_data, offset) = tag;
	}

	if (needs_switch)
		io_rsrc_node_switch(ctx, ctx->buf_data);
	return done ? done : err;
}

static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
				     struct io_uring_rsrc_update2 *up,
				     unsigned nr_args)
{
	__u32 tmp;
	int err;

	lockdep_assert_held(&ctx->uring_lock);

	if (check_add_overflow(up->offset, nr_args, &tmp))
		return -EOVERFLOW;
	err = io_rsrc_node_switch_start(ctx);
	if (err)
		return err;

	switch (type) {
	case IORING_RSRC_FILE:
		return __io_sqe_files_update(ctx, up, nr_args);
	case IORING_RSRC_BUFFER:
		return __io_sqe_buffers_update(ctx, up, nr_args);
	}
	return -EINVAL;
}

int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
			     unsigned nr_args)
{
	struct io_uring_rsrc_update2 up;

	if (!nr_args)
		return -EINVAL;
	memset(&up, 0, sizeof(up));
	if (copy_from_user(&up, arg, sizeof(struct io_uring_rsrc_update)))
		return -EFAULT;
	if (up.resv || up.resv2)
		return -EINVAL;
	return __io_register_rsrc_update(ctx, IORING_RSRC_FILE, &up, nr_args);
}

int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned size, unsigned type)
{
	struct io_uring_rsrc_update2 up;

	if (size != sizeof(up))
		return -EINVAL;
	if (copy_from_user(&up, arg, sizeof(up)))
		return -EFAULT;
	if (!up.nr || up.resv || up.resv2)
		return -EINVAL;
	return __io_register_rsrc_update(ctx, type, &up, up.nr);
}

__cold int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned int size, unsigned int type)
{
	struct io_uring_rsrc_register rr;

	/* keep it extendible */
	if (size != sizeof(rr))
		return -EINVAL;

	memset(&rr, 0, sizeof(rr));
	if (copy_from_user(&rr, arg, size))
		return -EFAULT;
	if (!rr.nr || rr.resv2)
		return -EINVAL;
	if (rr.flags & ~IORING_RSRC_REGISTER_SPARSE)
		return -EINVAL;

	switch (type) {
	case IORING_RSRC_FILE:
		if (rr.flags & IORING_RSRC_REGISTER_SPARSE && rr.data)
			break;
		return io_sqe_files_register(ctx, u64_to_user_ptr(rr.data),
					     rr.nr, u64_to_user_ptr(rr.tags));
	case IORING_RSRC_BUFFER:
		if (rr.flags & IORING_RSRC_REGISTER_SPARSE && rr.data)
			break;
		return io_sqe_buffers_register(ctx, u64_to_user_ptr(rr.data),
					       rr.nr, u64_to_user_ptr(rr.tags));
	}
	return -EINVAL;
}
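/*
 * Illustrative userspace sketch (editorial example, not kernel
 * source): registering a sparse file table of 64 slots through the
 * io_uring_register(2) system call, which lands in io_register_rsrc()
 * above. Error handling is omitted.
 *
 *	struct io_uring_rsrc_register rr = {
 *		.nr	= 64,
 *		.flags	= IORING_RSRC_REGISTER_SPARSE,
 *		.data	= 0,	// must be 0 for a sparse set
 *		.tags	= 0,
 *	};
 *	syscall(__NR_io_uring_register, ring_fd, IORING_REGISTER_FILES2,
 *		&rr, sizeof(rr));
 *
 * Recent liburing also offers io_uring_register_files_sparse() as a
 * convenience wrapper around this struct-based call.
 */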

int io_files_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);

	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
		return -EINVAL;
	if (sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	up->offset = READ_ONCE(sqe->off);
	up->nr_args = READ_ONCE(sqe->len);
	if (!up->nr_args)
		return -EINVAL;
	up->arg = READ_ONCE(sqe->addr);
	return 0;
}

static int io_files_update_with_index_alloc(struct io_kiocb *req,
					    unsigned int issue_flags)
{
	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
	__s32 __user *fds = u64_to_user_ptr(up->arg);
	unsigned int done;
	struct file *file;
	int ret, fd;

	if (!req->ctx->file_data)
		return -ENXIO;

	for (done = 0; done < up->nr_args; done++) {
		if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
			ret = -EFAULT;
			break;
		}

		file = fget(fd);
		if (!file) {
			ret = -EBADF;
			break;
		}
		ret = io_fixed_fd_install(req, issue_flags, file,
					  IORING_FILE_INDEX_ALLOC);
		if (ret < 0)
			break;
		if (copy_to_user(&fds[done], &ret, sizeof(ret))) {
			__io_close_fixed(req->ctx, issue_flags, ret);
			ret = -EFAULT;
			break;
		}
	}

	if (done)
		return done;
	return ret;
}

int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_uring_rsrc_update2 up2;
	int ret;

	up2.offset = up->offset;
	up2.data = up->arg;
	up2.nr = 0;
	up2.tags = 0;
	up2.resv = 0;
	up2.resv2 = 0;

	if (up->offset == IORING_FILE_INDEX_ALLOC) {
		ret = io_files_update_with_index_alloc(req, issue_flags);
	} else {
		io_ring_submit_lock(ctx, issue_flags);
		ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE,
						&up2, up->nr_args);
		io_ring_submit_unlock(ctx, issue_flags);
	}

	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}
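/*
 * Illustrative userspace sketch (editorial example, not kernel
 * source): the async counterpart handled by io_files_update() is an
 * IORING_OP_FILES_UPDATE SQE. With liburing it might look like:
 *
 *	int fds[2] = { sock_fd, -1 };	// install one fd, clear one slot
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_files_update(sqe, fds, 2, 10);	// slots 10 and 11
 *	io_uring_submit(&ring);
 *
 * The CQE result is the number of slots processed, mirroring the
 * "return done ? done : err" convention above.
 */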

int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
			  struct io_rsrc_node *node, void *rsrc)
{
	u64 *tag_slot = io_get_tag_slot(data, idx);
	struct io_rsrc_put *prsrc;
	bool inline_item = true;

	if (!node->inline_items) {
		prsrc = &node->item;
		node->inline_items++;
	} else {
		prsrc = kzalloc(sizeof(*prsrc), GFP_KERNEL);
		if (!prsrc)
			return -ENOMEM;
		inline_item = false;
	}

	prsrc->tag = *tag_slot;
	*tag_slot = 0;
	prsrc->rsrc = rsrc;
	if (!inline_item)
		list_add(&prsrc->list, &node->item_list);
	return 0;
}

void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
{
	int i;

	for (i = 0; i < ctx->nr_user_files; i++) {
		struct file *file = io_file_from_index(&ctx->file_table, i);

		/* skip scm accounted files, they'll be freed by ->ring_sock */
		if (!file || io_file_need_scm(file))
			continue;
		io_file_bitmap_clear(&ctx->file_table, i);
		fput(file);
	}

#if defined(CONFIG_UNIX)
	if (ctx->ring_sock) {
		struct sock *sock = ctx->ring_sock->sk;
		struct sk_buff *skb;

		while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
			kfree_skb(skb);
	}
#endif
	io_free_file_tables(&ctx->file_table);
	io_file_table_set_alloc_range(ctx, 0, 0);
	io_rsrc_data_free(ctx->file_data);
	ctx->file_data = NULL;
	ctx->nr_user_files = 0;
}

int io_sqe_files_unregister(struct io_ring_ctx *ctx)
{
	unsigned nr = ctx->nr_user_files;
	int ret;

	if (!ctx->file_data)
		return -ENXIO;

	/*
	 * Quiesce may drop ->uring_lock; while it's not held, prevent
	 * new requests from using the table.
	 */
	ctx->nr_user_files = 0;
	ret = io_rsrc_ref_quiesce(ctx->file_data, ctx);
	ctx->nr_user_files = nr;
	if (!ret)
		__io_sqe_files_unregister(ctx);
	return ret;
}

/*
 * Ensure the UNIX gc is aware of our file set, so we are certain that
 * the io_uring can be safely unregistered on process exit, even if we have
 * loops in the file referencing. We account only files that can hold other
 * files because otherwise they can't form a loop and so are not interesting
 * for GC.
 */
int __io_scm_file_account(struct io_ring_ctx *ctx, struct file *file)
{
#if defined(CONFIG_UNIX)
	struct sock *sk = ctx->ring_sock->sk;
	struct sk_buff_head *head = &sk->sk_receive_queue;
	struct scm_fp_list *fpl;
	struct sk_buff *skb;

	if (likely(!io_file_need_scm(file)))
		return 0;

	/*
	 * See if we can merge this file into an existing skb SCM_RIGHTS
	 * file set. If there's no room, fall back to allocating a new skb
	 * and filling it in.
	 */
	spin_lock_irq(&head->lock);
	skb = skb_peek(head);
	if (skb && UNIXCB(skb).fp->count < SCM_MAX_FD)
		__skb_unlink(skb, head);
	else
		skb = NULL;
	spin_unlock_irq(&head->lock);

	if (!skb) {
		fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
		if (!fpl)
			return -ENOMEM;

		skb = alloc_skb(0, GFP_KERNEL);
		if (!skb) {
			kfree(fpl);
			return -ENOMEM;
		}

		fpl->user = get_uid(current_user());
		fpl->max = SCM_MAX_FD;
		fpl->count = 0;

		UNIXCB(skb).fp = fpl;
		skb->sk = sk;
		skb->scm_io_uring = 1;
		skb->destructor = unix_destruct_scm;
		refcount_add(skb->truesize, &sk->sk_wmem_alloc);
	}

	fpl = UNIXCB(skb).fp;
	fpl->fp[fpl->count++] = get_file(file);
	unix_inflight(fpl->user, file);
	skb_queue_head(head, skb);
	fput(file);
#endif
	return 0;
}

static void io_rsrc_file_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
{
	struct file *file = prsrc->file;
#if defined(CONFIG_UNIX)
	struct sock *sock = ctx->ring_sock->sk;
	struct sk_buff_head list, *head = &sock->sk_receive_queue;
	struct sk_buff *skb;
	int i;

	if (!io_file_need_scm(file)) {
		fput(file);
		return;
	}

	__skb_queue_head_init(&list);

	/*
	 * Find the skb that holds this file in its SCM_RIGHTS. When found,
	 * remove this entry and rearrange the file array.
	 */
	skb = skb_dequeue(head);
	while (skb) {
		struct scm_fp_list *fp;

		fp = UNIXCB(skb).fp;
		for (i = 0; i < fp->count; i++) {
			int left;

			if (fp->fp[i] != file)
				continue;

			unix_notinflight(fp->user, fp->fp[i]);
			left = fp->count - 1 - i;
			if (left) {
				memmove(&fp->fp[i], &fp->fp[i + 1],
						left * sizeof(struct file *));
			}
			fp->count--;
			if (!fp->count) {
				kfree_skb(skb);
				skb = NULL;
			} else {
				__skb_queue_tail(&list, skb);
			}
			fput(file);
			file = NULL;
			break;
		}

		if (!file)
			break;

		__skb_queue_tail(&list, skb);

		skb = skb_dequeue(head);
	}

	if (skb_peek(&list)) {
		spin_lock_irq(&head->lock);
		while ((skb = __skb_dequeue(&list)) != NULL)
			__skb_queue_tail(head, skb);
		spin_unlock_irq(&head->lock);
	}
#else
	fput(file);
#endif
}

int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
			  unsigned nr_args, u64 __user *tags)
{
	__s32 __user *fds = (__s32 __user *) arg;
	struct file *file;
	int fd, ret;
	unsigned i;

	if (ctx->file_data)
		return -EBUSY;
	if (!nr_args)
		return -EINVAL;
	if (nr_args > IORING_MAX_FIXED_FILES)
		return -EMFILE;
	if (nr_args > rlimit(RLIMIT_NOFILE))
		return -EMFILE;
	ret = io_rsrc_data_alloc(ctx, io_rsrc_file_put, tags, nr_args,
				 &ctx->file_data);
	if (ret)
		return ret;

	if (!io_alloc_file_tables(&ctx->file_table, nr_args)) {
		io_rsrc_data_free(ctx->file_data);
		ctx->file_data = NULL;
		return -ENOMEM;
	}

	for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
		struct io_fixed_file *file_slot;

		if (fds && copy_from_user(&fd, &fds[i], sizeof(fd))) {
			ret = -EFAULT;
			goto fail;
		}
		/* allow sparse sets */
		if (!fds || fd == -1) {
			ret = -EINVAL;
			if (unlikely(*io_get_tag_slot(ctx->file_data, i)))
				goto fail;
			continue;
		}

		file = fget(fd);
		ret = -EBADF;
		if (unlikely(!file))
			goto fail;

		/*
		 * Don't allow io_uring instances to be registered. If UNIX
		 * isn't enabled, then this causes a reference cycle and this
		 * instance can never get freed. If UNIX is enabled we'll
		 * handle it just fine, but there's still no point in allowing
		 * a ring fd as it doesn't support regular read/write anyway.
		 */
		if (io_is_uring_fops(file)) {
			fput(file);
			goto fail;
		}
		ret = io_scm_file_account(ctx, file);
		if (ret) {
			fput(file);
			goto fail;
		}
		file_slot = io_fixed_file_slot(&ctx->file_table, i);
		io_fixed_file_set(file_slot, file);
		io_file_bitmap_set(&ctx->file_table, i);
	}

	/* default it to the whole table */
	io_file_table_set_alloc_range(ctx, 0, ctx->nr_user_files);
	return 0;
fail:
	__io_sqe_files_unregister(ctx);
	return ret;
}

static void io_rsrc_buf_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
{
	io_buffer_unmap(ctx, &prsrc->buf);
	prsrc->buf = NULL;
}

void __io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
{
	unsigned int i;

	for (i = 0; i < ctx->nr_user_bufs; i++)
		io_buffer_unmap(ctx, &ctx->user_bufs[i]);
	kfree(ctx->user_bufs);
	io_rsrc_data_free(ctx->buf_data);
	ctx->user_bufs = NULL;
	ctx->buf_data = NULL;
	ctx->nr_user_bufs = 0;
}

int io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
{
	unsigned nr = ctx->nr_user_bufs;
	int ret;

	if (!ctx->buf_data)
		return -ENXIO;

	/*
	 * Quiesce may drop ->uring_lock; while it's not held, prevent
	 * new requests from using the table.
	 */
	ctx->nr_user_bufs = 0;
	ret = io_rsrc_ref_quiesce(ctx->buf_data, ctx);
	ctx->nr_user_bufs = nr;
	if (!ret)
		__io_sqe_buffers_unregister(ctx);
	return ret;
}

/*
 * Not super efficient, but this only happens at registration time. And we
 * do cache the last compound head, so generally we'll only do a full search
 * if we don't match that one.
 *
 * We check if the given compound head page has already been accounted, to
 * avoid double accounting it. This allows us to account the full size of the
 * page, not just the constituent pages of a huge page.
 */
static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
				  int nr_pages, struct page *hpage)
{
	int i, j;

	/* check current page array */
	for (i = 0; i < nr_pages; i++) {
		if (!PageCompound(pages[i]))
			continue;
		if (compound_head(pages[i]) == hpage)
			return true;
	}

	/* check previously registered pages */
	for (i = 0; i < ctx->nr_user_bufs; i++) {
		struct io_mapped_ubuf *imu = ctx->user_bufs[i];

		for (j = 0; j < imu->nr_bvecs; j++) {
			if (!PageCompound(imu->bvec[j].bv_page))
				continue;
			if (compound_head(imu->bvec[j].bv_page) == hpage)
				return true;
		}
	}

	return false;
}

static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
				 int nr_pages, struct io_mapped_ubuf *imu,
				 struct page **last_hpage)
{
	int i, ret;

	imu->acct_pages = 0;
	for (i = 0; i < nr_pages; i++) {
		if (!PageCompound(pages[i])) {
			imu->acct_pages++;
		} else {
			struct page *hpage;

			hpage = compound_head(pages[i]);
			if (hpage == *last_hpage)
				continue;
			*last_hpage = hpage;
			if (headpage_already_acct(ctx, pages, i, hpage))
				continue;
			imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;
		}
	}

	if (!imu->acct_pages)
		return 0;

	ret = io_account_mem(ctx, imu->acct_pages);
	if (ret)
		imu->acct_pages = 0;
	return ret;
}
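/*
 * Worked example (editorial, assuming x86-64 with 4 KiB base pages and
 * 2 MiB huge pages): pinning a 64 KiB buffer that lives inside a
 * single 2 MiB huge page yields 16 pages sharing one compound head.
 * The first page accounts page_size(hpage) >> PAGE_SHIFT = 512 pages;
 * the remaining 15 match *last_hpage and are skipped, so the whole
 * huge page is charged exactly once rather than 16 times.
 */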

struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages)
{
	unsigned long start, end, nr_pages;
	struct vm_area_struct **vmas = NULL;
	struct page **pages = NULL;
	int i, pret, ret = -ENOMEM;

	end = (ubuf + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	start = ubuf >> PAGE_SHIFT;
	nr_pages = end - start;

	pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		goto done;

	vmas = kvmalloc_array(nr_pages, sizeof(struct vm_area_struct *),
			      GFP_KERNEL);
	if (!vmas)
		goto done;

	ret = 0;
	mmap_read_lock(current->mm);
	pret = pin_user_pages(ubuf, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
			      pages, vmas);
	if (pret == nr_pages) {
		struct file *file = vmas[0]->vm_file;

		/* don't support file backed memory */
		for (i = 0; i < nr_pages; i++) {
			if (vmas[i]->vm_file != file) {
				ret = -EINVAL;
				break;
			}
			if (!file)
				continue;
			if (!vma_is_shmem(vmas[i]) && !is_file_hugepages(file)) {
				ret = -EOPNOTSUPP;
				break;
			}
		}
		*npages = nr_pages;
	} else {
		ret = pret < 0 ? pret : -EFAULT;
	}
	mmap_read_unlock(current->mm);
	if (ret) {
		/*
		 * If we did a partial map, or found file-backed vmas,
		 * release any pages we did get.
		 */
		if (pret > 0)
			unpin_user_pages(pages, pret);
		goto done;
	}
	ret = 0;
done:
	kvfree(vmas);
	if (ret < 0) {
		kvfree(pages);
		pages = ERR_PTR(ret);
	}
	return pages;
}
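/*
 * Worked example (editorial): for ubuf = 0x10000800 and len = 0x2000
 * with 4 KiB pages, start = 0x10000 and end = 0x10003, so nr_pages
 * is 3: the buffer is page-unaligned at both ends and touches three
 * pages even though it is only two pages long.
 */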

static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
				  struct io_mapped_ubuf **pimu,
				  struct page **last_hpage)
{
	struct io_mapped_ubuf *imu = NULL;
	struct page **pages = NULL;
	unsigned long off;
	size_t size;
	int ret, nr_pages, i;
	struct folio *folio = NULL;

	*pimu = ctx->dummy_ubuf;
	if (!iov->iov_base)
		return 0;

	ret = -ENOMEM;
	pages = io_pin_pages((unsigned long) iov->iov_base, iov->iov_len,
				&nr_pages);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		pages = NULL;
		goto done;
	}

	/* If it's a huge page, try to coalesce them into a single bvec entry */
	if (nr_pages > 1) {
		folio = page_folio(pages[0]);
		for (i = 1; i < nr_pages; i++) {
			if (page_folio(pages[i]) != folio) {
				folio = NULL;
				break;
			}
		}
		if (folio) {
			/*
			 * The pages are bound to the folio; this doesn't
			 * actually unpin them, it drops all but one
			 * reference, and that last one is usually put down
			 * by io_buffer_unmap(). Note: needs a better helper.
			 */
			unpin_user_pages(&pages[1], nr_pages - 1);
			nr_pages = 1;
		}
	}

	imu = kvmalloc(struct_size(imu, bvec, nr_pages), GFP_KERNEL);
	if (!imu)
		goto done;

	ret = io_buffer_account_pin(ctx, pages, nr_pages, imu, last_hpage);
	if (ret) {
		unpin_user_pages(pages, nr_pages);
		goto done;
	}

	off = (unsigned long) iov->iov_base & ~PAGE_MASK;
	size = iov->iov_len;
	/* store original address for later verification */
	imu->ubuf = (unsigned long) iov->iov_base;
	imu->ubuf_end = imu->ubuf + iov->iov_len;
	imu->nr_bvecs = nr_pages;
	*pimu = imu;
	ret = 0;

	if (folio) {
		bvec_set_page(&imu->bvec[0], pages[0], size, off);
		goto done;
	}
	for (i = 0; i < nr_pages; i++) {
		size_t vec_len;

		vec_len = min_t(size_t, size, PAGE_SIZE - off);
		bvec_set_page(&imu->bvec[i], pages[i], vec_len, off);
		off = 0;
		size -= vec_len;
	}
done:
	if (ret)
		kvfree(imu);
	kvfree(pages);
	return ret;
}

static int io_buffers_map_alloc(struct io_ring_ctx *ctx, unsigned int nr_args)
{
	ctx->user_bufs = kcalloc(nr_args, sizeof(*ctx->user_bufs), GFP_KERNEL);
	return ctx->user_bufs ? 0 : -ENOMEM;
}

int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned int nr_args, u64 __user *tags)
{
	struct page *last_hpage = NULL;
	struct io_rsrc_data *data;
	int i, ret;
	struct iovec iov;

	BUILD_BUG_ON(IORING_MAX_REG_BUFFERS >= (1u << 16));

	if (ctx->user_bufs)
		return -EBUSY;
	if (!nr_args || nr_args > IORING_MAX_REG_BUFFERS)
		return -EINVAL;
	ret = io_rsrc_data_alloc(ctx, io_rsrc_buf_put, tags, nr_args, &data);
	if (ret)
		return ret;
	ret = io_buffers_map_alloc(ctx, nr_args);
	if (ret) {
		io_rsrc_data_free(data);
		return ret;
	}

	for (i = 0; i < nr_args; i++, ctx->nr_user_bufs++) {
		if (arg) {
			ret = io_copy_iov(ctx, &iov, arg, i);
			if (ret)
				break;
			ret = io_buffer_validate(&iov);
			if (ret)
				break;
		} else {
			memset(&iov, 0, sizeof(iov));
		}

		if (!iov.iov_base && *io_get_tag_slot(data, i)) {
			ret = -EINVAL;
			break;
		}

		ret = io_sqe_buffer_register(ctx, &iov, &ctx->user_bufs[i],
					     &last_hpage);
		if (ret)
			break;
	}

	WARN_ON_ONCE(ctx->buf_data);

	ctx->buf_data = data;
	if (ret)
		__io_sqe_buffers_unregister(ctx);
	return ret;
}
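/*
 * Illustrative userspace sketch (editorial example, not kernel
 * source): registering two fixed buffers with liburing, which ends up
 * in io_sqe_buffers_register(). Error handling is omitted.
 *
 *	struct iovec iovs[2];
 *	iovs[0].iov_base = buf0;	iovs[0].iov_len = 4096;
 *	iovs[1].iov_base = buf1;	iovs[1].iov_len = 65536;
 *	io_uring_register_buffers(&ring, iovs, 2);
 *
 * Reads and writes can then use IORING_OP_READ_FIXED/WRITE_FIXED with
 * buf_index 0 or 1, resolved via io_import_fixed() below.
 */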

int io_import_fixed(int ddir, struct iov_iter *iter,
			   struct io_mapped_ubuf *imu,
			   u64 buf_addr, size_t len)
{
	u64 buf_end;
	size_t offset;

	if (WARN_ON_ONCE(!imu))
		return -EFAULT;
	if (unlikely(check_add_overflow(buf_addr, (u64)len, &buf_end)))
		return -EFAULT;
	/* not inside the mapped region */
	if (unlikely(buf_addr < imu->ubuf || buf_end > imu->ubuf_end))
		return -EFAULT;

	/*
	 * Might not be the start of the buffer; set the size
	 * appropriately and advance to the beginning.
	 */
	offset = buf_addr - imu->ubuf;
	iov_iter_bvec(iter, ddir, imu->bvec, imu->nr_bvecs, offset + len);

	if (offset) {
		/*
		 * Don't use iov_iter_advance() here, as it's really slow for
		 * using the latter parts of a big fixed buffer - it iterates
		 * over each segment manually. We can cheat a bit here, because
		 * we know that:
		 *
		 * 1) it's a BVEC iter, we set it up
		 * 2) all bvecs are PAGE_SIZE in size, except potentially the
		 *    first and last bvec
		 *
		 * So just find our index, and adjust the iterator afterwards.
		 * If the offset is within the first bvec (or covers the whole
		 * first bvec), just use iov_iter_advance(). This makes it
		 * easier since we can just skip the first segment, which may
		 * not be PAGE_SIZE aligned.
		 */
		const struct bio_vec *bvec = imu->bvec;

		if (offset <= bvec->bv_len) {
			/*
			 * Note: huge page buffers consist of one large
			 * bvec entry and should always go this way. The
			 * other branch doesn't expect non-PAGE_SIZE'd
			 * chunks.
			 */
			iter->bvec = bvec;
			iter->nr_segs = bvec->bv_len;
			iter->count -= offset;
			iter->iov_offset = offset;
		} else {
			unsigned long seg_skip;

			/* skip first vec */
			offset -= bvec->bv_len;
			seg_skip = 1 + (offset >> PAGE_SHIFT);

			iter->bvec = bvec + seg_skip;
			iter->nr_segs -= seg_skip;
			iter->count -= bvec->bv_len + offset;
			iter->iov_offset = offset & ~PAGE_MASK;
		}
	}

	return 0;
}
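/*
 * Worked example (editorial): take an imu covering a 16 KiB buffer as
 * four PAGE_SIZE bvecs, with buf_addr = imu->ubuf + 6000 and
 * len = 2048. offset is 6000, which exceeds bvec[0].bv_len (4096), so
 * the else branch runs: offset becomes 1904, seg_skip = 1 +
 * (1904 >> PAGE_SHIFT) = 1, and the iterator starts at bvec[1] with
 * iov_offset = 1904 and count = 2048, skipping the first segment
 * without walking it byte by byte.
 */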