xref: /openbmc/linux/io_uring/rsrc.c (revision c732ea24)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/kernel.h>
3 #include <linux/errno.h>
4 #include <linux/fs.h>
5 #include <linux/file.h>
6 #include <linux/mm.h>
7 #include <linux/slab.h>
8 #include <linux/nospec.h>
9 #include <linux/hugetlb.h>
10 #include <linux/compat.h>
11 #include <linux/io_uring.h>
12 
13 #include <uapi/linux/io_uring.h>
14 
15 #include "io_uring.h"
16 #include "openclose.h"
17 #include "rsrc.h"
18 
19 struct io_rsrc_update {
20 	struct file			*file;
21 	u64				arg;
22 	u32				nr_args;
23 	u32				offset;
24 };
25 
26 static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
27 				  struct io_mapped_ubuf **pimu,
28 				  struct page **last_hpage);
29 
30 /* only define max */
31 #define IORING_MAX_FIXED_FILES	(1U << 20)
32 #define IORING_MAX_REG_BUFFERS	(1U << 14)
33 
34 static inline bool io_put_rsrc_data_ref(struct io_rsrc_data *rsrc_data)
35 {
36 	return !--rsrc_data->refs;
37 }
38 
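/*
 * Charge @nr_pages of pinned memory against the user's RLIMIT_MEMLOCK.
 * The accounting is done with a cmpxchg loop on user->locked_vm so that
 * concurrent callers cannot race past the limit.
 */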
39 int __io_account_mem(struct user_struct *user, unsigned long nr_pages)
40 {
41 	unsigned long page_limit, cur_pages, new_pages;
42 
43 	if (!nr_pages)
44 		return 0;
45 
46 	/* Don't allow more pages than we can safely lock */
47 	page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
48 
49 	cur_pages = atomic_long_read(&user->locked_vm);
50 	do {
51 		new_pages = cur_pages + nr_pages;
52 		if (new_pages > page_limit)
53 			return -ENOMEM;
54 	} while (!atomic_long_try_cmpxchg(&user->locked_vm,
55 					  &cur_pages, new_pages));
56 	return 0;
57 }
58 
59 static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
60 {
61 	if (ctx->user)
62 		__io_unaccount_mem(ctx->user, nr_pages);
63 
64 	if (ctx->mm_account)
65 		atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
66 }
67 
68 static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
69 {
70 	int ret;
71 
72 	if (ctx->user) {
73 		ret = __io_account_mem(ctx->user, nr_pages);
74 		if (ret)
75 			return ret;
76 	}
77 
78 	if (ctx->mm_account)
79 		atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);
80 
81 	return 0;
82 }
83 
84 static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
85 		       void __user *arg, unsigned index)
86 {
87 	struct iovec __user *src;
88 
89 #ifdef CONFIG_COMPAT
90 	if (ctx->compat) {
91 		struct compat_iovec __user *ciovs;
92 		struct compat_iovec ciov;
93 
94 		ciovs = (struct compat_iovec __user *) arg;
95 		if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
96 			return -EFAULT;
97 
98 		dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
99 		dst->iov_len = ciov.iov_len;
100 		return 0;
101 	}
102 #endif
103 	src = (struct iovec __user *) arg;
104 	if (copy_from_user(dst, &src[index], sizeof(*dst)))
105 		return -EFAULT;
106 	return 0;
107 }
108 
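/*
 * Basic sanity checks on a buffer about to be registered: a NULL base is
 * only accepted for zero-length (sparse) entries, the length is capped at
 * 1G, and base plus the page-aligned length must not wrap the address space.
 */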
109 static int io_buffer_validate(struct iovec *iov)
110 {
111 	unsigned long tmp, acct_len = iov->iov_len + (PAGE_SIZE - 1);
112 
113 	/*
114 	 * Don't impose further limits on the size and buffer
115 	 * constraints here; we'll return -EINVAL later when the IO is
116 	 * submitted if they are wrong.
117 	 */
118 	if (!iov->iov_base)
119 		return iov->iov_len ? -EFAULT : 0;
120 	if (!iov->iov_len)
121 		return -EFAULT;
122 
123 	/* arbitrary limit, but we need something */
124 	if (iov->iov_len > SZ_1G)
125 		return -EFAULT;
126 
127 	if (check_add_overflow((unsigned long)iov->iov_base, acct_len, &tmp))
128 		return -EOVERFLOW;
129 
130 	return 0;
131 }
132 
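/*
 * Tear down one registered buffer slot: unpin its pages, drop the memory
 * accounting and free the io_mapped_ubuf, unless the slot only holds the
 * shared dummy entry used for sparse registrations.
 */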
133 static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf **slot)
134 {
135 	struct io_mapped_ubuf *imu = *slot;
136 	unsigned int i;
137 
138 	if (imu != ctx->dummy_ubuf) {
139 		for (i = 0; i < imu->nr_bvecs; i++)
140 			unpin_user_page(imu->bvec[i].bv_page);
141 		if (imu->acct_pages)
142 			io_unaccount_mem(ctx, imu->acct_pages);
143 		kvfree(imu);
144 	}
145 	*slot = NULL;
146 }
147 
148 static void io_rsrc_put_work_one(struct io_rsrc_data *rsrc_data,
149 				 struct io_rsrc_put *prsrc)
150 {
151 	struct io_ring_ctx *ctx = rsrc_data->ctx;
152 
153 	if (prsrc->tag)
154 		io_post_aux_cqe(ctx, prsrc->tag, 0, 0);
155 	rsrc_data->do_put(ctx, prsrc);
156 }
157 
158 static void __io_rsrc_put_work(struct io_rsrc_node *ref_node)
159 {
160 	struct io_rsrc_data *rsrc_data = ref_node->rsrc_data;
161 	struct io_rsrc_put *prsrc, *tmp;
162 
163 	if (ref_node->inline_items)
164 		io_rsrc_put_work_one(rsrc_data, &ref_node->item);
165 
166 	list_for_each_entry_safe(prsrc, tmp, &ref_node->item_list, list) {
167 		list_del(&prsrc->list);
168 		io_rsrc_put_work_one(rsrc_data, prsrc);
169 		kfree(prsrc);
170 	}
171 
172 	io_rsrc_node_destroy(rsrc_data->ctx, ref_node);
173 	if (io_put_rsrc_data_ref(rsrc_data))
174 		complete(&rsrc_data->done);
175 }
176 
177 void io_wait_rsrc_data(struct io_rsrc_data *data)
178 {
179 	if (data && !io_put_rsrc_data_ref(data))
180 		wait_for_completion(&data->done);
181 }
182 
183 void io_rsrc_node_destroy(struct io_ring_ctx *ctx, struct io_rsrc_node *node)
184 {
185 	if (!io_alloc_cache_put(&ctx->rsrc_node_cache, &node->cache))
186 		kfree(node);
187 }
188 
189 void io_rsrc_node_ref_zero(struct io_rsrc_node *node)
190 	__must_hold(&node->rsrc_data->ctx->uring_lock)
191 {
192 	struct io_ring_ctx *ctx = node->rsrc_data->ctx;
193 
194 	while (!list_empty(&ctx->rsrc_ref_list)) {
195 		node = list_first_entry(&ctx->rsrc_ref_list,
196 					    struct io_rsrc_node, node);
197 		/* recycle ref nodes in order */
198 		if (node->refs)
199 			break;
200 		list_del(&node->node);
201 		__io_rsrc_put_work(node);
202 	}
203 }
204 
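/*
 * Allocate an rsrc node, preferring an entry from the per-ring cache over
 * a fresh kzalloc(). The node starts out with a single reference and an
 * empty list of queued resource puts.
 */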
205 struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx)
206 {
207 	struct io_rsrc_node *ref_node;
208 	struct io_cache_entry *entry;
209 
210 	entry = io_alloc_cache_get(&ctx->rsrc_node_cache);
211 	if (entry) {
212 		ref_node = container_of(entry, struct io_rsrc_node, cache);
213 	} else {
214 		ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
215 		if (!ref_node)
216 			return NULL;
217 	}
218 
219 	ref_node->rsrc_data = NULL;
220 	ref_node->refs = 1;
221 	INIT_LIST_HEAD(&ref_node->node);
222 	INIT_LIST_HEAD(&ref_node->item_list);
223 	ref_node->inline_items = 0;
224 	return ref_node;
225 }
226 
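/*
 * Retire the current rsrc node: attach it to @data_to_kill (taking a data
 * reference), queue it on ->rsrc_ref_list, drop the master node reference
 * and install a freshly allocated node in its place. Callers must have
 * called io_rsrc_node_switch_start() first, which is why the allocation
 * here is only WARN_ON_ONCE()'d rather than handled.
 */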
227 void io_rsrc_node_switch(struct io_ring_ctx *ctx,
228 			 struct io_rsrc_data *data_to_kill)
229 	__must_hold(&ctx->uring_lock)
230 {
231 	struct io_rsrc_node *node = ctx->rsrc_node;
232 	struct io_rsrc_node *backup = io_rsrc_node_alloc(ctx);
233 
234 	if (WARN_ON_ONCE(!backup))
235 		return;
236 
237 	data_to_kill->refs++;
238 	node->rsrc_data = data_to_kill;
239 	list_add_tail(&node->node, &ctx->rsrc_ref_list);
240 	/* put master ref */
241 	io_put_rsrc_node(ctx, node);
242 	ctx->rsrc_node = backup;
243 }
244 
245 int io_rsrc_node_switch_start(struct io_ring_ctx *ctx)
246 {
247 	if (io_alloc_cache_empty(&ctx->rsrc_node_cache)) {
248 		struct io_rsrc_node *node = kzalloc(sizeof(*node), GFP_KERNEL);
249 
250 		if (!node)
251 			return -ENOMEM;
252 		io_alloc_cache_put(&ctx->rsrc_node_cache, &node->cache);
253 	}
254 	return 0;
255 }
256 
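/*
 * Switch away from @data and wait for all outstanding references to it to
 * be dropped, so the table it describes can be torn down or replaced.
 * ->uring_lock is dropped while waiting; if the wait is interrupted by a
 * signal, the master reference is restored and an error is returned.
 */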
257 __cold static int io_rsrc_ref_quiesce(struct io_rsrc_data *data,
258 				      struct io_ring_ctx *ctx)
259 {
260 	int ret;
261 
262 	/* As we may drop ->uring_lock, another task may have started a quiesce */
263 	if (data->quiesce)
264 		return -ENXIO;
265 	ret = io_rsrc_node_switch_start(ctx);
266 	if (ret)
267 		return ret;
268 	io_rsrc_node_switch(ctx, data);
269 
270 	/* kill initial ref */
271 	if (io_put_rsrc_data_ref(data))
272 		return 0;
273 
274 	data->quiesce = true;
275 	mutex_unlock(&ctx->uring_lock);
276 	do {
277 		ret = io_run_task_work_sig(ctx);
278 		if (ret < 0) {
279 			mutex_lock(&ctx->uring_lock);
280 			if (!data->refs) {
281 				ret = 0;
282 			} else {
283 				/* restore the master reference */
284 				data->refs++;
285 			}
286 			break;
287 		}
288 		ret = wait_for_completion_interruptible(&data->done);
289 		if (!ret) {
290 			mutex_lock(&ctx->uring_lock);
291 			if (!data->refs)
292 				break;
293 			/*
294 			 * it has been revived by another thread while
295 			 * we were unlocked
296 			 */
297 			mutex_unlock(&ctx->uring_lock);
298 		}
299 	} while (1);
300 	data->quiesce = false;
301 
302 	return ret;
303 }
304 
305 static void io_free_page_table(void **table, size_t size)
306 {
307 	unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);
308 
309 	for (i = 0; i < nr_tables; i++)
310 		kfree(table[i]);
311 	kfree(table);
312 }
313 
314 static void io_rsrc_data_free(struct io_rsrc_data *data)
315 {
316 	size_t size = data->nr * sizeof(data->tags[0][0]);
317 
318 	if (data->tags)
319 		io_free_page_table((void **)data->tags, size);
320 	kfree(data);
321 }
322 
323 static __cold void **io_alloc_page_table(size_t size)
324 {
325 	unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);
326 	size_t init_size = size;
327 	void **table;
328 
329 	table = kcalloc(nr_tables, sizeof(*table), GFP_KERNEL_ACCOUNT);
330 	if (!table)
331 		return NULL;
332 
333 	for (i = 0; i < nr_tables; i++) {
334 		unsigned int this_size = min_t(size_t, size, PAGE_SIZE);
335 
336 		table[i] = kzalloc(this_size, GFP_KERNEL_ACCOUNT);
337 		if (!table[i]) {
338 			io_free_page_table(table, init_size);
339 			return NULL;
340 		}
341 		size -= this_size;
342 	}
343 	return table;
344 }
345 
346 __cold static int io_rsrc_data_alloc(struct io_ring_ctx *ctx,
347 				     rsrc_put_fn *do_put, u64 __user *utags,
348 				     unsigned nr, struct io_rsrc_data **pdata)
349 {
350 	struct io_rsrc_data *data;
351 	int ret = 0;
352 	unsigned i;
353 
354 	data = kzalloc(sizeof(*data), GFP_KERNEL);
355 	if (!data)
356 		return -ENOMEM;
357 	data->tags = (u64 **)io_alloc_page_table(nr * sizeof(data->tags[0][0]));
358 	if (!data->tags) {
359 		kfree(data);
360 		return -ENOMEM;
361 	}
362 
363 	data->nr = nr;
364 	data->ctx = ctx;
365 	data->do_put = do_put;
366 	data->refs = 1;
367 	if (utags) {
368 		ret = -EFAULT;
369 		for (i = 0; i < nr; i++) {
370 			u64 *tag_slot = io_get_tag_slot(data, i);
371 
372 			if (copy_from_user(tag_slot, &utags[i],
373 					   sizeof(*tag_slot)))
374 				goto fail;
375 		}
376 	}
377 	init_completion(&data->done);
378 	*pdata = data;
379 	return 0;
380 fail:
381 	io_rsrc_data_free(data);
382 	return ret;
383 }
384 
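/*
 * Apply an update to the registered file table. For each slot touched, any
 * existing file is queued for removal via the current rsrc node, and the
 * new file (unless skipped or -1) is installed together with its tag and
 * bitmap bit. Returns the number of entries processed, or an error if
 * nothing could be updated.
 */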
385 static int __io_sqe_files_update(struct io_ring_ctx *ctx,
386 				 struct io_uring_rsrc_update2 *up,
387 				 unsigned nr_args)
388 {
389 	u64 __user *tags = u64_to_user_ptr(up->tags);
390 	__s32 __user *fds = u64_to_user_ptr(up->data);
391 	struct io_rsrc_data *data = ctx->file_data;
392 	struct io_fixed_file *file_slot;
393 	struct file *file;
394 	int fd, i, err = 0;
395 	unsigned int done;
396 	bool needs_switch = false;
397 
398 	if (!ctx->file_data)
399 		return -ENXIO;
400 	if (up->offset + nr_args > ctx->nr_user_files)
401 		return -EINVAL;
402 
403 	for (done = 0; done < nr_args; done++) {
404 		u64 tag = 0;
405 
406 		if ((tags && copy_from_user(&tag, &tags[done], sizeof(tag))) ||
407 		    copy_from_user(&fd, &fds[done], sizeof(fd))) {
408 			err = -EFAULT;
409 			break;
410 		}
411 		if ((fd == IORING_REGISTER_FILES_SKIP || fd == -1) && tag) {
412 			err = -EINVAL;
413 			break;
414 		}
415 		if (fd == IORING_REGISTER_FILES_SKIP)
416 			continue;
417 
418 		i = array_index_nospec(up->offset + done, ctx->nr_user_files);
419 		file_slot = io_fixed_file_slot(&ctx->file_table, i);
420 
421 		if (file_slot->file_ptr) {
422 			file = (struct file *)(file_slot->file_ptr & FFS_MASK);
423 			err = io_queue_rsrc_removal(data, i, ctx->rsrc_node, file);
424 			if (err)
425 				break;
426 			file_slot->file_ptr = 0;
427 			io_file_bitmap_clear(&ctx->file_table, i);
428 			needs_switch = true;
429 		}
430 		if (fd != -1) {
431 			file = fget(fd);
432 			if (!file) {
433 				err = -EBADF;
434 				break;
435 			}
436 			/*
437 			 * Don't allow io_uring instances to be registered. If
438 			 * UNIX isn't enabled, then this causes a reference
439 			 * cycle and this instance can never get freed. If UNIX
440 			 * is enabled we'll handle it just fine, but there's
441 			 * still no point in allowing a ring fd as it doesn't
442 			 * support regular read/write anyway.
443 			 */
444 			if (io_is_uring_fops(file)) {
445 				fput(file);
446 				err = -EBADF;
447 				break;
448 			}
449 			err = io_scm_file_account(ctx, file);
450 			if (err) {
451 				fput(file);
452 				break;
453 			}
454 			*io_get_tag_slot(data, i) = tag;
455 			io_fixed_file_set(file_slot, file);
456 			io_file_bitmap_set(&ctx->file_table, i);
457 		}
458 	}
459 
460 	if (needs_switch)
461 		io_rsrc_node_switch(ctx, data);
462 	return done ? done : err;
463 }
464 
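/*
 * Same as __io_sqe_files_update(), but for registered buffers: each new
 * iovec is validated and pinned first, then the old buffer (if any) is
 * queued for removal and the new io_mapped_ubuf and tag are installed.
 */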
465 static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
466 				   struct io_uring_rsrc_update2 *up,
467 				   unsigned int nr_args)
468 {
469 	u64 __user *tags = u64_to_user_ptr(up->tags);
470 	struct iovec iov, __user *iovs = u64_to_user_ptr(up->data);
471 	struct page *last_hpage = NULL;
472 	bool needs_switch = false;
473 	__u32 done;
474 	int i, err;
475 
476 	if (!ctx->buf_data)
477 		return -ENXIO;
478 	if (up->offset + nr_args > ctx->nr_user_bufs)
479 		return -EINVAL;
480 
481 	for (done = 0; done < nr_args; done++) {
482 		struct io_mapped_ubuf *imu;
483 		int offset = up->offset + done;
484 		u64 tag = 0;
485 
486 		err = io_copy_iov(ctx, &iov, iovs, done);
487 		if (err)
488 			break;
489 		if (tags && copy_from_user(&tag, &tags[done], sizeof(tag))) {
490 			err = -EFAULT;
491 			break;
492 		}
493 		err = io_buffer_validate(&iov);
494 		if (err)
495 			break;
496 		if (!iov.iov_base && tag) {
497 			err = -EINVAL;
498 			break;
499 		}
500 		err = io_sqe_buffer_register(ctx, &iov, &imu, &last_hpage);
501 		if (err)
502 			break;
503 
504 		i = array_index_nospec(offset, ctx->nr_user_bufs);
505 		if (ctx->user_bufs[i] != ctx->dummy_ubuf) {
506 			err = io_queue_rsrc_removal(ctx->buf_data, i,
507 						    ctx->rsrc_node, ctx->user_bufs[i]);
508 			if (unlikely(err)) {
509 				io_buffer_unmap(ctx, &imu);
510 				break;
511 			}
512 			ctx->user_bufs[i] = ctx->dummy_ubuf;
513 			needs_switch = true;
514 		}
515 
516 		ctx->user_bufs[i] = imu;
517 		*io_get_tag_slot(ctx->buf_data, i) = tag;
518 	}
519 
520 	if (needs_switch)
521 		io_rsrc_node_switch(ctx, ctx->buf_data);
522 	return done ? done : err;
523 }
524 
525 static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
526 				     struct io_uring_rsrc_update2 *up,
527 				     unsigned nr_args)
528 {
529 	__u32 tmp;
530 	int err;
531 
532 	lockdep_assert_held(&ctx->uring_lock);
533 
534 	if (check_add_overflow(up->offset, nr_args, &tmp))
535 		return -EOVERFLOW;
536 	err = io_rsrc_node_switch_start(ctx);
537 	if (err)
538 		return err;
539 
540 	switch (type) {
541 	case IORING_RSRC_FILE:
542 		return __io_sqe_files_update(ctx, up, nr_args);
543 	case IORING_RSRC_BUFFER:
544 		return __io_sqe_buffers_update(ctx, up, nr_args);
545 	}
546 	return -EINVAL;
547 }
548 
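/*
 * Legacy IORING_REGISTER_FILES_UPDATE: userspace passes a struct
 * io_uring_rsrc_update, which is a prefix of io_uring_rsrc_update2, so
 * only that much is copied and the remaining fields stay zeroed.
 */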
549 int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
550 			     unsigned nr_args)
551 {
552 	struct io_uring_rsrc_update2 up;
553 
554 	if (!nr_args)
555 		return -EINVAL;
556 	memset(&up, 0, sizeof(up));
557 	if (copy_from_user(&up, arg, sizeof(struct io_uring_rsrc_update)))
558 		return -EFAULT;
559 	if (up.resv || up.resv2)
560 		return -EINVAL;
561 	return __io_register_rsrc_update(ctx, IORING_RSRC_FILE, &up, nr_args);
562 }
563 
564 int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
565 			    unsigned size, unsigned type)
566 {
567 	struct io_uring_rsrc_update2 up;
568 
569 	if (size != sizeof(up))
570 		return -EINVAL;
571 	if (copy_from_user(&up, arg, sizeof(up)))
572 		return -EFAULT;
573 	if (!up.nr || up.resv || up.resv2)
574 		return -EINVAL;
575 	return __io_register_rsrc_update(ctx, type, &up, up.nr);
576 }
577 
578 __cold int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
579 			    unsigned int size, unsigned int type)
580 {
581 	struct io_uring_rsrc_register rr;
582 
583 	/* keep it extendible */
584 	if (size != sizeof(rr))
585 		return -EINVAL;
586 
587 	memset(&rr, 0, sizeof(rr));
588 	if (copy_from_user(&rr, arg, size))
589 		return -EFAULT;
590 	if (!rr.nr || rr.resv2)
591 		return -EINVAL;
592 	if (rr.flags & ~IORING_RSRC_REGISTER_SPARSE)
593 		return -EINVAL;
594 
595 	switch (type) {
596 	case IORING_RSRC_FILE:
597 		if (rr.flags & IORING_RSRC_REGISTER_SPARSE && rr.data)
598 			break;
599 		return io_sqe_files_register(ctx, u64_to_user_ptr(rr.data),
600 					     rr.nr, u64_to_user_ptr(rr.tags));
601 	case IORING_RSRC_BUFFER:
602 		if (rr.flags & IORING_RSRC_REGISTER_SPARSE && rr.data)
603 			break;
604 		return io_sqe_buffers_register(ctx, u64_to_user_ptr(rr.data),
605 					       rr.nr, u64_to_user_ptr(rr.tags));
606 	}
607 	return -EINVAL;
608 }
609 
610 int io_files_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
611 {
612 	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
613 
614 	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
615 		return -EINVAL;
616 	if (sqe->rw_flags || sqe->splice_fd_in)
617 		return -EINVAL;
618 
619 	up->offset = READ_ONCE(sqe->off);
620 	up->nr_args = READ_ONCE(sqe->len);
621 	if (!up->nr_args)
622 		return -EINVAL;
623 	up->arg = READ_ONCE(sqe->addr);
624 	return 0;
625 }
626 
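/*
 * IORING_FILE_INDEX_ALLOC flavour of the files update: each fd from
 * userspace is installed into a free fixed-file slot and the allocated
 * index is written back to the user array in its place.
 */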
627 static int io_files_update_with_index_alloc(struct io_kiocb *req,
628 					    unsigned int issue_flags)
629 {
630 	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
631 	__s32 __user *fds = u64_to_user_ptr(up->arg);
632 	unsigned int done;
633 	struct file *file;
634 	int ret, fd;
635 
636 	if (!req->ctx->file_data)
637 		return -ENXIO;
638 
639 	for (done = 0; done < up->nr_args; done++) {
640 		if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
641 			ret = -EFAULT;
642 			break;
643 		}
644 
645 		file = fget(fd);
646 		if (!file) {
647 			ret = -EBADF;
648 			break;
649 		}
650 		ret = io_fixed_fd_install(req, issue_flags, file,
651 					  IORING_FILE_INDEX_ALLOC);
652 		if (ret < 0)
653 			break;
654 		if (copy_to_user(&fds[done], &ret, sizeof(ret))) {
655 			__io_close_fixed(req->ctx, issue_flags, ret);
656 			ret = -EFAULT;
657 			break;
658 		}
659 	}
660 
661 	if (done)
662 		return done;
663 	return ret;
664 }
665 
666 int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
667 {
668 	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
669 	struct io_ring_ctx *ctx = req->ctx;
670 	struct io_uring_rsrc_update2 up2;
671 	int ret;
672 
673 	up2.offset = up->offset;
674 	up2.data = up->arg;
675 	up2.nr = 0;
676 	up2.tags = 0;
677 	up2.resv = 0;
678 	up2.resv2 = 0;
679 
680 	if (up->offset == IORING_FILE_INDEX_ALLOC) {
681 		ret = io_files_update_with_index_alloc(req, issue_flags);
682 	} else {
683 		io_ring_submit_lock(ctx, issue_flags);
684 		ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE,
685 						&up2, up->nr_args);
686 		io_ring_submit_unlock(ctx, issue_flags);
687 	}
688 
689 	if (ret < 0)
690 		req_set_fail(req);
691 	io_req_set_res(req, ret, 0);
692 	return IOU_OK;
693 }
694 
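/*
 * Queue a resource (file or buffer) for deferred put on the given rsrc
 * node. The first item on a node is stored inline in the node itself;
 * later items are allocated and linked on ->item_list. The slot's tag is
 * moved into the put request so a completion can be posted for it once
 * the node's references reach zero.
 */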
695 int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
696 			  struct io_rsrc_node *node, void *rsrc)
697 {
698 	u64 *tag_slot = io_get_tag_slot(data, idx);
699 	struct io_rsrc_put *prsrc;
700 	bool inline_item = true;
701 
702 	if (!node->inline_items) {
703 		prsrc = &node->item;
704 		node->inline_items++;
705 	} else {
706 		prsrc = kzalloc(sizeof(*prsrc), GFP_KERNEL);
707 		if (!prsrc)
708 			return -ENOMEM;
709 		inline_item = false;
710 	}
711 
712 	prsrc->tag = *tag_slot;
713 	*tag_slot = 0;
714 	prsrc->rsrc = rsrc;
715 	if (!inline_item)
716 		list_add(&prsrc->list, &node->item_list);
717 	return 0;
718 }
719 
720 void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
721 {
722 	int i;
723 
724 	for (i = 0; i < ctx->nr_user_files; i++) {
725 		struct file *file = io_file_from_index(&ctx->file_table, i);
726 
727 		/* skip scm-accounted files; they'll be freed by ->ring_sock */
728 		if (!file || io_file_need_scm(file))
729 			continue;
730 		io_file_bitmap_clear(&ctx->file_table, i);
731 		fput(file);
732 	}
733 
734 #if defined(CONFIG_UNIX)
735 	if (ctx->ring_sock) {
736 		struct sock *sock = ctx->ring_sock->sk;
737 		struct sk_buff *skb;
738 
739 		while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
740 			kfree_skb(skb);
741 	}
742 #endif
743 	io_free_file_tables(&ctx->file_table);
744 	io_file_table_set_alloc_range(ctx, 0, 0);
745 	io_rsrc_data_free(ctx->file_data);
746 	ctx->file_data = NULL;
747 	ctx->nr_user_files = 0;
748 }
749 
750 int io_sqe_files_unregister(struct io_ring_ctx *ctx)
751 {
752 	unsigned nr = ctx->nr_user_files;
753 	int ret;
754 
755 	if (!ctx->file_data)
756 		return -ENXIO;
757 
758 	/*
759 	 * Quiesce may unlock ->uring_lock; while it's not held, prevent
760 	 * new requests from using the table.
761 	 */
762 	ctx->nr_user_files = 0;
763 	ret = io_rsrc_ref_quiesce(ctx->file_data, ctx);
764 	ctx->nr_user_files = nr;
765 	if (!ret)
766 		__io_sqe_files_unregister(ctx);
767 	return ret;
768 }
769 
770 /*
771  * Ensure the UNIX gc is aware of our file set, so we are certain that
772  * the io_uring can be safely unregistered on process exit, even if we have
773  * loops in the file referencing. We account only files that can hold other
774  * files because otherwise they can't form a loop and so are not interesting
775  * for GC.
776  */
777 int __io_scm_file_account(struct io_ring_ctx *ctx, struct file *file)
778 {
779 #if defined(CONFIG_UNIX)
780 	struct sock *sk = ctx->ring_sock->sk;
781 	struct sk_buff_head *head = &sk->sk_receive_queue;
782 	struct scm_fp_list *fpl;
783 	struct sk_buff *skb;
784 
785 	if (likely(!io_file_need_scm(file)))
786 		return 0;
787 
788 	/*
789 	 * See if we can merge this file into an existing skb SCM_RIGHTS
790 	 * file set. If there's no room, fall back to allocating a new skb
791 	 * and filling it in.
792 	 */
793 	spin_lock_irq(&head->lock);
794 	skb = skb_peek(head);
795 	if (skb && UNIXCB(skb).fp->count < SCM_MAX_FD)
796 		__skb_unlink(skb, head);
797 	else
798 		skb = NULL;
799 	spin_unlock_irq(&head->lock);
800 
801 	if (!skb) {
802 		fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
803 		if (!fpl)
804 			return -ENOMEM;
805 
806 		skb = alloc_skb(0, GFP_KERNEL);
807 		if (!skb) {
808 			kfree(fpl);
809 			return -ENOMEM;
810 		}
811 
812 		fpl->user = get_uid(current_user());
813 		fpl->max = SCM_MAX_FD;
814 		fpl->count = 0;
815 
816 		UNIXCB(skb).fp = fpl;
817 		skb->sk = sk;
818 		skb->scm_io_uring = 1;
819 		skb->destructor = unix_destruct_scm;
820 		refcount_add(skb->truesize, &sk->sk_wmem_alloc);
821 	}
822 
823 	fpl = UNIXCB(skb).fp;
824 	fpl->fp[fpl->count++] = get_file(file);
825 	unix_inflight(fpl->user, file);
826 	skb_queue_head(head, skb);
827 	fput(file);
828 #endif
829 	return 0;
830 }
831 
832 static __cold void io_rsrc_file_scm_put(struct io_ring_ctx *ctx, struct file *file)
833 {
834 #if defined(CONFIG_UNIX)
835 	struct sock *sock = ctx->ring_sock->sk;
836 	struct sk_buff_head list, *head = &sock->sk_receive_queue;
837 	struct sk_buff *skb;
838 	int i;
839 
840 	__skb_queue_head_init(&list);
841 
842 	/*
843 	 * Find the skb that holds this file in its SCM_RIGHTS. When found,
844 	 * remove this entry and rearrange the file array.
845 	 */
846 	skb = skb_dequeue(head);
847 	while (skb) {
848 		struct scm_fp_list *fp;
849 
850 		fp = UNIXCB(skb).fp;
851 		for (i = 0; i < fp->count; i++) {
852 			int left;
853 
854 			if (fp->fp[i] != file)
855 				continue;
856 
857 			unix_notinflight(fp->user, fp->fp[i]);
858 			left = fp->count - 1 - i;
859 			if (left) {
860 				memmove(&fp->fp[i], &fp->fp[i + 1],
861 						left * sizeof(struct file *));
862 			}
863 			fp->count--;
864 			if (!fp->count) {
865 				kfree_skb(skb);
866 				skb = NULL;
867 			} else {
868 				__skb_queue_tail(&list, skb);
869 			}
870 			fput(file);
871 			file = NULL;
872 			break;
873 		}
874 
875 		if (!file)
876 			break;
877 
878 		__skb_queue_tail(&list, skb);
879 
880 		skb = skb_dequeue(head);
881 	}
882 
883 	if (skb_peek(&list)) {
884 		spin_lock_irq(&head->lock);
885 		while ((skb = __skb_dequeue(&list)) != NULL)
886 			__skb_queue_tail(head, skb);
887 		spin_unlock_irq(&head->lock);
888 	}
889 #endif
890 }
891 
892 static void io_rsrc_file_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
893 {
894 	struct file *file = prsrc->file;
895 
896 	if (likely(!io_file_need_scm(file)))
897 		fput(file);
898 	else
899 		io_rsrc_file_scm_put(ctx, file);
900 }
901 
902 int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
903 			  unsigned nr_args, u64 __user *tags)
904 {
905 	__s32 __user *fds = (__s32 __user *) arg;
906 	struct file *file;
907 	int fd, ret;
908 	unsigned i;
909 
910 	if (ctx->file_data)
911 		return -EBUSY;
912 	if (!nr_args)
913 		return -EINVAL;
914 	if (nr_args > IORING_MAX_FIXED_FILES)
915 		return -EMFILE;
916 	if (nr_args > rlimit(RLIMIT_NOFILE))
917 		return -EMFILE;
918 	ret = io_rsrc_data_alloc(ctx, io_rsrc_file_put, tags, nr_args,
919 				 &ctx->file_data);
920 	if (ret)
921 		return ret;
922 
923 	if (!io_alloc_file_tables(&ctx->file_table, nr_args)) {
924 		io_rsrc_data_free(ctx->file_data);
925 		ctx->file_data = NULL;
926 		return -ENOMEM;
927 	}
928 
929 	for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
930 		struct io_fixed_file *file_slot;
931 
932 		if (fds && copy_from_user(&fd, &fds[i], sizeof(fd))) {
933 			ret = -EFAULT;
934 			goto fail;
935 		}
936 		/* allow sparse sets */
937 		if (!fds || fd == -1) {
938 			ret = -EINVAL;
939 			if (unlikely(*io_get_tag_slot(ctx->file_data, i)))
940 				goto fail;
941 			continue;
942 		}
943 
944 		file = fget(fd);
945 		ret = -EBADF;
946 		if (unlikely(!file))
947 			goto fail;
948 
949 		/*
950 		 * Don't allow io_uring instances to be registered. If UNIX
951 		 * isn't enabled, then this causes a reference cycle and this
952 		 * instance can never get freed. If UNIX is enabled we'll
953 		 * handle it just fine, but there's still no point in allowing
954 		 * a ring fd as it doesn't support regular read/write anyway.
955 		 */
956 		if (io_is_uring_fops(file)) {
957 			fput(file);
958 			goto fail;
959 		}
960 		ret = io_scm_file_account(ctx, file);
961 		if (ret) {
962 			fput(file);
963 			goto fail;
964 		}
965 		file_slot = io_fixed_file_slot(&ctx->file_table, i);
966 		io_fixed_file_set(file_slot, file);
967 		io_file_bitmap_set(&ctx->file_table, i);
968 	}
969 
970 	/* default it to the whole table */
971 	io_file_table_set_alloc_range(ctx, 0, ctx->nr_user_files);
972 	return 0;
973 fail:
974 	__io_sqe_files_unregister(ctx);
975 	return ret;
976 }
977 
978 static void io_rsrc_buf_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
979 {
980 	io_buffer_unmap(ctx, &prsrc->buf);
981 	prsrc->buf = NULL;
982 }
983 
984 void __io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
985 {
986 	unsigned int i;
987 
988 	for (i = 0; i < ctx->nr_user_bufs; i++)
989 		io_buffer_unmap(ctx, &ctx->user_bufs[i]);
990 	kfree(ctx->user_bufs);
991 	io_rsrc_data_free(ctx->buf_data);
992 	ctx->user_bufs = NULL;
993 	ctx->buf_data = NULL;
994 	ctx->nr_user_bufs = 0;
995 }
996 
997 int io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
998 {
999 	unsigned nr = ctx->nr_user_bufs;
1000 	int ret;
1001 
1002 	if (!ctx->buf_data)
1003 		return -ENXIO;
1004 
1005 	/*
1006 	 * Quiesce may unlock ->uring_lock; while it's not held, prevent
1007 	 * new requests from using the table.
1008 	 */
1009 	ctx->nr_user_bufs = 0;
1010 	ret = io_rsrc_ref_quiesce(ctx->buf_data, ctx);
1011 	ctx->nr_user_bufs = nr;
1012 	if (!ret)
1013 		__io_sqe_buffers_unregister(ctx);
1014 	return ret;
1015 }
1016 
1017 /*
1018  * Not super efficient, but this only happens at registration time. And we do cache
1019  * the last compound head, so generally we'll only do a full search if we don't
1020  * match that one.
1021  *
1022  * We check if the given compound head page has already been accounted, to
1023  * avoid double accounting it. This allows us to account the full size of the
1024  * page, not just the constituent pages of a huge page.
1025  */
1026 static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
1027 				  int nr_pages, struct page *hpage)
1028 {
1029 	int i, j;
1030 
1031 	/* check current page array */
1032 	for (i = 0; i < nr_pages; i++) {
1033 		if (!PageCompound(pages[i]))
1034 			continue;
1035 		if (compound_head(pages[i]) == hpage)
1036 			return true;
1037 	}
1038 
1039 	/* check previously registered pages */
1040 	for (i = 0; i < ctx->nr_user_bufs; i++) {
1041 		struct io_mapped_ubuf *imu = ctx->user_bufs[i];
1042 
1043 		for (j = 0; j < imu->nr_bvecs; j++) {
1044 			if (!PageCompound(imu->bvec[j].bv_page))
1045 				continue;
1046 			if (compound_head(imu->bvec[j].bv_page) == hpage)
1047 				return true;
1048 		}
1049 	}
1050 
1051 	return false;
1052 }
1053 
1054 static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
1055 				 int nr_pages, struct io_mapped_ubuf *imu,
1056 				 struct page **last_hpage)
1057 {
1058 	int i, ret;
1059 
1060 	imu->acct_pages = 0;
1061 	for (i = 0; i < nr_pages; i++) {
1062 		if (!PageCompound(pages[i])) {
1063 			imu->acct_pages++;
1064 		} else {
1065 			struct page *hpage;
1066 
1067 			hpage = compound_head(pages[i]);
1068 			if (hpage == *last_hpage)
1069 				continue;
1070 			*last_hpage = hpage;
1071 			if (headpage_already_acct(ctx, pages, i, hpage))
1072 				continue;
1073 			imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;
1074 		}
1075 	}
1076 
1077 	if (!imu->acct_pages)
1078 		return 0;
1079 
1080 	ret = io_account_mem(ctx, imu->acct_pages);
1081 	if (ret)
1082 		imu->acct_pages = 0;
1083 	return ret;
1084 }
1085 
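/*
 * Pin the user pages backing [ubuf, ubuf + len) with FOLL_LONGTERM.
 * File-backed memory is only supported for shmem and hugetlb, and all vmas
 * must reference the same file. Returns the page array (with *npages set)
 * on success, or an ERR_PTR().
 */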
1086 struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages)
1087 {
1088 	unsigned long start, end, nr_pages;
1089 	struct vm_area_struct **vmas = NULL;
1090 	struct page **pages = NULL;
1091 	int i, pret, ret = -ENOMEM;
1092 
1093 	end = (ubuf + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1094 	start = ubuf >> PAGE_SHIFT;
1095 	nr_pages = end - start;
1096 
1097 	pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
1098 	if (!pages)
1099 		goto done;
1100 
1101 	vmas = kvmalloc_array(nr_pages, sizeof(struct vm_area_struct *),
1102 			      GFP_KERNEL);
1103 	if (!vmas)
1104 		goto done;
1105 
1106 	ret = 0;
1107 	mmap_read_lock(current->mm);
1108 	pret = pin_user_pages(ubuf, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
1109 			      pages, vmas);
1110 	if (pret == nr_pages) {
1111 		struct file *file = vmas[0]->vm_file;
1112 
1113 		/* don't support file backed memory */
1114 		for (i = 0; i < nr_pages; i++) {
1115 			if (vmas[i]->vm_file != file) {
1116 				ret = -EINVAL;
1117 				break;
1118 			}
1119 			if (!file)
1120 				continue;
1121 			if (!vma_is_shmem(vmas[i]) && !is_file_hugepages(file)) {
1122 				ret = -EOPNOTSUPP;
1123 				break;
1124 			}
1125 		}
1126 		*npages = nr_pages;
1127 	} else {
1128 		ret = pret < 0 ? pret : -EFAULT;
1129 	}
1130 	mmap_read_unlock(current->mm);
1131 	if (ret) {
1132 		/*
1133 		 * If we did a partial map, or found file-backed vmas,
1134 		 * release any pages we did get.
1135 		 */
1136 		if (pret > 0)
1137 			unpin_user_pages(pages, pret);
1138 		goto done;
1139 	}
1140 	ret = 0;
1141 done:
1142 	kvfree(vmas);
1143 	if (ret < 0) {
1144 		kvfree(pages);
1145 		pages = ERR_PTR(ret);
1146 	}
1147 	return pages;
1148 }
1149 
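/*
 * Register a single buffer: pin its pages, coalesce them into one bvec if
 * they all sit in the same folio (e.g. a huge page), account them against
 * RLIMIT_MEMLOCK and fill in an io_mapped_ubuf describing the mapping. A
 * NULL iov_base produces the shared dummy entry for a sparse slot.
 */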
1150 static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
1151 				  struct io_mapped_ubuf **pimu,
1152 				  struct page **last_hpage)
1153 {
1154 	struct io_mapped_ubuf *imu = NULL;
1155 	struct page **pages = NULL;
1156 	unsigned long off;
1157 	size_t size;
1158 	int ret, nr_pages, i;
1159 	struct folio *folio = NULL;
1160 
1161 	*pimu = ctx->dummy_ubuf;
1162 	if (!iov->iov_base)
1163 		return 0;
1164 
1165 	ret = -ENOMEM;
1166 	pages = io_pin_pages((unsigned long) iov->iov_base, iov->iov_len,
1167 				&nr_pages);
1168 	if (IS_ERR(pages)) {
1169 		ret = PTR_ERR(pages);
1170 		pages = NULL;
1171 		goto done;
1172 	}
1173 
1174 	/* If it's a huge page (single folio), try to coalesce the pages into a single bvec entry */
1175 	if (nr_pages > 1) {
1176 		folio = page_folio(pages[0]);
1177 		for (i = 1; i < nr_pages; i++) {
1178 			if (page_folio(pages[i]) != folio) {
1179 				folio = NULL;
1180 				break;
1181 			}
1182 		}
1183 		if (folio) {
1184 			/*
1185 			 * The pages are bound to the folio; this doesn't
1186 			 * actually unpin them, but drops all but one reference,
1187 			 * which is usually put down by io_buffer_unmap().
1188 			 * Note: this needs a better helper.
1189 			 */
1190 			unpin_user_pages(&pages[1], nr_pages - 1);
1191 			nr_pages = 1;
1192 		}
1193 	}
1194 
1195 	imu = kvmalloc(struct_size(imu, bvec, nr_pages), GFP_KERNEL);
1196 	if (!imu)
1197 		goto done;
1198 
1199 	ret = io_buffer_account_pin(ctx, pages, nr_pages, imu, last_hpage);
1200 	if (ret) {
1201 		unpin_user_pages(pages, nr_pages);
1202 		goto done;
1203 	}
1204 
1205 	off = (unsigned long) iov->iov_base & ~PAGE_MASK;
1206 	size = iov->iov_len;
1207 	/* store original address for later verification */
1208 	imu->ubuf = (unsigned long) iov->iov_base;
1209 	imu->ubuf_end = imu->ubuf + iov->iov_len;
1210 	imu->nr_bvecs = nr_pages;
1211 	*pimu = imu;
1212 	ret = 0;
1213 
1214 	if (folio) {
1215 		bvec_set_page(&imu->bvec[0], pages[0], size, off);
1216 		goto done;
1217 	}
1218 	for (i = 0; i < nr_pages; i++) {
1219 		size_t vec_len;
1220 
1221 		vec_len = min_t(size_t, size, PAGE_SIZE - off);
1222 		bvec_set_page(&imu->bvec[i], pages[i], vec_len, off);
1223 		off = 0;
1224 		size -= vec_len;
1225 	}
1226 done:
1227 	if (ret)
1228 		kvfree(imu);
1229 	kvfree(pages);
1230 	return ret;
1231 }
1232 
1233 static int io_buffers_map_alloc(struct io_ring_ctx *ctx, unsigned int nr_args)
1234 {
1235 	ctx->user_bufs = kcalloc(nr_args, sizeof(*ctx->user_bufs), GFP_KERNEL);
1236 	return ctx->user_bufs ? 0 : -ENOMEM;
1237 }
1238 
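/*
 * Register an array of user buffers, backing IORING_REGISTER_BUFFERS and
 * the tagged IORING_REGISTER_BUFFERS2 variant. As a rough, illustrative
 * sketch, userspace would do something like:
 *
 *	struct iovec iov = { .iov_base = buf, .iov_len = len };
 *	io_uring_register(ring_fd, IORING_REGISTER_BUFFERS, &iov, 1);
 *
 * Each iovec is validated and pinned via io_sqe_buffer_register(); passing
 * a NULL @arg (the sparse registration path) installs dummy entries that
 * can be filled in later with a buffer update.
 */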
1239 int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
1240 			    unsigned int nr_args, u64 __user *tags)
1241 {
1242 	struct page *last_hpage = NULL;
1243 	struct io_rsrc_data *data;
1244 	int i, ret;
1245 	struct iovec iov;
1246 
1247 	BUILD_BUG_ON(IORING_MAX_REG_BUFFERS >= (1u << 16));
1248 
1249 	if (ctx->user_bufs)
1250 		return -EBUSY;
1251 	if (!nr_args || nr_args > IORING_MAX_REG_BUFFERS)
1252 		return -EINVAL;
1253 	ret = io_rsrc_data_alloc(ctx, io_rsrc_buf_put, tags, nr_args, &data);
1254 	if (ret)
1255 		return ret;
1256 	ret = io_buffers_map_alloc(ctx, nr_args);
1257 	if (ret) {
1258 		io_rsrc_data_free(data);
1259 		return ret;
1260 	}
1261 
1262 	for (i = 0; i < nr_args; i++, ctx->nr_user_bufs++) {
1263 		if (arg) {
1264 			ret = io_copy_iov(ctx, &iov, arg, i);
1265 			if (ret)
1266 				break;
1267 			ret = io_buffer_validate(&iov);
1268 			if (ret)
1269 				break;
1270 		} else {
1271 			memset(&iov, 0, sizeof(iov));
1272 		}
1273 
1274 		if (!iov.iov_base && *io_get_tag_slot(data, i)) {
1275 			ret = -EINVAL;
1276 			break;
1277 		}
1278 
1279 		ret = io_sqe_buffer_register(ctx, &iov, &ctx->user_bufs[i],
1280 					     &last_hpage);
1281 		if (ret)
1282 			break;
1283 	}
1284 
1285 	WARN_ON_ONCE(ctx->buf_data);
1286 
1287 	ctx->buf_data = data;
1288 	if (ret)
1289 		__io_sqe_buffers_unregister(ctx);
1290 	return ret;
1291 }
1292 
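/*
 * Set up @iter as a bvec iterator over the registered buffer @imu for the
 * [buf_addr, buf_addr + len) range, after checking that the range lies
 * entirely inside the registered mapping. The bvec index and offset are
 * adjusted by hand rather than with iov_iter_advance(), which would be
 * slow for large offsets.
 */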
1293 int io_import_fixed(int ddir, struct iov_iter *iter,
1294 			   struct io_mapped_ubuf *imu,
1295 			   u64 buf_addr, size_t len)
1296 {
1297 	u64 buf_end;
1298 	size_t offset;
1299 
1300 	if (WARN_ON_ONCE(!imu))
1301 		return -EFAULT;
1302 	if (unlikely(check_add_overflow(buf_addr, (u64)len, &buf_end)))
1303 		return -EFAULT;
1304 	/* not inside the mapped region */
1305 	if (unlikely(buf_addr < imu->ubuf || buf_end > imu->ubuf_end))
1306 		return -EFAULT;
1307 
1308 	/*
1309 	 * The address might not be the start of the buffer; set the size
1310 	 * appropriately and advance the iterator to the right offset.
1311 	 */
1312 	offset = buf_addr - imu->ubuf;
1313 	iov_iter_bvec(iter, ddir, imu->bvec, imu->nr_bvecs, offset + len);
1314 
1315 	if (offset) {
1316 		/*
1317 		 * Don't use iov_iter_advance() here, as it's really slow for
1318 		 * using the latter parts of a big fixed buffer - it iterates
1319 		 * over each segment manually. We can cheat a bit here, because
1320 		 * we know that:
1321 		 *
1322 		 * 1) it's a BVEC iter, we set it up
1323 		 * 2) all bvecs are PAGE_SIZE in size, except potentially the
1324 		 *    first and last bvec
1325 		 *
1326 		 * So just find our index, and adjust the iterator afterwards.
1327 		 * If the offset is within the first bvec (or the whole first
1328 		 * bvec), just use iov_iter_advance(). This makes it easier
1329 		 * since we can just skip the first segment, which may not
1330 		 * be PAGE_SIZE aligned.
1331 		 */
1332 		const struct bio_vec *bvec = imu->bvec;
1333 
1334 		if (offset <= bvec->bv_len) {
1335 			/*
1336 			 * Note: huge-page buffers consist of one large
1337 			 * bvec entry and should always take this path. The
1338 			 * other branch doesn't expect non-PAGE_SIZE'd chunks.
1339 			 */
1340 			iter->bvec = bvec;
1341 			iter->nr_segs = bvec->bv_len;
1342 			iter->count -= offset;
1343 			iter->iov_offset = offset;
1344 		} else {
1345 			unsigned long seg_skip;
1346 
1347 			/* skip first vec */
1348 			offset -= bvec->bv_len;
1349 			seg_skip = 1 + (offset >> PAGE_SHIFT);
1350 
1351 			iter->bvec = bvec + seg_skip;
1352 			iter->nr_segs -= seg_skip;
1353 			iter->count -= bvec->bv_len + offset;
1354 			iter->iov_offset = offset & ~PAGE_MASK;
1355 		}
1356 	}
1357 
1358 	return 0;
1359 }
1360