// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/hugetlb.h>
#include <linux/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "openclose.h"
#include "rsrc.h"

struct io_rsrc_update {
	struct file			*file;
	u64				arg;
	u32				nr_args;
	u32				offset;
};

static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
				  struct io_mapped_ubuf **pimu,
				  struct page **last_hpage);

/* only define max */
#define IORING_MAX_FIXED_FILES	(1U << 20)
#define IORING_MAX_REG_BUFFERS	(1U << 14)

static inline bool io_put_rsrc_data_ref(struct io_rsrc_data *rsrc_data)
{
	return !--rsrc_data->refs;
}

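/*
 * Charge @nr_pages against the user's RLIMIT_MEMLOCK accounting. The cmpxchg
 * loop fails with -ENOMEM if the new total would exceed the limit.
 */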
int __io_account_mem(struct user_struct *user, unsigned long nr_pages)
{
	unsigned long page_limit, cur_pages, new_pages;

	if (!nr_pages)
		return 0;

	/* Don't allow more pages than we can safely lock */
	page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	cur_pages = atomic_long_read(&user->locked_vm);
	do {
		new_pages = cur_pages + nr_pages;
		if (new_pages > page_limit)
			return -ENOMEM;
	} while (!atomic_long_try_cmpxchg(&user->locked_vm,
					  &cur_pages, new_pages));
	return 0;
}

static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
{
	if (ctx->user)
		__io_unaccount_mem(ctx->user, nr_pages);

	if (ctx->mm_account)
		atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
}

static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
{
	int ret;

	if (ctx->user) {
		ret = __io_account_mem(ctx->user, nr_pages);
		if (ret)
			return ret;
	}

	if (ctx->mm_account)
		atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);

	return 0;
}

static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
		       void __user *arg, unsigned index)
{
	struct iovec __user *src;

#ifdef CONFIG_COMPAT
	if (ctx->compat) {
		struct compat_iovec __user *ciovs;
		struct compat_iovec ciov;

		ciovs = (struct compat_iovec __user *) arg;
		if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
			return -EFAULT;

		dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
		dst->iov_len = ciov.iov_len;
		return 0;
	}
#endif
	src = (struct iovec __user *) arg;
	if (copy_from_user(dst, &src[index], sizeof(*dst)))
		return -EFAULT;
	return 0;
}

static int io_buffer_validate(struct iovec *iov)
{
	unsigned long tmp, acct_len = iov->iov_len + (PAGE_SIZE - 1);

	/*
	 * Don't impose further limits on the size and buffer
	 * constraints here, we'll -EINVAL later when IO is
	 * submitted if they are wrong.
	 */
	if (!iov->iov_base)
		return iov->iov_len ? -EFAULT : 0;
	if (!iov->iov_len)
		return -EFAULT;

	/* arbitrary limit, but we need something */
	if (iov->iov_len > SZ_1G)
		return -EFAULT;

	if (check_add_overflow((unsigned long)iov->iov_base, acct_len, &tmp))
		return -EOVERFLOW;

	return 0;
}

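/*
 * Unpin the pages backing a registered buffer, drop its memory accounting
 * and free the io_mapped_ubuf, then clear the slot. The shared dummy_ubuf
 * placeholder is never freed.
 */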
static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf **slot)
{
	struct io_mapped_ubuf *imu = *slot;
	unsigned int i;

	if (imu != ctx->dummy_ubuf) {
		for (i = 0; i < imu->nr_bvecs; i++)
			unpin_user_page(imu->bvec[i].bv_page);
		if (imu->acct_pages)
			io_unaccount_mem(ctx, imu->acct_pages);
		kvfree(imu);
	}
	*slot = NULL;
}

static void io_rsrc_put_work_one(struct io_rsrc_data *rsrc_data,
				 struct io_rsrc_put *prsrc)
{
	struct io_ring_ctx *ctx = rsrc_data->ctx;

	if (prsrc->tag)
		io_post_aux_cqe(ctx, prsrc->tag, 0, 0);
	rsrc_data->do_put(ctx, prsrc);
}

static void __io_rsrc_put_work(struct io_rsrc_node *ref_node)
{
	struct io_rsrc_data *rsrc_data = ref_node->rsrc_data;
	struct io_rsrc_put *prsrc, *tmp;

	if (ref_node->inline_items)
		io_rsrc_put_work_one(rsrc_data, &ref_node->item);

	list_for_each_entry_safe(prsrc, tmp, &ref_node->item_list, list) {
		list_del(&prsrc->list);
		io_rsrc_put_work_one(rsrc_data, prsrc);
		kfree(prsrc);
	}

	io_rsrc_node_destroy(rsrc_data->ctx, ref_node);
	if (io_put_rsrc_data_ref(rsrc_data))
		complete(&rsrc_data->done);
}

void io_wait_rsrc_data(struct io_rsrc_data *data)
{
	if (data && !io_put_rsrc_data_ref(data))
		wait_for_completion(&data->done);
}

void io_rsrc_node_destroy(struct io_ring_ctx *ctx, struct io_rsrc_node *node)
{
	if (!io_alloc_cache_put(&ctx->rsrc_node_cache, &node->cache))
		kfree(node);
}

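/*
 * Called (with ->uring_lock held) when a node's reference count hits zero.
 * Mark it done and flush, in order, the nodes at the head of rsrc_ref_list
 * whose references have also dropped, putting their queued resources.
 */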
void io_rsrc_node_ref_zero(struct io_rsrc_node *node)
	__must_hold(&node->rsrc_data->ctx->uring_lock)
{
	struct io_ring_ctx *ctx = node->rsrc_data->ctx;

	node->done = true;
	while (!list_empty(&ctx->rsrc_ref_list)) {
		node = list_first_entry(&ctx->rsrc_ref_list,
					    struct io_rsrc_node, node);
		/* recycle ref nodes in order */
		if (!node->done)
			break;

		list_del(&node->node);
		__io_rsrc_put_work(node);
	}
}

struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx)
{
	struct io_rsrc_node *ref_node;
	struct io_cache_entry *entry;

	entry = io_alloc_cache_get(&ctx->rsrc_node_cache);
	if (entry) {
		ref_node = container_of(entry, struct io_rsrc_node, cache);
	} else {
		ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
		if (!ref_node)
			return NULL;
	}

	ref_node->rsrc_data = NULL;
	ref_node->refs = 1;
	INIT_LIST_HEAD(&ref_node->node);
	INIT_LIST_HEAD(&ref_node->item_list);
	ref_node->done = false;
	ref_node->inline_items = 0;
	return ref_node;
}

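/*
 * Retire the current rsrc node: attach it to @data_to_kill (taking an extra
 * data reference on its behalf), queue it on rsrc_ref_list and drop the
 * master reference, then install a freshly allocated node as ctx->rsrc_node.
 * io_rsrc_node_switch_start() must have been called, so the allocation here
 * is not expected to fail.
 */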
void io_rsrc_node_switch(struct io_ring_ctx *ctx,
			 struct io_rsrc_data *data_to_kill)
	__must_hold(&ctx->uring_lock)
{
	struct io_rsrc_node *node = ctx->rsrc_node;
	struct io_rsrc_node *backup = io_rsrc_node_alloc(ctx);

	if (WARN_ON_ONCE(!backup))
		return;

	data_to_kill->refs++;
	node->rsrc_data = data_to_kill;
	list_add_tail(&node->node, &ctx->rsrc_ref_list);
	/* put master ref */
	io_put_rsrc_node(ctx, node);
	ctx->rsrc_node = backup;
}

int io_rsrc_node_switch_start(struct io_ring_ctx *ctx)
{
	if (io_alloc_cache_empty(&ctx->rsrc_node_cache)) {
		struct io_rsrc_node *node = kzalloc(sizeof(*node), GFP_KERNEL);

		if (!node)
			return -ENOMEM;
		io_alloc_cache_put(&ctx->rsrc_node_cache, &node->cache);
	}
	return 0;
}

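/*
 * Wait until all outstanding references to @data have been dropped so its
 * table can be torn down. Switches to a fresh rsrc node, drops the initial
 * data reference, and if references remain, drops ->uring_lock and sleeps
 * on ->done, retrying if task work needs running or the data gets revived.
 */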
__cold static int io_rsrc_ref_quiesce(struct io_rsrc_data *data,
				      struct io_ring_ctx *ctx)
{
	int ret;

	/* As we may drop ->uring_lock, another task may have started a quiesce */
	if (data->quiesce)
		return -ENXIO;
	ret = io_rsrc_node_switch_start(ctx);
	if (ret)
		return ret;
	io_rsrc_node_switch(ctx, data);

	/* kill initial ref */
	if (io_put_rsrc_data_ref(data))
		return 0;

	data->quiesce = true;
	mutex_unlock(&ctx->uring_lock);
	do {
		ret = io_run_task_work_sig(ctx);
		if (ret < 0) {
			mutex_lock(&ctx->uring_lock);
			if (!data->refs) {
				ret = 0;
			} else {
				/* restore the master reference */
				data->refs++;
			}
			break;
		}
		ret = wait_for_completion_interruptible(&data->done);
		if (!ret) {
			mutex_lock(&ctx->uring_lock);
			if (!data->refs)
				break;
			/*
			 * it has been revived by another thread while
			 * we were unlocked
			 */
			mutex_unlock(&ctx->uring_lock);
		}
	} while (1);
	data->quiesce = false;

	return ret;
}

static void io_free_page_table(void **table, size_t size)
{
	unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);

	for (i = 0; i < nr_tables; i++)
		kfree(table[i]);
	kfree(table);
}

static void io_rsrc_data_free(struct io_rsrc_data *data)
{
	size_t size = data->nr * sizeof(data->tags[0][0]);

	if (data->tags)
		io_free_page_table((void **)data->tags, size);
	kfree(data);
}

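/*
 * Allocate a table of @size bytes split into page-sized chunks and return
 * the array of chunk pointers; used here for the per-resource tag table.
 */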
static __cold void **io_alloc_page_table(size_t size)
{
	unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);
	size_t init_size = size;
	void **table;

	table = kcalloc(nr_tables, sizeof(*table), GFP_KERNEL_ACCOUNT);
	if (!table)
		return NULL;

	for (i = 0; i < nr_tables; i++) {
		unsigned int this_size = min_t(size_t, size, PAGE_SIZE);

		table[i] = kzalloc(this_size, GFP_KERNEL_ACCOUNT);
		if (!table[i]) {
			io_free_page_table(table, init_size);
			return NULL;
		}
		size -= this_size;
	}
	return table;
}

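/*
 * Allocate and initialise an io_rsrc_data for @nr resources, copying in the
 * user supplied tags when @utags is provided.
 */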
__cold static int io_rsrc_data_alloc(struct io_ring_ctx *ctx,
				     rsrc_put_fn *do_put, u64 __user *utags,
				     unsigned nr, struct io_rsrc_data **pdata)
{
	struct io_rsrc_data *data;
	int ret = 0;
	unsigned i;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	data->tags = (u64 **)io_alloc_page_table(nr * sizeof(data->tags[0][0]));
	if (!data->tags) {
		kfree(data);
		return -ENOMEM;
	}

	data->nr = nr;
	data->ctx = ctx;
	data->do_put = do_put;
	data->refs = 1;
	if (utags) {
		ret = -EFAULT;
		for (i = 0; i < nr; i++) {
			u64 *tag_slot = io_get_tag_slot(data, i);

			if (copy_from_user(tag_slot, &utags[i],
					   sizeof(*tag_slot)))
				goto fail;
		}
	}
	init_completion(&data->done);
	*pdata = data;
	return 0;
fail:
	io_rsrc_data_free(data);
	return ret;
}

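/*
 * Apply a fixed-file update: skip a slot entirely for
 * IORING_REGISTER_FILES_SKIP, otherwise queue removal of any file currently
 * in the slot and, for an fd other than -1, install the new file. Returns
 * the number of entries processed, or an error if none were.
 */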
static int __io_sqe_files_update(struct io_ring_ctx *ctx,
				 struct io_uring_rsrc_update2 *up,
				 unsigned nr_args)
{
	u64 __user *tags = u64_to_user_ptr(up->tags);
	__s32 __user *fds = u64_to_user_ptr(up->data);
	struct io_rsrc_data *data = ctx->file_data;
	struct io_fixed_file *file_slot;
	struct file *file;
	int fd, i, err = 0;
	unsigned int done;
	bool needs_switch = false;

	if (!ctx->file_data)
		return -ENXIO;
	if (up->offset + nr_args > ctx->nr_user_files)
		return -EINVAL;

	for (done = 0; done < nr_args; done++) {
		u64 tag = 0;

		if ((tags && copy_from_user(&tag, &tags[done], sizeof(tag))) ||
		    copy_from_user(&fd, &fds[done], sizeof(fd))) {
			err = -EFAULT;
			break;
		}
		if ((fd == IORING_REGISTER_FILES_SKIP || fd == -1) && tag) {
			err = -EINVAL;
			break;
		}
		if (fd == IORING_REGISTER_FILES_SKIP)
			continue;

		i = array_index_nospec(up->offset + done, ctx->nr_user_files);
		file_slot = io_fixed_file_slot(&ctx->file_table, i);

		if (file_slot->file_ptr) {
			file = (struct file *)(file_slot->file_ptr & FFS_MASK);
			err = io_queue_rsrc_removal(data, i, ctx->rsrc_node, file);
			if (err)
				break;
			file_slot->file_ptr = 0;
			io_file_bitmap_clear(&ctx->file_table, i);
			needs_switch = true;
		}
		if (fd != -1) {
			file = fget(fd);
			if (!file) {
				err = -EBADF;
				break;
			}
			/*
			 * Don't allow io_uring instances to be registered. If
			 * UNIX isn't enabled, then this causes a reference
			 * cycle and this instance can never get freed. If UNIX
			 * is enabled we'll handle it just fine, but there's
			 * still no point in allowing a ring fd as it doesn't
			 * support regular read/write anyway.
			 */
			if (io_is_uring_fops(file)) {
				fput(file);
				err = -EBADF;
				break;
			}
			err = io_scm_file_account(ctx, file);
			if (err) {
				fput(file);
				break;
			}
			*io_get_tag_slot(data, i) = tag;
			io_fixed_file_set(file_slot, file);
			io_file_bitmap_set(&ctx->file_table, i);
		}
	}

	if (needs_switch)
		io_rsrc_node_switch(ctx, data);
	return done ? done : err;
}

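/*
 * Apply a registered-buffer update: pin and map each new buffer, queue the
 * buffer that previously occupied the slot for removal, and store the new
 * tag. Returns the number of entries processed, or an error if none were.
 */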
static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
				   struct io_uring_rsrc_update2 *up,
				   unsigned int nr_args)
{
	u64 __user *tags = u64_to_user_ptr(up->tags);
	struct iovec iov, __user *iovs = u64_to_user_ptr(up->data);
	struct page *last_hpage = NULL;
	bool needs_switch = false;
	__u32 done;
	int i, err;

	if (!ctx->buf_data)
		return -ENXIO;
	if (up->offset + nr_args > ctx->nr_user_bufs)
		return -EINVAL;

	for (done = 0; done < nr_args; done++) {
		struct io_mapped_ubuf *imu;
		int offset = up->offset + done;
		u64 tag = 0;

		err = io_copy_iov(ctx, &iov, iovs, done);
		if (err)
			break;
		if (tags && copy_from_user(&tag, &tags[done], sizeof(tag))) {
			err = -EFAULT;
			break;
		}
		err = io_buffer_validate(&iov);
		if (err)
			break;
		if (!iov.iov_base && tag) {
			err = -EINVAL;
			break;
		}
		err = io_sqe_buffer_register(ctx, &iov, &imu, &last_hpage);
		if (err)
			break;

		i = array_index_nospec(offset, ctx->nr_user_bufs);
		if (ctx->user_bufs[i] != ctx->dummy_ubuf) {
			err = io_queue_rsrc_removal(ctx->buf_data, i,
						    ctx->rsrc_node, ctx->user_bufs[i]);
			if (unlikely(err)) {
				io_buffer_unmap(ctx, &imu);
				break;
			}
			ctx->user_bufs[i] = ctx->dummy_ubuf;
			needs_switch = true;
		}

		ctx->user_bufs[i] = imu;
		*io_get_tag_slot(ctx->buf_data, i) = tag;
	}

	if (needs_switch)
		io_rsrc_node_switch(ctx, ctx->buf_data);
	return done ? done : err;
}

static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
				     struct io_uring_rsrc_update2 *up,
				     unsigned nr_args)
{
	__u32 tmp;
	int err;

	lockdep_assert_held(&ctx->uring_lock);

	if (check_add_overflow(up->offset, nr_args, &tmp))
		return -EOVERFLOW;
	err = io_rsrc_node_switch_start(ctx);
	if (err)
		return err;

	switch (type) {
	case IORING_RSRC_FILE:
		return __io_sqe_files_update(ctx, up, nr_args);
	case IORING_RSRC_BUFFER:
		return __io_sqe_buffers_update(ctx, up, nr_args);
	}
	return -EINVAL;
}

int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
			     unsigned nr_args)
{
	struct io_uring_rsrc_update2 up;

	if (!nr_args)
		return -EINVAL;
	memset(&up, 0, sizeof(up));
	if (copy_from_user(&up, arg, sizeof(struct io_uring_rsrc_update)))
		return -EFAULT;
	if (up.resv || up.resv2)
		return -EINVAL;
	return __io_register_rsrc_update(ctx, IORING_RSRC_FILE, &up, nr_args);
}

int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned size, unsigned type)
{
	struct io_uring_rsrc_update2 up;

	if (size != sizeof(up))
		return -EINVAL;
	if (copy_from_user(&up, arg, sizeof(up)))
		return -EFAULT;
	if (!up.nr || up.resv || up.resv2)
		return -EINVAL;
	return __io_register_rsrc_update(ctx, type, &up, up.nr);
}

__cold int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned int size, unsigned int type)
{
	struct io_uring_rsrc_register rr;

	/* keep it extendible */
	if (size != sizeof(rr))
		return -EINVAL;

	memset(&rr, 0, sizeof(rr));
	if (copy_from_user(&rr, arg, size))
		return -EFAULT;
	if (!rr.nr || rr.resv2)
		return -EINVAL;
	if (rr.flags & ~IORING_RSRC_REGISTER_SPARSE)
		return -EINVAL;

	switch (type) {
	case IORING_RSRC_FILE:
		if (rr.flags & IORING_RSRC_REGISTER_SPARSE && rr.data)
			break;
		return io_sqe_files_register(ctx, u64_to_user_ptr(rr.data),
					     rr.nr, u64_to_user_ptr(rr.tags));
	case IORING_RSRC_BUFFER:
		if (rr.flags & IORING_RSRC_REGISTER_SPARSE && rr.data)
			break;
		return io_sqe_buffers_register(ctx, u64_to_user_ptr(rr.data),
					       rr.nr, u64_to_user_ptr(rr.tags));
	}
	return -EINVAL;
}

int io_files_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);

	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
		return -EINVAL;
	if (sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	up->offset = READ_ONCE(sqe->off);
	up->nr_args = READ_ONCE(sqe->len);
	if (!up->nr_args)
		return -EINVAL;
	up->arg = READ_ONCE(sqe->addr);
	return 0;
}

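/*
 * IORING_FILE_INDEX_ALLOC variant of a files update: install each fd into a
 * free fixed-file slot and copy the chosen slot index back to userspace.
 */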
static int io_files_update_with_index_alloc(struct io_kiocb *req,
					    unsigned int issue_flags)
{
	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
	__s32 __user *fds = u64_to_user_ptr(up->arg);
	unsigned int done;
	struct file *file;
	int ret, fd;

	if (!req->ctx->file_data)
		return -ENXIO;

	for (done = 0; done < up->nr_args; done++) {
		if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
			ret = -EFAULT;
			break;
		}

		file = fget(fd);
		if (!file) {
			ret = -EBADF;
			break;
		}
		ret = io_fixed_fd_install(req, issue_flags, file,
					  IORING_FILE_INDEX_ALLOC);
		if (ret < 0)
			break;
		if (copy_to_user(&fds[done], &ret, sizeof(ret))) {
			__io_close_fixed(req->ctx, issue_flags, ret);
			ret = -EFAULT;
			break;
		}
	}

	if (done)
		return done;
	return ret;
}

int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_uring_rsrc_update2 up2;
	int ret;

	up2.offset = up->offset;
	up2.data = up->arg;
	up2.nr = 0;
	up2.tags = 0;
	up2.resv = 0;
	up2.resv2 = 0;

	if (up->offset == IORING_FILE_INDEX_ALLOC) {
		ret = io_files_update_with_index_alloc(req, issue_flags);
	} else {
		io_ring_submit_lock(ctx, issue_flags);
		ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE,
						&up2, up->nr_args);
		io_ring_submit_unlock(ctx, issue_flags);
	}

	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

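/*
 * Queue a resource (file or buffer) for deferred put on @node, carrying the
 * slot's tag so a CQE can be posted when it is finally released. The first
 * item per node is stored inline; further ones are allocated and linked.
 */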
int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
			  struct io_rsrc_node *node, void *rsrc)
{
	u64 *tag_slot = io_get_tag_slot(data, idx);
	struct io_rsrc_put *prsrc;
	bool inline_item = true;

	if (!node->inline_items) {
		prsrc = &node->item;
		node->inline_items++;
	} else {
		prsrc = kzalloc(sizeof(*prsrc), GFP_KERNEL);
		if (!prsrc)
			return -ENOMEM;
		inline_item = false;
	}

	prsrc->tag = *tag_slot;
	*tag_slot = 0;
	prsrc->rsrc = rsrc;
	if (!inline_item)
		list_add(&prsrc->list, &node->item_list);
	return 0;
}

void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
{
	int i;

	for (i = 0; i < ctx->nr_user_files; i++) {
		struct file *file = io_file_from_index(&ctx->file_table, i);

		/* skip scm accounted files, they'll be freed by ->ring_sock */
		if (!file || io_file_need_scm(file))
			continue;
		io_file_bitmap_clear(&ctx->file_table, i);
		fput(file);
	}

#if defined(CONFIG_UNIX)
	if (ctx->ring_sock) {
		struct sock *sock = ctx->ring_sock->sk;
		struct sk_buff *skb;

		while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
			kfree_skb(skb);
	}
#endif
	io_free_file_tables(&ctx->file_table);
	io_file_table_set_alloc_range(ctx, 0, 0);
	io_rsrc_data_free(ctx->file_data);
	ctx->file_data = NULL;
	ctx->nr_user_files = 0;
}

int io_sqe_files_unregister(struct io_ring_ctx *ctx)
{
	unsigned nr = ctx->nr_user_files;
	int ret;

	if (!ctx->file_data)
		return -ENXIO;

	/*
	 * Quiesce may drop ->uring_lock; prevent new requests from using
	 * the table while it isn't held.
	 */
	ctx->nr_user_files = 0;
	ret = io_rsrc_ref_quiesce(ctx->file_data, ctx);
	ctx->nr_user_files = nr;
	if (!ret)
		__io_sqe_files_unregister(ctx);
	return ret;
}

/*
 * Ensure the UNIX gc is aware of our file set, so we are certain that
 * the io_uring can be safely unregistered on process exit, even if we have
 * loops in the file referencing. We account only files that can hold other
 * files because otherwise they can't form a loop and so are not interesting
 * for GC.
 */
int __io_scm_file_account(struct io_ring_ctx *ctx, struct file *file)
{
#if defined(CONFIG_UNIX)
	struct sock *sk = ctx->ring_sock->sk;
	struct sk_buff_head *head = &sk->sk_receive_queue;
	struct scm_fp_list *fpl;
	struct sk_buff *skb;

	if (likely(!io_file_need_scm(file)))
		return 0;

	/*
	 * See if we can merge this file into an existing skb SCM_RIGHTS
	 * file set. If there's no room, fall back to allocating a new skb
	 * and filling it in.
	 */
	spin_lock_irq(&head->lock);
	skb = skb_peek(head);
	if (skb && UNIXCB(skb).fp->count < SCM_MAX_FD)
		__skb_unlink(skb, head);
	else
		skb = NULL;
	spin_unlock_irq(&head->lock);

	if (!skb) {
		fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
		if (!fpl)
			return -ENOMEM;

		skb = alloc_skb(0, GFP_KERNEL);
		if (!skb) {
			kfree(fpl);
			return -ENOMEM;
		}

		fpl->user = get_uid(current_user());
		fpl->max = SCM_MAX_FD;
		fpl->count = 0;

		UNIXCB(skb).fp = fpl;
		skb->sk = sk;
		skb->scm_io_uring = 1;
		skb->destructor = unix_destruct_scm;
		refcount_add(skb->truesize, &sk->sk_wmem_alloc);
	}

	fpl = UNIXCB(skb).fp;
	fpl->fp[fpl->count++] = get_file(file);
	unix_inflight(fpl->user, file);
	skb_queue_head(head, skb);
	fput(file);
#endif
	return 0;
}

static __cold void io_rsrc_file_scm_put(struct io_ring_ctx *ctx, struct file *file)
{
#if defined(CONFIG_UNIX)
	struct sock *sock = ctx->ring_sock->sk;
	struct sk_buff_head list, *head = &sock->sk_receive_queue;
	struct sk_buff *skb;
	int i;

	__skb_queue_head_init(&list);

	/*
	 * Find the skb that holds this file in its SCM_RIGHTS. When found,
	 * remove this entry and rearrange the file array.
	 */
	skb = skb_dequeue(head);
	while (skb) {
		struct scm_fp_list *fp;

		fp = UNIXCB(skb).fp;
		for (i = 0; i < fp->count; i++) {
			int left;

			if (fp->fp[i] != file)
				continue;

			unix_notinflight(fp->user, fp->fp[i]);
			left = fp->count - 1 - i;
			if (left) {
				memmove(&fp->fp[i], &fp->fp[i + 1],
						left * sizeof(struct file *));
			}
			fp->count--;
			if (!fp->count) {
				kfree_skb(skb);
				skb = NULL;
			} else {
				__skb_queue_tail(&list, skb);
			}
			fput(file);
			file = NULL;
			break;
		}

		if (!file)
			break;

		__skb_queue_tail(&list, skb);

		skb = skb_dequeue(head);
	}

	if (skb_peek(&list)) {
		spin_lock_irq(&head->lock);
		while ((skb = __skb_dequeue(&list)) != NULL)
			__skb_queue_tail(head, skb);
		spin_unlock_irq(&head->lock);
	}
#endif
}

static void io_rsrc_file_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
{
	struct file *file = prsrc->file;

	if (likely(!io_file_need_scm(file)))
		fput(file);
	else
		io_rsrc_file_scm_put(ctx, file);
}

int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
			  unsigned nr_args, u64 __user *tags)
{
	__s32 __user *fds = (__s32 __user *) arg;
	struct file *file;
	int fd, ret;
	unsigned i;

	if (ctx->file_data)
		return -EBUSY;
	if (!nr_args)
		return -EINVAL;
	if (nr_args > IORING_MAX_FIXED_FILES)
		return -EMFILE;
	if (nr_args > rlimit(RLIMIT_NOFILE))
		return -EMFILE;
	ret = io_rsrc_data_alloc(ctx, io_rsrc_file_put, tags, nr_args,
				 &ctx->file_data);
	if (ret)
		return ret;

	if (!io_alloc_file_tables(&ctx->file_table, nr_args)) {
		io_rsrc_data_free(ctx->file_data);
		ctx->file_data = NULL;
		return -ENOMEM;
	}

	for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
		struct io_fixed_file *file_slot;

		if (fds && copy_from_user(&fd, &fds[i], sizeof(fd))) {
			ret = -EFAULT;
			goto fail;
		}
		/* allow sparse sets */
		if (!fds || fd == -1) {
			ret = -EINVAL;
			if (unlikely(*io_get_tag_slot(ctx->file_data, i)))
				goto fail;
			continue;
		}

		file = fget(fd);
		ret = -EBADF;
		if (unlikely(!file))
			goto fail;

		/*
		 * Don't allow io_uring instances to be registered. If UNIX
		 * isn't enabled, then this causes a reference cycle and this
		 * instance can never get freed. If UNIX is enabled we'll
		 * handle it just fine, but there's still no point in allowing
		 * a ring fd as it doesn't support regular read/write anyway.
		 */
		if (io_is_uring_fops(file)) {
			fput(file);
			goto fail;
		}
		ret = io_scm_file_account(ctx, file);
		if (ret) {
			fput(file);
			goto fail;
		}
		file_slot = io_fixed_file_slot(&ctx->file_table, i);
		io_fixed_file_set(file_slot, file);
		io_file_bitmap_set(&ctx->file_table, i);
	}

	/* default it to the whole table */
	io_file_table_set_alloc_range(ctx, 0, ctx->nr_user_files);
	return 0;
fail:
	__io_sqe_files_unregister(ctx);
	return ret;
}

static void io_rsrc_buf_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
{
	io_buffer_unmap(ctx, &prsrc->buf);
	prsrc->buf = NULL;
}

void __io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
{
	unsigned int i;

	for (i = 0; i < ctx->nr_user_bufs; i++)
		io_buffer_unmap(ctx, &ctx->user_bufs[i]);
	kfree(ctx->user_bufs);
	io_rsrc_data_free(ctx->buf_data);
	ctx->user_bufs = NULL;
	ctx->buf_data = NULL;
	ctx->nr_user_bufs = 0;
}

int io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
{
	unsigned nr = ctx->nr_user_bufs;
	int ret;

	if (!ctx->buf_data)
		return -ENXIO;

	/*
	 * Quiesce may drop ->uring_lock; prevent new requests from using
	 * the table while it isn't held.
	 */
	ctx->nr_user_bufs = 0;
	ret = io_rsrc_ref_quiesce(ctx->buf_data, ctx);
	ctx->nr_user_bufs = nr;
	if (!ret)
		__io_sqe_buffers_unregister(ctx);
	return ret;
}

/*
 * Not super efficient, but this is only done at registration time. And we do cache
 * the last compound head, so generally we'll only do a full search if we don't
 * match that one.
 *
 * We check if the given compound head page has already been accounted, to
 * avoid double accounting it. This allows us to account the full size of the
 * page, not just the constituent pages of a huge page.
 */
static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
				  int nr_pages, struct page *hpage)
{
	int i, j;

	/* check current page array */
	for (i = 0; i < nr_pages; i++) {
		if (!PageCompound(pages[i]))
			continue;
		if (compound_head(pages[i]) == hpage)
			return true;
	}

	/* check previously registered pages */
	for (i = 0; i < ctx->nr_user_bufs; i++) {
		struct io_mapped_ubuf *imu = ctx->user_bufs[i];

		for (j = 0; j < imu->nr_bvecs; j++) {
			if (!PageCompound(imu->bvec[j].bv_page))
				continue;
			if (compound_head(imu->bvec[j].bv_page) == hpage)
				return true;
		}
	}

	return false;
}

static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
				 int nr_pages, struct io_mapped_ubuf *imu,
				 struct page **last_hpage)
{
	int i, ret;

	imu->acct_pages = 0;
	for (i = 0; i < nr_pages; i++) {
		if (!PageCompound(pages[i])) {
			imu->acct_pages++;
		} else {
			struct page *hpage;

			hpage = compound_head(pages[i]);
			if (hpage == *last_hpage)
				continue;
			*last_hpage = hpage;
			if (headpage_already_acct(ctx, pages, i, hpage))
				continue;
			imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;
		}
	}

	if (!imu->acct_pages)
		return 0;

	ret = io_account_mem(ctx, imu->acct_pages);
	if (ret)
		imu->acct_pages = 0;
	return ret;
}

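/*
 * Pin the user pages backing [ubuf, ubuf + len) with FOLL_LONGTERM, rejecting
 * file-backed mappings other than shmem and hugetlb. Returns the page array
 * (with *npages set) on success, or an ERR_PTR() on failure.
 */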
struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages)
{
	unsigned long start, end, nr_pages;
	struct vm_area_struct **vmas = NULL;
	struct page **pages = NULL;
	int i, pret, ret = -ENOMEM;

	end = (ubuf + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	start = ubuf >> PAGE_SHIFT;
	nr_pages = end - start;

	pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		goto done;

	vmas = kvmalloc_array(nr_pages, sizeof(struct vm_area_struct *),
			      GFP_KERNEL);
	if (!vmas)
		goto done;

	ret = 0;
	mmap_read_lock(current->mm);
	pret = pin_user_pages(ubuf, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
			      pages, vmas);
	if (pret == nr_pages) {
		struct file *file = vmas[0]->vm_file;

		/* don't support file backed memory */
		for (i = 0; i < nr_pages; i++) {
			if (vmas[i]->vm_file != file) {
				ret = -EINVAL;
				break;
			}
			if (!file)
				continue;
			if (!vma_is_shmem(vmas[i]) && !is_file_hugepages(file)) {
				ret = -EOPNOTSUPP;
				break;
			}
		}
		*npages = nr_pages;
	} else {
		ret = pret < 0 ? pret : -EFAULT;
	}
	mmap_read_unlock(current->mm);
	if (ret) {
		/*
		 * if we did partial map, or found file backed vmas,
		 * release any pages we did get
		 */
		if (pret > 0)
			unpin_user_pages(pages, pret);
		goto done;
	}
	ret = 0;
done:
	kvfree(vmas);
	if (ret < 0) {
		kvfree(pages);
		pages = ERR_PTR(ret);
	}
	return pages;
}

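/*
 * Pin and map a single user buffer into an io_mapped_ubuf: pin its pages,
 * coalesce them into one bvec if they all belong to the same folio, account
 * the pinned memory, and fill in the bvec table. A NULL iov_base installs
 * the dummy_ubuf placeholder for a sparse slot.
 */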
static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
				  struct io_mapped_ubuf **pimu,
				  struct page **last_hpage)
{
	struct io_mapped_ubuf *imu = NULL;
	struct page **pages = NULL;
	unsigned long off;
	size_t size;
	int ret, nr_pages, i;
	struct folio *folio = NULL;

	*pimu = ctx->dummy_ubuf;
	if (!iov->iov_base)
		return 0;

	ret = -ENOMEM;
	pages = io_pin_pages((unsigned long) iov->iov_base, iov->iov_len,
				&nr_pages);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		pages = NULL;
		goto done;
	}

	/* If it's a huge page, try to coalesce them into a single bvec entry */
	if (nr_pages > 1) {
		folio = page_folio(pages[0]);
		for (i = 1; i < nr_pages; i++) {
			if (page_folio(pages[i]) != folio) {
				folio = NULL;
				break;
			}
		}
		if (folio) {
			/*
			 * The pages are bound to the folio, so this doesn't
			 * actually unpin them; it drops all but one reference,
			 * and that last one is usually put by io_buffer_unmap().
			 * Note: this needs a better helper.
			 */
			unpin_user_pages(&pages[1], nr_pages - 1);
			nr_pages = 1;
		}
	}

	imu = kvmalloc(struct_size(imu, bvec, nr_pages), GFP_KERNEL);
	if (!imu)
		goto done;

	ret = io_buffer_account_pin(ctx, pages, nr_pages, imu, last_hpage);
	if (ret) {
		unpin_user_pages(pages, nr_pages);
		goto done;
	}

	off = (unsigned long) iov->iov_base & ~PAGE_MASK;
	size = iov->iov_len;
	/* store original address for later verification */
	imu->ubuf = (unsigned long) iov->iov_base;
	imu->ubuf_end = imu->ubuf + iov->iov_len;
	imu->nr_bvecs = nr_pages;
	*pimu = imu;
	ret = 0;

	if (folio) {
		bvec_set_page(&imu->bvec[0], pages[0], size, off);
		goto done;
	}
	for (i = 0; i < nr_pages; i++) {
		size_t vec_len;

		vec_len = min_t(size_t, size, PAGE_SIZE - off);
		bvec_set_page(&imu->bvec[i], pages[i], vec_len, off);
		off = 0;
		size -= vec_len;
	}
done:
	if (ret)
		kvfree(imu);
	kvfree(pages);
	return ret;
}

static int io_buffers_map_alloc(struct io_ring_ctx *ctx, unsigned int nr_args)
{
	ctx->user_bufs = kcalloc(nr_args, sizeof(*ctx->user_bufs), GFP_KERNEL);
	return ctx->user_bufs ? 0 : -ENOMEM;
}

int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned int nr_args, u64 __user *tags)
{
	struct page *last_hpage = NULL;
	struct io_rsrc_data *data;
	int i, ret;
	struct iovec iov;

	BUILD_BUG_ON(IORING_MAX_REG_BUFFERS >= (1u << 16));

	if (ctx->user_bufs)
		return -EBUSY;
	if (!nr_args || nr_args > IORING_MAX_REG_BUFFERS)
		return -EINVAL;
	ret = io_rsrc_data_alloc(ctx, io_rsrc_buf_put, tags, nr_args, &data);
	if (ret)
		return ret;
	ret = io_buffers_map_alloc(ctx, nr_args);
	if (ret) {
		io_rsrc_data_free(data);
		return ret;
	}

	for (i = 0; i < nr_args; i++, ctx->nr_user_bufs++) {
		if (arg) {
			ret = io_copy_iov(ctx, &iov, arg, i);
			if (ret)
				break;
			ret = io_buffer_validate(&iov);
			if (ret)
				break;
		} else {
			memset(&iov, 0, sizeof(iov));
		}

		if (!iov.iov_base && *io_get_tag_slot(data, i)) {
			ret = -EINVAL;
			break;
		}

		ret = io_sqe_buffer_register(ctx, &iov, &ctx->user_bufs[i],
					     &last_hpage);
		if (ret)
			break;
	}

	WARN_ON_ONCE(ctx->buf_data);

	ctx->buf_data = data;
	if (ret)
		__io_sqe_buffers_unregister(ctx);
	return ret;
}

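/*
 * Set up a bvec iterator over a registered buffer for [buf_addr, buf_addr +
 * len), verifying the range lies entirely inside the registration and
 * skipping ahead to the requested offset without walking every segment.
 */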
int io_import_fixed(int ddir, struct iov_iter *iter,
			   struct io_mapped_ubuf *imu,
			   u64 buf_addr, size_t len)
{
	u64 buf_end;
	size_t offset;

	if (WARN_ON_ONCE(!imu))
		return -EFAULT;
	if (unlikely(check_add_overflow(buf_addr, (u64)len, &buf_end)))
		return -EFAULT;
	/* not inside the mapped region */
	if (unlikely(buf_addr < imu->ubuf || buf_end > imu->ubuf_end))
		return -EFAULT;

	/*
	 * The range might not start at the beginning of the buffer; set the
	 * size appropriately and advance to the requested offset.
	 */
	offset = buf_addr - imu->ubuf;
	iov_iter_bvec(iter, ddir, imu->bvec, imu->nr_bvecs, offset + len);

	if (offset) {
		/*
		 * Don't use iov_iter_advance() here, as it's really slow for
		 * using the latter parts of a big fixed buffer - it iterates
		 * over each segment manually. We can cheat a bit here, because
		 * we know that:
		 *
		 * 1) it's a BVEC iter, we set it up
		 * 2) all bvecs are PAGE_SIZE in size, except potentially the
		 *    first and last bvec
		 *
		 * So just find our index, and adjust the iterator afterwards.
		 * If the offset falls within the first bvec, adjust the
		 * iterator in place. Otherwise skip whole bvecs past the
		 * first; only that first segment may not be PAGE_SIZE
		 * aligned, which keeps the skip arithmetic simple.
		 */
		const struct bio_vec *bvec = imu->bvec;

		if (offset <= bvec->bv_len) {
			/*
			 * Note: huge page buffers consist of one large bvec
			 * entry and always take this path. The other branch
			 * doesn't expect chunks that aren't PAGE_SIZE'd.
			 */
			iter->bvec = bvec;
			iter->nr_segs = bvec->bv_len;
			iter->count -= offset;
			iter->iov_offset = offset;
		} else {
			unsigned long seg_skip;

			/* skip first vec */
			offset -= bvec->bv_len;
			seg_skip = 1 + (offset >> PAGE_SHIFT);

			iter->bvec = bvec + seg_skip;
			iter->nr_segs -= seg_skip;
			iter->count -= bvec->bv_len + offset;
			iter->iov_offset = offset & ~PAGE_MASK;
		}
	}

	return 0;
}