// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/hugetlb.h>
#include <linux/compat.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "openclose.h"
#include "rsrc.h"

struct io_rsrc_update {
	struct file			*file;
	u64				arg;
	u32				nr_args;
	u32				offset;
};

static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
				  struct io_mapped_ubuf **pimu,
				  struct page **last_hpage);

/* only define max */
#define IORING_MAX_FIXED_FILES	(1U << 20)
#define IORING_MAX_REG_BUFFERS	(1U << 14)

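/*
 * Charge nr_pages against the user's RLIMIT_MEMLOCK allowance. The cmpxchg
 * loop keeps concurrent callers from racing past the limit.
 */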
int __io_account_mem(struct user_struct *user, unsigned long nr_pages)
{
	unsigned long page_limit, cur_pages, new_pages;

	if (!nr_pages)
		return 0;

	/* Don't allow more pages than we can safely lock */
	page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	cur_pages = atomic_long_read(&user->locked_vm);
	do {
		new_pages = cur_pages + nr_pages;
		if (new_pages > page_limit)
			return -ENOMEM;
	} while (!atomic_long_try_cmpxchg(&user->locked_vm,
					  &cur_pages, new_pages));
	return 0;
}

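/* Undo per-user rlimit and per-mm pinned_vm accounting for nr_pages. */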
static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
{
	if (ctx->user)
		__io_unaccount_mem(ctx->user, nr_pages);

	if (ctx->mm_account)
		atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
}

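/* Account nr_pages against both the user rlimit and the mm's pinned_vm. */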
static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
{
	int ret;

	if (ctx->user) {
		ret = __io_account_mem(ctx->user, nr_pages);
		if (ret)
			return ret;
	}

	if (ctx->mm_account)
		atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);

	return 0;
}

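/*
 * Copy the index'th iovec from userspace, converting from the compat
 * layout if the ring was created by a compat task.
 */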
static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
		       void __user *arg, unsigned index)
{
	struct iovec __user *src;

#ifdef CONFIG_COMPAT
	if (ctx->compat) {
		struct compat_iovec __user *ciovs;
		struct compat_iovec ciov;

		ciovs = (struct compat_iovec __user *) arg;
		if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
			return -EFAULT;

		dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
		dst->iov_len = ciov.iov_len;
		return 0;
	}
#endif
	src = (struct iovec __user *) arg;
	if (copy_from_user(dst, &src[index], sizeof(*dst)))
		return -EFAULT;
	return 0;
}

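/* Sanity-check an iovec before registration: base, length and overflow. */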
static int io_buffer_validate(struct iovec *iov)
{
	unsigned long tmp, acct_len = iov->iov_len + (PAGE_SIZE - 1);

	/*
	 * Don't impose further limits on the size and buffer
	 * constraints here; we'll return -EINVAL at IO submission
	 * time if they are wrong.
	 */
	if (!iov->iov_base)
		return iov->iov_len ? -EFAULT : 0;
	if (!iov->iov_len)
		return -EFAULT;

	/* arbitrary limit, but we need something */
	if (iov->iov_len > SZ_1G)
		return -EFAULT;

	if (check_add_overflow((unsigned long)iov->iov_base, acct_len, &tmp))
		return -EOVERFLOW;

	return 0;
}

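/*
 * Release a registered buffer slot: unpin its pages, drop the memory
 * accounting and free the mapping, unless it's the shared dummy buffer.
 */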
static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf **slot)
{
	struct io_mapped_ubuf *imu = *slot;
	unsigned int i;

	if (imu != ctx->dummy_ubuf) {
		for (i = 0; i < imu->nr_bvecs; i++)
			unpin_user_page(imu->bvec[i].bv_page);
		if (imu->acct_pages)
			io_unaccount_mem(ctx, imu->acct_pages);
		kvfree(imu);
	}
	*slot = NULL;
}

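/* Put one queued resource, posting its CQE tag first if one was attached. */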
static void io_rsrc_put_work_one(struct io_rsrc_data *rsrc_data,
				 struct io_rsrc_put *prsrc)
{
	struct io_ring_ctx *ctx = rsrc_data->ctx;

	if (prsrc->tag)
		io_post_aux_cqe(ctx, prsrc->tag, 0, 0);
	rsrc_data->do_put(ctx, prsrc);
}

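/*
 * Put the inline item and all list items of a retired node, then recycle
 * the node itself.
 */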
static void __io_rsrc_put_work(struct io_rsrc_node *ref_node)
{
	struct io_rsrc_data *rsrc_data = ref_node->rsrc_data;
	struct io_rsrc_put *prsrc, *tmp;

	if (ref_node->inline_items)
		io_rsrc_put_work_one(rsrc_data, &ref_node->item);

	list_for_each_entry_safe(prsrc, tmp, &ref_node->item_list, list) {
		list_del(&prsrc->list);
		io_rsrc_put_work_one(rsrc_data, prsrc);
		kfree(prsrc);
	}

	io_rsrc_node_destroy(rsrc_data->ctx, ref_node);
}

void io_rsrc_node_destroy(struct io_ring_ctx *ctx, struct io_rsrc_node *node)
{
	if (!io_alloc_cache_put(&ctx->rsrc_node_cache, &node->cache))
		kfree(node);
}

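/*
 * Called with the last node reference gone: flush completed nodes from the
 * head of the ref list in order, and wake quiesce waiters once it's empty.
 */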
void io_rsrc_node_ref_zero(struct io_rsrc_node *node)
	__must_hold(&node->rsrc_data->ctx->uring_lock)
{
	struct io_ring_ctx *ctx = node->rsrc_data->ctx;

	while (!list_empty(&ctx->rsrc_ref_list)) {
		node = list_first_entry(&ctx->rsrc_ref_list,
					    struct io_rsrc_node, node);
		/* recycle ref nodes in order */
		if (node->refs)
			break;
		list_del(&node->node);
		__io_rsrc_put_work(node);
	}
	if (list_empty(&ctx->rsrc_ref_list) && unlikely(ctx->rsrc_quiesce))
		wake_up_all(&ctx->rsrc_quiesce_wq);
}

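/* Get a node from the ctx cache, or allocate a fresh one with a single ref. */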
struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx)
{
	struct io_rsrc_node *ref_node;
	struct io_cache_entry *entry;

	entry = io_alloc_cache_get(&ctx->rsrc_node_cache);
	if (entry) {
		ref_node = container_of(entry, struct io_rsrc_node, cache);
	} else {
		ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
		if (!ref_node)
			return NULL;
	}

	ref_node->rsrc_data = NULL;
	ref_node->refs = 1;
	INIT_LIST_HEAD(&ref_node->node);
	INIT_LIST_HEAD(&ref_node->item_list);
	ref_node->inline_items = 0;
	return ref_node;
}

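/*
 * Retire the active rsrc node: attach the data being killed, queue the node
 * on the ref list and drop the master ref, then install a fresh node.
 */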
void io_rsrc_node_switch(struct io_ring_ctx *ctx,
			 struct io_rsrc_data *data_to_kill)
	__must_hold(&ctx->uring_lock)
{
	struct io_rsrc_node *node = ctx->rsrc_node;
	struct io_rsrc_node *backup = io_rsrc_node_alloc(ctx);

	if (WARN_ON_ONCE(!backup))
		return;

	node->rsrc_data = data_to_kill;
	list_add_tail(&node->node, &ctx->rsrc_ref_list);
	/* put master ref */
	io_put_rsrc_node(ctx, node);
	ctx->rsrc_node = backup;
}

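/* Pre-populate the node cache so a following node switch cannot fail. */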
int __io_rsrc_node_switch_start(struct io_ring_ctx *ctx)
{
	struct io_rsrc_node *node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!node)
		return -ENOMEM;
	io_alloc_cache_put(&ctx->rsrc_node_cache, &node->cache);
	return 0;
}

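/*
 * Wait for all in-flight references to the given rsrc data to drain,
 * dropping ->uring_lock while sleeping and honouring pending signals.
 */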
__cold static int io_rsrc_ref_quiesce(struct io_rsrc_data *data,
				      struct io_ring_ctx *ctx)
{
	DEFINE_WAIT(we);
	int ret;

	/* As we may drop ->uring_lock, another task may have started a quiesce */
	if (data->quiesce)
		return -ENXIO;
	ret = io_rsrc_node_switch_start(ctx);
	if (ret)
		return ret;
	io_rsrc_node_switch(ctx, data);

	if (list_empty(&ctx->rsrc_ref_list))
		return 0;

	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
		atomic_set(&ctx->cq_wait_nr, 1);
		smp_mb();
	}

	ctx->rsrc_quiesce++;
	data->quiesce = true;
	do {
		prepare_to_wait(&ctx->rsrc_quiesce_wq, &we, TASK_INTERRUPTIBLE);
		mutex_unlock(&ctx->uring_lock);

		ret = io_run_task_work_sig(ctx);
		if (ret < 0) {
			mutex_lock(&ctx->uring_lock);
			if (list_empty(&ctx->rsrc_ref_list))
				ret = 0;
			break;
		}

		schedule();
		__set_current_state(TASK_RUNNING);
		mutex_lock(&ctx->uring_lock);
		ret = 0;
	} while (!list_empty(&ctx->rsrc_ref_list));

	finish_wait(&ctx->rsrc_quiesce_wq, &we);
	data->quiesce = false;
	ctx->rsrc_quiesce--;

	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
		atomic_set(&ctx->cq_wait_nr, 0);
		smp_mb();
	}
	return ret;
}

static void io_free_page_table(void **table, size_t size)
{
	unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);

	for (i = 0; i < nr_tables; i++)
		kfree(table[i]);
	kfree(table);
}

static void io_rsrc_data_free(struct io_rsrc_data *data)
{
	size_t size = data->nr * sizeof(data->tags[0][0]);

	if (data->tags)
		io_free_page_table((void **)data->tags, size);
	kfree(data);
}

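/*
 * Allocate a table covering size bytes as an array of page-sized chunks,
 * avoiding the need for one large contiguous allocation.
 */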
static __cold void **io_alloc_page_table(size_t size)
{
	unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);
	size_t init_size = size;
	void **table;

	table = kcalloc(nr_tables, sizeof(*table), GFP_KERNEL_ACCOUNT);
	if (!table)
		return NULL;

	for (i = 0; i < nr_tables; i++) {
		unsigned int this_size = min_t(size_t, size, PAGE_SIZE);

		table[i] = kzalloc(this_size, GFP_KERNEL_ACCOUNT);
		if (!table[i]) {
			io_free_page_table(table, init_size);
			return NULL;
		}
		size -= this_size;
	}
	return table;
}

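/* Allocate rsrc data for nr entries and copy in the optional user tags. */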
__cold static int io_rsrc_data_alloc(struct io_ring_ctx *ctx,
				     rsrc_put_fn *do_put, u64 __user *utags,
				     unsigned nr, struct io_rsrc_data **pdata)
{
	struct io_rsrc_data *data;
	int ret = 0;
	unsigned i;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	data->tags = (u64 **)io_alloc_page_table(nr * sizeof(data->tags[0][0]));
	if (!data->tags) {
		kfree(data);
		return -ENOMEM;
	}

	data->nr = nr;
	data->ctx = ctx;
	data->do_put = do_put;
	if (utags) {
		ret = -EFAULT;
		for (i = 0; i < nr; i++) {
			u64 *tag_slot = io_get_tag_slot(data, i);

			if (copy_from_user(tag_slot, &utags[i],
					   sizeof(*tag_slot)))
				goto fail;
		}
	}
	*pdata = data;
	return 0;
fail:
	io_rsrc_data_free(data);
	return ret;
}

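/*
 * Apply a batch of fixed-file updates: queue removal of occupied slots and
 * install the new files, switching rsrc nodes if anything was dropped.
 */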
static int __io_sqe_files_update(struct io_ring_ctx *ctx,
				 struct io_uring_rsrc_update2 *up,
				 unsigned nr_args)
{
	u64 __user *tags = u64_to_user_ptr(up->tags);
	__s32 __user *fds = u64_to_user_ptr(up->data);
	struct io_rsrc_data *data = ctx->file_data;
	struct io_fixed_file *file_slot;
	struct file *file;
	int fd, i, err = 0;
	unsigned int done;
	bool needs_switch = false;

	if (!ctx->file_data)
		return -ENXIO;
	if (up->offset + nr_args > ctx->nr_user_files)
		return -EINVAL;

	for (done = 0; done < nr_args; done++) {
		u64 tag = 0;

		if ((tags && copy_from_user(&tag, &tags[done], sizeof(tag))) ||
		    copy_from_user(&fd, &fds[done], sizeof(fd))) {
			err = -EFAULT;
			break;
		}
		if ((fd == IORING_REGISTER_FILES_SKIP || fd == -1) && tag) {
			err = -EINVAL;
			break;
		}
		if (fd == IORING_REGISTER_FILES_SKIP)
			continue;

		i = array_index_nospec(up->offset + done, ctx->nr_user_files);
		file_slot = io_fixed_file_slot(&ctx->file_table, i);

		if (file_slot->file_ptr) {
			file = (struct file *)(file_slot->file_ptr & FFS_MASK);
			err = io_queue_rsrc_removal(data, i, ctx->rsrc_node, file);
			if (err)
				break;
			file_slot->file_ptr = 0;
			io_file_bitmap_clear(&ctx->file_table, i);
			needs_switch = true;
		}
		if (fd != -1) {
			file = fget(fd);
			if (!file) {
				err = -EBADF;
				break;
			}
			/*
			 * Don't allow io_uring instances to be registered. If
			 * UNIX isn't enabled, then this causes a reference
			 * cycle and this instance can never get freed. If UNIX
			 * is enabled we'll handle it just fine, but there's
			 * still no point in allowing a ring fd as it doesn't
			 * support regular read/write anyway.
			 */
			if (io_is_uring_fops(file)) {
				fput(file);
				err = -EBADF;
				break;
			}
			err = io_scm_file_account(ctx, file);
			if (err) {
				fput(file);
				break;
			}
			*io_get_tag_slot(data, i) = tag;
			io_fixed_file_set(file_slot, file);
			io_file_bitmap_set(&ctx->file_table, i);
		}
	}

	if (needs_switch)
		io_rsrc_node_switch(ctx, data);
	return done ? done : err;
}

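/*
 * Apply a batch of registered-buffer updates, registering the new buffers
 * and queueing replaced ones for deferred release.
 */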
static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
				   struct io_uring_rsrc_update2 *up,
				   unsigned int nr_args)
{
	u64 __user *tags = u64_to_user_ptr(up->tags);
	struct iovec iov, __user *iovs = u64_to_user_ptr(up->data);
	struct page *last_hpage = NULL;
	bool needs_switch = false;
	__u32 done;
	int i, err;

	if (!ctx->buf_data)
		return -ENXIO;
	if (up->offset + nr_args > ctx->nr_user_bufs)
		return -EINVAL;

	for (done = 0; done < nr_args; done++) {
		struct io_mapped_ubuf *imu;
		int offset = up->offset + done;
		u64 tag = 0;

		err = io_copy_iov(ctx, &iov, iovs, done);
		if (err)
			break;
		if (tags && copy_from_user(&tag, &tags[done], sizeof(tag))) {
			err = -EFAULT;
			break;
		}
		err = io_buffer_validate(&iov);
		if (err)
			break;
		if (!iov.iov_base && tag) {
			err = -EINVAL;
			break;
		}
		err = io_sqe_buffer_register(ctx, &iov, &imu, &last_hpage);
		if (err)
			break;

		i = array_index_nospec(offset, ctx->nr_user_bufs);
		if (ctx->user_bufs[i] != ctx->dummy_ubuf) {
			err = io_queue_rsrc_removal(ctx->buf_data, i,
						    ctx->rsrc_node, ctx->user_bufs[i]);
			if (unlikely(err)) {
				io_buffer_unmap(ctx, &imu);
				break;
			}
			ctx->user_bufs[i] = ctx->dummy_ubuf;
			needs_switch = true;
		}

		ctx->user_bufs[i] = imu;
		*io_get_tag_slot(ctx->buf_data, i) = tag;
	}

	if (needs_switch)
		io_rsrc_node_switch(ctx, ctx->buf_data);
	return done ? done : err;
}

static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
				     struct io_uring_rsrc_update2 *up,
				     unsigned nr_args)
{
	__u32 tmp;
	int err;

	lockdep_assert_held(&ctx->uring_lock);

	if (check_add_overflow(up->offset, nr_args, &tmp))
		return -EOVERFLOW;
	err = io_rsrc_node_switch_start(ctx);
	if (err)
		return err;

	switch (type) {
	case IORING_RSRC_FILE:
		return __io_sqe_files_update(ctx, up, nr_args);
	case IORING_RSRC_BUFFER:
		return __io_sqe_buffers_update(ctx, up, nr_args);
	}
	return -EINVAL;
}

int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
			     unsigned nr_args)
{
	struct io_uring_rsrc_update2 up;

	if (!nr_args)
		return -EINVAL;
	memset(&up, 0, sizeof(up));
	if (copy_from_user(&up, arg, sizeof(struct io_uring_rsrc_update)))
		return -EFAULT;
	if (up.resv || up.resv2)
		return -EINVAL;
	return __io_register_rsrc_update(ctx, IORING_RSRC_FILE, &up, nr_args);
}

int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned size, unsigned type)
{
	struct io_uring_rsrc_update2 up;

	if (size != sizeof(up))
		return -EINVAL;
	if (copy_from_user(&up, arg, sizeof(up)))
		return -EFAULT;
	if (!up.nr || up.resv || up.resv2)
		return -EINVAL;
	return __io_register_rsrc_update(ctx, type, &up, up.nr);
}

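/*
 * Register a fixed file or buffer table. With IORING_RSRC_REGISTER_SPARSE
 * the table is created empty and no fd/iovec array may be passed in.
 */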
__cold int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned int size, unsigned int type)
{
	struct io_uring_rsrc_register rr;

	/* keep it extendible */
	if (size != sizeof(rr))
		return -EINVAL;

	memset(&rr, 0, sizeof(rr));
	if (copy_from_user(&rr, arg, size))
		return -EFAULT;
	if (!rr.nr || rr.resv2)
		return -EINVAL;
	if (rr.flags & ~IORING_RSRC_REGISTER_SPARSE)
		return -EINVAL;

	switch (type) {
	case IORING_RSRC_FILE:
		if (rr.flags & IORING_RSRC_REGISTER_SPARSE && rr.data)
			break;
		return io_sqe_files_register(ctx, u64_to_user_ptr(rr.data),
					     rr.nr, u64_to_user_ptr(rr.tags));
	case IORING_RSRC_BUFFER:
		if (rr.flags & IORING_RSRC_REGISTER_SPARSE && rr.data)
			break;
		return io_sqe_buffers_register(ctx, u64_to_user_ptr(rr.data),
					       rr.nr, u64_to_user_ptr(rr.tags));
	}
	return -EINVAL;
}

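/* Prepare an IORING_OP_FILES_UPDATE request. */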
int io_files_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);

	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
		return -EINVAL;
	if (sqe->rw_flags || sqe->splice_fd_in)
		return -EINVAL;

	up->offset = READ_ONCE(sqe->off);
	up->nr_args = READ_ONCE(sqe->len);
	if (!up->nr_args)
		return -EINVAL;
	up->arg = READ_ONCE(sqe->addr);
	return 0;
}

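/*
 * IORING_FILE_INDEX_ALLOC variant: install each fd into a free slot and
 * copy the chosen slot back to userspace, closing it again if the copy
 * fails.
 */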
static int io_files_update_with_index_alloc(struct io_kiocb *req,
					    unsigned int issue_flags)
{
	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
	__s32 __user *fds = u64_to_user_ptr(up->arg);
	unsigned int done;
	struct file *file;
	int ret, fd;

	if (!req->ctx->file_data)
		return -ENXIO;

	for (done = 0; done < up->nr_args; done++) {
		if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
			ret = -EFAULT;
			break;
		}

		file = fget(fd);
		if (!file) {
			ret = -EBADF;
			break;
		}
		ret = io_fixed_fd_install(req, issue_flags, file,
					  IORING_FILE_INDEX_ALLOC);
		if (ret < 0)
			break;
		if (copy_to_user(&fds[done], &ret, sizeof(ret))) {
			__io_close_fixed(req->ctx, issue_flags, ret);
			ret = -EFAULT;
			break;
		}
	}

	if (done)
		return done;
	return ret;
}

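/* Issue side of IORING_OP_FILES_UPDATE, sharing the registration helpers. */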
int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
	struct io_ring_ctx *ctx = req->ctx;
	struct io_uring_rsrc_update2 up2;
	int ret;

	up2.offset = up->offset;
	up2.data = up->arg;
	up2.nr = 0;
	up2.tags = 0;
	up2.resv = 0;
	up2.resv2 = 0;

	if (up->offset == IORING_FILE_INDEX_ALLOC) {
		ret = io_files_update_with_index_alloc(req, issue_flags);
	} else {
		io_ring_submit_lock(ctx, issue_flags);
		ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE,
						&up2, up->nr_args);
		io_ring_submit_unlock(ctx, issue_flags);
	}

	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_OK;
}

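/*
 * Queue a resource for deferred put via the given node, using the node's
 * inline item when it's free to avoid an allocation.
 */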
int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
			  struct io_rsrc_node *node, void *rsrc)
{
	u64 *tag_slot = io_get_tag_slot(data, idx);
	struct io_rsrc_put *prsrc;
	bool inline_item = true;

	if (!node->inline_items) {
		prsrc = &node->item;
		node->inline_items++;
	} else {
		prsrc = kzalloc(sizeof(*prsrc), GFP_KERNEL);
		if (!prsrc)
			return -ENOMEM;
		inline_item = false;
	}

	prsrc->tag = *tag_slot;
	*tag_slot = 0;
	prsrc->rsrc = rsrc;
	if (!inline_item)
		list_add(&prsrc->list, &node->item_list);
	return 0;
}

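/*
 * Tear down the fixed file table; SCM-accounted files are freed later
 * along with ->ring_sock.
 */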
void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
{
	int i;

	for (i = 0; i < ctx->nr_user_files; i++) {
		struct file *file = io_file_from_index(&ctx->file_table, i);

		/* skip SCM-accounted files; they'll be freed by ->ring_sock */
		if (!file || io_file_need_scm(file))
			continue;
		io_file_bitmap_clear(&ctx->file_table, i);
		fput(file);
	}

#if defined(CONFIG_UNIX)
	if (ctx->ring_sock) {
		struct sock *sock = ctx->ring_sock->sk;
		struct sk_buff *skb;

		while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
			kfree_skb(skb);
	}
#endif
	io_free_file_tables(&ctx->file_table);
	io_file_table_set_alloc_range(ctx, 0, 0);
	io_rsrc_data_free(ctx->file_data);
	ctx->file_data = NULL;
	ctx->nr_user_files = 0;
}

int io_sqe_files_unregister(struct io_ring_ctx *ctx)
{
	unsigned nr = ctx->nr_user_files;
	int ret;

	if (!ctx->file_data)
		return -ENXIO;

	/*
	 * Quiesce may drop ->uring_lock; while it's not held, prevent
	 * new requests from using the table.
	 */
	ctx->nr_user_files = 0;
	ret = io_rsrc_ref_quiesce(ctx->file_data, ctx);
	ctx->nr_user_files = nr;
	if (!ret)
		__io_sqe_files_unregister(ctx);
	return ret;
}

/*
 * Ensure the UNIX GC is aware of our file set, so we are certain that
 * the io_uring instance can be safely unregistered on process exit, even
 * if we have loops in the file referencing. We account only files that
 * can hold other files, because otherwise they can't form a loop and so
 * are not interesting for GC.
 */
int __io_scm_file_account(struct io_ring_ctx *ctx, struct file *file)
{
#if defined(CONFIG_UNIX)
	struct sock *sk = ctx->ring_sock->sk;
	struct sk_buff_head *head = &sk->sk_receive_queue;
	struct scm_fp_list *fpl;
	struct sk_buff *skb;

	if (likely(!io_file_need_scm(file)))
		return 0;

	/*
	 * See if we can merge this file into an existing skb SCM_RIGHTS
	 * file set. If there's no room, fall back to allocating a new skb
	 * and filling it in.
	 */
	spin_lock_irq(&head->lock);
	skb = skb_peek(head);
	if (skb && UNIXCB(skb).fp->count < SCM_MAX_FD)
		__skb_unlink(skb, head);
	else
		skb = NULL;
	spin_unlock_irq(&head->lock);

	if (!skb) {
		fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
		if (!fpl)
			return -ENOMEM;

		skb = alloc_skb(0, GFP_KERNEL);
		if (!skb) {
			kfree(fpl);
			return -ENOMEM;
		}

		fpl->user = get_uid(current_user());
		fpl->max = SCM_MAX_FD;
		fpl->count = 0;

		UNIXCB(skb).fp = fpl;
		skb->sk = sk;
		skb->scm_io_uring = 1;
		skb->destructor = unix_destruct_scm;
		refcount_add(skb->truesize, &sk->sk_wmem_alloc);
	}

	fpl = UNIXCB(skb).fp;
	fpl->fp[fpl->count++] = get_file(file);
	unix_inflight(fpl->user, file);
	skb_queue_head(head, skb);
	fput(file);
#endif
	return 0;
}

static __cold void io_rsrc_file_scm_put(struct io_ring_ctx *ctx, struct file *file)
{
#if defined(CONFIG_UNIX)
	struct sock *sock = ctx->ring_sock->sk;
	struct sk_buff_head list, *head = &sock->sk_receive_queue;
	struct sk_buff *skb;
	int i;

	__skb_queue_head_init(&list);

	/*
	 * Find the skb that holds this file in its SCM_RIGHTS. When found,
	 * remove this entry and rearrange the file array.
	 */
	skb = skb_dequeue(head);
	while (skb) {
		struct scm_fp_list *fp;

		fp = UNIXCB(skb).fp;
		for (i = 0; i < fp->count; i++) {
			int left;

			if (fp->fp[i] != file)
				continue;

			unix_notinflight(fp->user, fp->fp[i]);
			left = fp->count - 1 - i;
			if (left) {
				memmove(&fp->fp[i], &fp->fp[i + 1],
						left * sizeof(struct file *));
			}
			fp->count--;
			if (!fp->count) {
				kfree_skb(skb);
				skb = NULL;
			} else {
				__skb_queue_tail(&list, skb);
			}
			fput(file);
			file = NULL;
			break;
		}

		if (!file)
			break;

		__skb_queue_tail(&list, skb);

		skb = skb_dequeue(head);
	}

	if (skb_peek(&list)) {
		spin_lock_irq(&head->lock);
		while ((skb = __skb_dequeue(&list)) != NULL)
			__skb_queue_tail(head, skb);
		spin_unlock_irq(&head->lock);
	}
#endif
}

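/* Final put of a fixed file: plain fput(), or SCM unaccounting if needed. */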
static void io_rsrc_file_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
{
	struct file *file = prsrc->file;

	if (likely(!io_file_need_scm(file)))
		fput(file);
	else
		io_rsrc_file_scm_put(ctx, file);
}

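/*
 * Register an array of fds as the fixed file table. A NULL array or an
 * fd of -1 leaves a sparse slot, which must not carry a tag.
 */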
int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
			  unsigned nr_args, u64 __user *tags)
{
	__s32 __user *fds = (__s32 __user *) arg;
	struct file *file;
	int fd, ret;
	unsigned i;

	if (ctx->file_data)
		return -EBUSY;
	if (!nr_args)
		return -EINVAL;
	if (nr_args > IORING_MAX_FIXED_FILES)
		return -EMFILE;
	if (nr_args > rlimit(RLIMIT_NOFILE))
		return -EMFILE;
	ret = io_rsrc_data_alloc(ctx, io_rsrc_file_put, tags, nr_args,
				 &ctx->file_data);
	if (ret)
		return ret;

	if (!io_alloc_file_tables(&ctx->file_table, nr_args)) {
		io_rsrc_data_free(ctx->file_data);
		ctx->file_data = NULL;
		return -ENOMEM;
	}

	for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
		struct io_fixed_file *file_slot;

		if (fds && copy_from_user(&fd, &fds[i], sizeof(fd))) {
			ret = -EFAULT;
			goto fail;
		}
		/* allow sparse sets */
		if (!fds || fd == -1) {
			ret = -EINVAL;
			if (unlikely(*io_get_tag_slot(ctx->file_data, i)))
				goto fail;
			continue;
		}

		file = fget(fd);
		ret = -EBADF;
		if (unlikely(!file))
			goto fail;

		/*
		 * Don't allow io_uring instances to be registered. If UNIX
		 * isn't enabled, then this causes a reference cycle and this
		 * instance can never get freed. If UNIX is enabled we'll
		 * handle it just fine, but there's still no point in allowing
		 * a ring fd as it doesn't support regular read/write anyway.
		 */
		if (io_is_uring_fops(file)) {
			fput(file);
			goto fail;
		}
		ret = io_scm_file_account(ctx, file);
		if (ret) {
			fput(file);
			goto fail;
		}
		file_slot = io_fixed_file_slot(&ctx->file_table, i);
		io_fixed_file_set(file_slot, file);
		io_file_bitmap_set(&ctx->file_table, i);
	}

	/* default it to the whole table */
	io_file_table_set_alloc_range(ctx, 0, ctx->nr_user_files);
	return 0;
fail:
	__io_sqe_files_unregister(ctx);
	return ret;
}

static void io_rsrc_buf_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
{
	io_buffer_unmap(ctx, &prsrc->buf);
	prsrc->buf = NULL;
}

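/* Unmap all registered buffers and free the table and rsrc data. */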
void __io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
{
	unsigned int i;

	for (i = 0; i < ctx->nr_user_bufs; i++)
		io_buffer_unmap(ctx, &ctx->user_bufs[i]);
	kfree(ctx->user_bufs);
	io_rsrc_data_free(ctx->buf_data);
	ctx->user_bufs = NULL;
	ctx->buf_data = NULL;
	ctx->nr_user_bufs = 0;
}

int io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
{
	unsigned nr = ctx->nr_user_bufs;
	int ret;

	if (!ctx->buf_data)
		return -ENXIO;

	/*
	 * Quiesce may drop ->uring_lock; while it's not held, prevent
	 * new requests from using the table.
	 */
	ctx->nr_user_bufs = 0;
	ret = io_rsrc_ref_quiesce(ctx->buf_data, ctx);
	ctx->nr_user_bufs = nr;
	if (!ret)
		__io_sqe_buffers_unregister(ctx);
	return ret;
}

/*
 * Not super efficient, but this only runs at registration time. And we do
 * cache the last compound head, so generally we'll only do a full search
 * if we don't match that one.
 *
 * We check if the given compound head page has already been accounted, to
 * avoid double accounting it. This allows us to account the full size of the
 * page, not just the constituent pages of a huge page.
 */
static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
				  int nr_pages, struct page *hpage)
{
	int i, j;

	/* check current page array */
	for (i = 0; i < nr_pages; i++) {
		if (!PageCompound(pages[i]))
			continue;
		if (compound_head(pages[i]) == hpage)
			return true;
	}

	/* check previously registered pages */
	for (i = 0; i < ctx->nr_user_bufs; i++) {
		struct io_mapped_ubuf *imu = ctx->user_bufs[i];

		for (j = 0; j < imu->nr_bvecs; j++) {
			if (!PageCompound(imu->bvec[j].bv_page))
				continue;
			if (compound_head(imu->bvec[j].bv_page) == hpage)
				return true;
		}
	}

	return false;
}

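/*
 * Account the pinned pages backing a buffer, counting each compound head
 * only once so huge pages aren't charged per-subpage or double-charged.
 */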
static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
				 int nr_pages, struct io_mapped_ubuf *imu,
				 struct page **last_hpage)
{
	int i, ret;

	imu->acct_pages = 0;
	for (i = 0; i < nr_pages; i++) {
		if (!PageCompound(pages[i])) {
			imu->acct_pages++;
		} else {
			struct page *hpage;

			hpage = compound_head(pages[i]);
			if (hpage == *last_hpage)
				continue;
			*last_hpage = hpage;
			if (headpage_already_acct(ctx, pages, i, hpage))
				continue;
			imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;
		}
	}

	if (!imu->acct_pages)
		return 0;

	ret = io_account_mem(ctx, imu->acct_pages);
	if (ret)
		imu->acct_pages = 0;
	return ret;
}

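/*
 * Pin the user pages backing [ubuf, ubuf + len) for long-term use. Only
 * anonymous memory, shmem and hugetlb file mappings are supported.
 */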
struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages)
{
	unsigned long start, end, nr_pages;
	struct vm_area_struct **vmas = NULL;
	struct page **pages = NULL;
	int i, pret, ret = -ENOMEM;

	end = (ubuf + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	start = ubuf >> PAGE_SHIFT;
	nr_pages = end - start;

	pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		goto done;

	vmas = kvmalloc_array(nr_pages, sizeof(struct vm_area_struct *),
			      GFP_KERNEL);
	if (!vmas)
		goto done;

	ret = 0;
	mmap_read_lock(current->mm);
	pret = pin_user_pages(ubuf, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
			      pages, vmas);
	if (pret == nr_pages) {
		struct file *file = vmas[0]->vm_file;

		/* don't support file backed memory */
		for (i = 0; i < nr_pages; i++) {
			if (vmas[i]->vm_file != file) {
				ret = -EINVAL;
				break;
			}
			if (!file)
				continue;
			if (!vma_is_shmem(vmas[i]) && !is_file_hugepages(file)) {
				ret = -EOPNOTSUPP;
				break;
			}
		}
		*npages = nr_pages;
	} else {
		ret = pret < 0 ? pret : -EFAULT;
	}
	mmap_read_unlock(current->mm);
	if (ret) {
		/*
		 * If we did a partial map, or found file-backed vmas,
		 * release any pages we did get.
		 */
		if (pret > 0)
			unpin_user_pages(pages, pret);
		goto done;
	}
	ret = 0;
done:
	kvfree(vmas);
	if (ret < 0) {
		kvfree(pages);
		pages = ERR_PTR(ret);
	}
	return pages;
}

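/*
 * Pin and map one user buffer into an io_mapped_ubuf, coalescing pages of
 * the same folio into a single bvec entry where possible.
 */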
static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
				  struct io_mapped_ubuf **pimu,
				  struct page **last_hpage)
{
	struct io_mapped_ubuf *imu = NULL;
	struct page **pages = NULL;
	unsigned long off;
	size_t size;
	int ret, nr_pages, i;
	struct folio *folio = NULL;

	*pimu = ctx->dummy_ubuf;
	if (!iov->iov_base)
		return 0;

	ret = -ENOMEM;
	pages = io_pin_pages((unsigned long) iov->iov_base, iov->iov_len,
				&nr_pages);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		pages = NULL;
		goto done;
	}

	/* If all pages share one folio, try to coalesce them into a single bvec entry */
	if (nr_pages > 1) {
		folio = page_folio(pages[0]);
		for (i = 1; i < nr_pages; i++) {
			if (page_folio(pages[i]) != folio) {
				folio = NULL;
				break;
			}
		}
		if (folio) {
			/*
			 * The pages are bound to the folio; this doesn't
			 * actually unpin them, but drops all but one
			 * reference, which is usually put down by
			 * io_buffer_unmap(). Note: this needs a better helper.
			 */
			unpin_user_pages(&pages[1], nr_pages - 1);
			nr_pages = 1;
		}
	}

	imu = kvmalloc(struct_size(imu, bvec, nr_pages), GFP_KERNEL);
	if (!imu)
		goto done;

	ret = io_buffer_account_pin(ctx, pages, nr_pages, imu, last_hpage);
	if (ret) {
		unpin_user_pages(pages, nr_pages);
		goto done;
	}

	off = (unsigned long) iov->iov_base & ~PAGE_MASK;
	size = iov->iov_len;
	/* store original address for later verification */
	imu->ubuf = (unsigned long) iov->iov_base;
	imu->ubuf_end = imu->ubuf + iov->iov_len;
	imu->nr_bvecs = nr_pages;
	*pimu = imu;
	ret = 0;

	if (folio) {
		bvec_set_page(&imu->bvec[0], pages[0], size, off);
		goto done;
	}
	for (i = 0; i < nr_pages; i++) {
		size_t vec_len;

		vec_len = min_t(size_t, size, PAGE_SIZE - off);
		bvec_set_page(&imu->bvec[i], pages[i], vec_len, off);
		off = 0;
		size -= vec_len;
	}
done:
	if (ret)
		kvfree(imu);
	kvfree(pages);
	return ret;
}

static int io_buffers_map_alloc(struct io_ring_ctx *ctx, unsigned int nr_args)
{
	ctx->user_bufs = kcalloc(nr_args, sizeof(*ctx->user_bufs), GFP_KERNEL);
	return ctx->user_bufs ? 0 : -ENOMEM;
}

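/*
 * Register up to IORING_MAX_REG_BUFFERS user buffers; a NULL arg creates a
 * fully sparse table.
 */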
int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned int nr_args, u64 __user *tags)
{
	struct page *last_hpage = NULL;
	struct io_rsrc_data *data;
	int i, ret;
	struct iovec iov;

	BUILD_BUG_ON(IORING_MAX_REG_BUFFERS >= (1u << 16));

	if (ctx->user_bufs)
		return -EBUSY;
	if (!nr_args || nr_args > IORING_MAX_REG_BUFFERS)
		return -EINVAL;
	ret = io_rsrc_data_alloc(ctx, io_rsrc_buf_put, tags, nr_args, &data);
	if (ret)
		return ret;
	ret = io_buffers_map_alloc(ctx, nr_args);
	if (ret) {
		io_rsrc_data_free(data);
		return ret;
	}

	for (i = 0; i < nr_args; i++, ctx->nr_user_bufs++) {
		if (arg) {
			ret = io_copy_iov(ctx, &iov, arg, i);
			if (ret)
				break;
			ret = io_buffer_validate(&iov);
			if (ret)
				break;
		} else {
			memset(&iov, 0, sizeof(iov));
		}

		if (!iov.iov_base && *io_get_tag_slot(data, i)) {
			ret = -EINVAL;
			break;
		}

		ret = io_sqe_buffer_register(ctx, &iov, &ctx->user_bufs[i],
					     &last_hpage);
		if (ret)
			break;
	}

	WARN_ON_ONCE(ctx->buf_data);

	ctx->buf_data = data;
	if (ret)
		__io_sqe_buffers_unregister(ctx);
	return ret;
}

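/*
 * Set up a bvec iterator over a registered buffer for the range
 * [buf_addr, buf_addr + len), after checking that the range lies fully
 * inside the mapped region.
 */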
int io_import_fixed(int ddir, struct iov_iter *iter,
			   struct io_mapped_ubuf *imu,
			   u64 buf_addr, size_t len)
{
	u64 buf_end;
	size_t offset;

	if (WARN_ON_ONCE(!imu))
		return -EFAULT;
	if (unlikely(check_add_overflow(buf_addr, (u64)len, &buf_end)))
		return -EFAULT;
	/* not inside the mapped region */
	if (unlikely(buf_addr < imu->ubuf || buf_end > imu->ubuf_end))
		return -EFAULT;

	/*
	 * Might not be the start of the buffer; set the size appropriately
	 * and advance to the beginning.
	 */
	offset = buf_addr - imu->ubuf;
	iov_iter_bvec(iter, ddir, imu->bvec, imu->nr_bvecs, offset + len);

	if (offset) {
		/*
		 * Don't use iov_iter_advance() here, as it's really slow for
		 * using the latter parts of a big fixed buffer - it iterates
		 * over each segment manually. We can cheat a bit here, because
		 * we know that:
		 *
		 * 1) it's a BVEC iter, we set it up
		 * 2) all bvecs are PAGE_SIZE in size, except potentially the
		 *    first and last bvec
		 *
		 * So just find our index, and adjust the iterator afterwards.
		 * If the offset is within the first bvec (or is the whole
		 * first bvec), just adjust the iterator directly. This makes
		 * it easier since we can just skip the first segment, which
		 * may not be PAGE_SIZE aligned.
		 */
		const struct bio_vec *bvec = imu->bvec;

		if (offset <= bvec->bv_len) {
			/*
			 * Note: huge-page buffers consist of one large bvec
			 * entry and should always take this path. The other
			 * branch doesn't expect non-PAGE_SIZE'd chunks.
			 */
			iter->bvec = bvec;
			iter->nr_segs = bvec->bv_len;
			iter->count -= offset;
			iter->iov_offset = offset;
		} else {
			unsigned long seg_skip;

			/* skip first vec */
			offset -= bvec->bv_len;
			seg_skip = 1 + (offset >> PAGE_SHIFT);

			iter->bvec = bvec + seg_skip;
			iter->nr_segs -= seg_skip;
			iter->count -= bvec->bv_len + offset;
			iter->iov_offset = offset & ~PAGE_MASK;
		}
	}

	return 0;
}
1349