xref: /openbmc/linux/io_uring/rsrc.c (revision 29b26c55)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/kernel.h>
3 #include <linux/errno.h>
4 #include <linux/fs.h>
5 #include <linux/file.h>
6 #include <linux/mm.h>
7 #include <linux/slab.h>
8 #include <linux/nospec.h>
9 #include <linux/hugetlb.h>
10 #include <linux/compat.h>
11 #include <linux/io_uring.h>
12 
13 #include <uapi/linux/io_uring.h>
14 
15 #include "io_uring.h"
16 #include "openclose.h"
17 #include "rsrc.h"
18 
19 struct io_rsrc_update {
20 	struct file			*file;
21 	u64				arg;
22 	u32				nr_args;
23 	u32				offset;
24 };
25 
26 static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
27 				  struct io_mapped_ubuf **pimu,
28 				  struct page **last_hpage);
29 
30 /* only define max */
31 #define IORING_MAX_FIXED_FILES	(1U << 20)
32 #define IORING_MAX_REG_BUFFERS	(1U << 14)
33 
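/*
 * Charge nr_pages against the user's RLIMIT_MEMLOCK budget. Uses a
 * lock-free cmpxchg loop on user->locked_vm so concurrent callers never
 * overshoot the limit; returns -ENOMEM if the charge won't fit.
 */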
34 int __io_account_mem(struct user_struct *user, unsigned long nr_pages)
35 {
36 	unsigned long page_limit, cur_pages, new_pages;
37 
38 	if (!nr_pages)
39 		return 0;
40 
41 	/* Don't allow more pages than we can safely lock */
42 	page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
43 
44 	cur_pages = atomic_long_read(&user->locked_vm);
45 	do {
46 		new_pages = cur_pages + nr_pages;
47 		if (new_pages > page_limit)
48 			return -ENOMEM;
49 	} while (!atomic_long_try_cmpxchg(&user->locked_vm,
50 					  &cur_pages, new_pages));
51 	return 0;
52 }
53 
54 static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
55 {
56 	if (ctx->user)
57 		__io_unaccount_mem(ctx->user, nr_pages);
58 
59 	if (ctx->mm_account)
60 		atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
61 }
62 
63 static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
64 {
65 	int ret;
66 
67 	if (ctx->user) {
68 		ret = __io_account_mem(ctx->user, nr_pages);
69 		if (ret)
70 			return ret;
71 	}
72 
73 	if (ctx->mm_account)
74 		atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);
75 
76 	return 0;
77 }
78 
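/*
 * Copy one iovec at @index from the user supplied array into @dst,
 * translating from struct compat_iovec when the ring was created by a
 * compat (32-bit) task.
 */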
79 static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
80 		       void __user *arg, unsigned index)
81 {
82 	struct iovec __user *src;
83 
84 #ifdef CONFIG_COMPAT
85 	if (ctx->compat) {
86 		struct compat_iovec __user *ciovs;
87 		struct compat_iovec ciov;
88 
89 		ciovs = (struct compat_iovec __user *) arg;
90 		if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
91 			return -EFAULT;
92 
93 		dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
94 		dst->iov_len = ciov.iov_len;
95 		return 0;
96 	}
97 #endif
98 	src = (struct iovec __user *) arg;
99 	if (copy_from_user(dst, &src[index], sizeof(*dst)))
100 		return -EFAULT;
101 	return 0;
102 }
103 
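/*
 * Basic sanity checks on a buffer being registered: a NULL base is only
 * allowed for a zero-length (sparse) entry, a zero length with a non-NULL
 * base is rejected, the length is capped at 1GB, and base plus the
 * page-rounded length must not overflow.
 */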
104 static int io_buffer_validate(struct iovec *iov)
105 {
106 	unsigned long tmp, acct_len = iov->iov_len + (PAGE_SIZE - 1);
107 
108 	/*
109 	 * Don't impose further size or alignment constraints on the
110 	 * buffer here; we'll return -EINVAL later, when the IO is
111 	 * submitted, if they are wrong.
112 	 */
113 	if (!iov->iov_base)
114 		return iov->iov_len ? -EFAULT : 0;
115 	if (!iov->iov_len)
116 		return -EFAULT;
117 
118 	/* arbitrary limit, but we need something */
119 	if (iov->iov_len > SZ_1G)
120 		return -EFAULT;
121 
122 	if (check_add_overflow((unsigned long)iov->iov_base, acct_len, &tmp))
123 		return -EOVERFLOW;
124 
125 	return 0;
126 }
127 
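/*
 * Drop a registered buffer slot: unpin its pages, return any accounted
 * memory, free the io_mapped_ubuf and clear the slot. The shared
 * dummy_ubuf placeholder is never freed.
 */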
128 static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf **slot)
129 {
130 	struct io_mapped_ubuf *imu = *slot;
131 	unsigned int i;
132 
133 	if (imu != ctx->dummy_ubuf) {
134 		for (i = 0; i < imu->nr_bvecs; i++)
135 			unpin_user_page(imu->bvec[i].bv_page);
136 		if (imu->acct_pages)
137 			io_unaccount_mem(ctx, imu->acct_pages);
138 		kvfree(imu);
139 	}
140 	*slot = NULL;
141 }
142 
143 static void io_rsrc_put_work(struct io_rsrc_node *node)
144 {
145 	struct io_rsrc_data *data = node->rsrc_data;
146 	struct io_rsrc_put *prsrc = &node->item;
147 
148 	if (prsrc->tag)
149 		io_post_aux_cqe(data->ctx, prsrc->tag, 0, 0);
150 	data->do_put(data->ctx, prsrc);
151 }
152 
153 void io_rsrc_node_destroy(struct io_ring_ctx *ctx, struct io_rsrc_node *node)
154 {
155 	if (!io_alloc_cache_put(&ctx->rsrc_node_cache, &node->cache))
156 		kfree(node);
157 }
158 
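/*
 * Called when a node's reference count hits zero. Nodes are recycled
 * strictly in list order, so walk rsrc_ref_list from the front and retire
 * every node that has dropped to zero refs, stopping at the first node
 * that is still referenced. Wakes up a pending quiesce once the list has
 * drained.
 */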
159 void io_rsrc_node_ref_zero(struct io_rsrc_node *node)
160 	__must_hold(&node->rsrc_data->ctx->uring_lock)
161 {
162 	struct io_ring_ctx *ctx = node->rsrc_data->ctx;
163 
164 	while (!list_empty(&ctx->rsrc_ref_list)) {
165 		node = list_first_entry(&ctx->rsrc_ref_list,
166 					    struct io_rsrc_node, node);
167 		/* recycle ref nodes in order */
168 		if (node->refs)
169 			break;
170 		list_del(&node->node);
171 
172 		if (likely(!node->empty))
173 			io_rsrc_put_work(node);
174 		io_rsrc_node_destroy(ctx, node);
175 	}
176 	if (list_empty(&ctx->rsrc_ref_list) && unlikely(ctx->rsrc_quiesce))
177 		wake_up_all(&ctx->rsrc_quiesce_wq);
178 }
179 
180 struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx)
181 {
182 	struct io_rsrc_node *ref_node;
183 	struct io_cache_entry *entry;
184 
185 	entry = io_alloc_cache_get(&ctx->rsrc_node_cache);
186 	if (entry) {
187 		ref_node = container_of(entry, struct io_rsrc_node, cache);
188 	} else {
189 		ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
190 		if (!ref_node)
191 			return NULL;
192 	}
193 
194 	ref_node->rsrc_data = NULL;
195 	ref_node->empty = 0;
196 	ref_node->refs = 1;
197 	return ref_node;
198 }
199 
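/*
 * Wait for all outstanding rsrc nodes to be dropped so the resource table
 * can be torn down or replaced. Swaps in a fresh rsrc node, retires the
 * current one, and then sleeps on rsrc_quiesce_wq (running task work in
 * between) until rsrc_ref_list is empty.
 */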
200 __cold static int io_rsrc_ref_quiesce(struct io_rsrc_data *data,
201 				      struct io_ring_ctx *ctx)
202 {
203 	struct io_rsrc_node *backup;
204 	DEFINE_WAIT(we);
205 	int ret;
206 
207 	/* As we may drop ->uring_lock, another task may have started quiesce */
208 	if (data->quiesce)
209 		return -ENXIO;
210 
211 	backup = io_rsrc_node_alloc(ctx);
212 	if (!backup)
213 		return -ENOMEM;
214 	ctx->rsrc_node->empty = true;
215 	ctx->rsrc_node->rsrc_data = data;
216 	list_add_tail(&ctx->rsrc_node->node, &ctx->rsrc_ref_list);
217 	io_put_rsrc_node(ctx, ctx->rsrc_node);
218 	ctx->rsrc_node = backup;
219 
220 	if (list_empty(&ctx->rsrc_ref_list))
221 		return 0;
222 
223 	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
224 		atomic_set(&ctx->cq_wait_nr, 1);
225 		smp_mb();
226 	}
227 
228 	ctx->rsrc_quiesce++;
229 	data->quiesce = true;
230 	do {
231 		prepare_to_wait(&ctx->rsrc_quiesce_wq, &we, TASK_INTERRUPTIBLE);
232 		mutex_unlock(&ctx->uring_lock);
233 
234 		ret = io_run_task_work_sig(ctx);
235 		if (ret < 0) {
236 			mutex_lock(&ctx->uring_lock);
237 			if (list_empty(&ctx->rsrc_ref_list))
238 				ret = 0;
239 			break;
240 		}
241 
242 		schedule();
243 		__set_current_state(TASK_RUNNING);
244 		mutex_lock(&ctx->uring_lock);
245 		ret = 0;
246 	} while (!list_empty(&ctx->rsrc_ref_list));
247 
248 	finish_wait(&ctx->rsrc_quiesce_wq, &we);
249 	data->quiesce = false;
250 	ctx->rsrc_quiesce--;
251 
252 	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
253 		atomic_set(&ctx->cq_wait_nr, 0);
254 		smp_mb();
255 	}
256 	return ret;
257 }
258 
259 static void io_free_page_table(void **table, size_t size)
260 {
261 	unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);
262 
263 	for (i = 0; i < nr_tables; i++)
264 		kfree(table[i]);
265 	kfree(table);
266 }
267 
268 static void io_rsrc_data_free(struct io_rsrc_data *data)
269 {
270 	size_t size = data->nr * sizeof(data->tags[0][0]);
271 
272 	if (data->tags)
273 		io_free_page_table((void **)data->tags, size);
274 	kfree(data);
275 }
276 
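/*
 * Allocate @size bytes as an array of page-sized chunks, returned as a
 * table of pointers. Keeps the individual allocations small so large tag
 * tables don't require high-order pages.
 */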
277 static __cold void **io_alloc_page_table(size_t size)
278 {
279 	unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);
280 	size_t init_size = size;
281 	void **table;
282 
283 	table = kcalloc(nr_tables, sizeof(*table), GFP_KERNEL_ACCOUNT);
284 	if (!table)
285 		return NULL;
286 
287 	for (i = 0; i < nr_tables; i++) {
288 		unsigned int this_size = min_t(size_t, size, PAGE_SIZE);
289 
290 		table[i] = kzalloc(this_size, GFP_KERNEL_ACCOUNT);
291 		if (!table[i]) {
292 			io_free_page_table(table, init_size);
293 			return NULL;
294 		}
295 		size -= this_size;
296 	}
297 	return table;
298 }
299 
300 __cold static int io_rsrc_data_alloc(struct io_ring_ctx *ctx,
301 				     rsrc_put_fn *do_put, u64 __user *utags,
302 				     unsigned nr, struct io_rsrc_data **pdata)
303 {
304 	struct io_rsrc_data *data;
305 	int ret = 0;
306 	unsigned i;
307 
308 	data = kzalloc(sizeof(*data), GFP_KERNEL);
309 	if (!data)
310 		return -ENOMEM;
311 	data->tags = (u64 **)io_alloc_page_table(nr * sizeof(data->tags[0][0]));
312 	if (!data->tags) {
313 		kfree(data);
314 		return -ENOMEM;
315 	}
316 
317 	data->nr = nr;
318 	data->ctx = ctx;
319 	data->do_put = do_put;
320 	if (utags) {
321 		ret = -EFAULT;
322 		for (i = 0; i < nr; i++) {
323 			u64 *tag_slot = io_get_tag_slot(data, i);
324 
325 			if (copy_from_user(tag_slot, &utags[i],
326 					   sizeof(*tag_slot)))
327 				goto fail;
328 		}
329 	}
330 	*pdata = data;
331 	return 0;
332 fail:
333 	io_rsrc_data_free(data);
334 	return ret;
335 }
336 
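/*
 * Apply a registered-file update: for each entry, queue removal of any
 * file currently in the slot, then install the new fd (a slot is left
 * empty for -1, or left untouched for IORING_REGISTER_FILES_SKIP).
 * Returns the number of entries processed, or an error if nothing was
 * updated.
 */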
337 static int __io_sqe_files_update(struct io_ring_ctx *ctx,
338 				 struct io_uring_rsrc_update2 *up,
339 				 unsigned nr_args)
340 {
341 	u64 __user *tags = u64_to_user_ptr(up->tags);
342 	__s32 __user *fds = u64_to_user_ptr(up->data);
343 	struct io_rsrc_data *data = ctx->file_data;
344 	struct io_fixed_file *file_slot;
345 	struct file *file;
346 	int fd, i, err = 0;
347 	unsigned int done;
348 
349 	if (!ctx->file_data)
350 		return -ENXIO;
351 	if (up->offset + nr_args > ctx->nr_user_files)
352 		return -EINVAL;
353 
354 	for (done = 0; done < nr_args; done++) {
355 		u64 tag = 0;
356 
357 		if ((tags && copy_from_user(&tag, &tags[done], sizeof(tag))) ||
358 		    copy_from_user(&fd, &fds[done], sizeof(fd))) {
359 			err = -EFAULT;
360 			break;
361 		}
362 		if ((fd == IORING_REGISTER_FILES_SKIP || fd == -1) && tag) {
363 			err = -EINVAL;
364 			break;
365 		}
366 		if (fd == IORING_REGISTER_FILES_SKIP)
367 			continue;
368 
369 		i = array_index_nospec(up->offset + done, ctx->nr_user_files);
370 		file_slot = io_fixed_file_slot(&ctx->file_table, i);
371 
372 		if (file_slot->file_ptr) {
373 			file = (struct file *)(file_slot->file_ptr & FFS_MASK);
374 			err = io_queue_rsrc_removal(data, i, file);
375 			if (err)
376 				break;
377 			file_slot->file_ptr = 0;
378 			io_file_bitmap_clear(&ctx->file_table, i);
379 		}
380 		if (fd != -1) {
381 			file = fget(fd);
382 			if (!file) {
383 				err = -EBADF;
384 				break;
385 			}
386 			/*
387 			 * Don't allow io_uring instances to be registered. If
388 			 * UNIX isn't enabled, then this causes a reference
389 			 * cycle and this instance can never get freed. If UNIX
390 			 * is enabled we'll handle it just fine, but there's
391 			 * still no point in allowing a ring fd as it doesn't
392 			 * support regular read/write anyway.
393 			 */
394 			if (io_is_uring_fops(file)) {
395 				fput(file);
396 				err = -EBADF;
397 				break;
398 			}
399 			err = io_scm_file_account(ctx, file);
400 			if (err) {
401 				fput(file);
402 				break;
403 			}
404 			*io_get_tag_slot(data, i) = tag;
405 			io_fixed_file_set(file_slot, file);
406 			io_file_bitmap_set(&ctx->file_table, i);
407 		}
408 	}
409 	return done ? done : err;
410 }
411 
412 static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
413 				   struct io_uring_rsrc_update2 *up,
414 				   unsigned int nr_args)
415 {
416 	u64 __user *tags = u64_to_user_ptr(up->tags);
417 	struct iovec iov, __user *iovs = u64_to_user_ptr(up->data);
418 	struct page *last_hpage = NULL;
419 	__u32 done;
420 	int i, err;
421 
422 	if (!ctx->buf_data)
423 		return -ENXIO;
424 	if (up->offset + nr_args > ctx->nr_user_bufs)
425 		return -EINVAL;
426 
427 	for (done = 0; done < nr_args; done++) {
428 		struct io_mapped_ubuf *imu;
429 		u64 tag = 0;
430 
431 		err = io_copy_iov(ctx, &iov, iovs, done);
432 		if (err)
433 			break;
434 		if (tags && copy_from_user(&tag, &tags[done], sizeof(tag))) {
435 			err = -EFAULT;
436 			break;
437 		}
438 		err = io_buffer_validate(&iov);
439 		if (err)
440 			break;
441 		if (!iov.iov_base && tag) {
442 			err = -EINVAL;
443 			break;
444 		}
445 		err = io_sqe_buffer_register(ctx, &iov, &imu, &last_hpage);
446 		if (err)
447 			break;
448 
449 		i = array_index_nospec(up->offset + done, ctx->nr_user_bufs);
450 		if (ctx->user_bufs[i] != ctx->dummy_ubuf) {
451 			err = io_queue_rsrc_removal(ctx->buf_data, i,
452 						    ctx->user_bufs[i]);
453 			if (unlikely(err)) {
454 				io_buffer_unmap(ctx, &imu);
455 				break;
456 			}
457 			ctx->user_bufs[i] = ctx->dummy_ubuf;
458 		}
459 
460 		ctx->user_bufs[i] = imu;
461 		*io_get_tag_slot(ctx->buf_data, i) = tag;
462 	}
463 	return done ? done : err;
464 }
465 
466 static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
467 				     struct io_uring_rsrc_update2 *up,
468 				     unsigned nr_args)
469 {
470 	__u32 tmp;
471 
472 	lockdep_assert_held(&ctx->uring_lock);
473 
474 	if (check_add_overflow(up->offset, nr_args, &tmp))
475 		return -EOVERFLOW;
476 
477 	switch (type) {
478 	case IORING_RSRC_FILE:
479 		return __io_sqe_files_update(ctx, up, nr_args);
480 	case IORING_RSRC_BUFFER:
481 		return __io_sqe_buffers_update(ctx, up, nr_args);
482 	}
483 	return -EINVAL;
484 }
485 
486 int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
487 			     unsigned nr_args)
488 {
489 	struct io_uring_rsrc_update2 up;
490 
491 	if (!nr_args)
492 		return -EINVAL;
493 	memset(&up, 0, sizeof(up));
494 	if (copy_from_user(&up, arg, sizeof(struct io_uring_rsrc_update)))
495 		return -EFAULT;
496 	if (up.resv || up.resv2)
497 		return -EINVAL;
498 	return __io_register_rsrc_update(ctx, IORING_RSRC_FILE, &up, nr_args);
499 }
500 
501 int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
502 			    unsigned size, unsigned type)
503 {
504 	struct io_uring_rsrc_update2 up;
505 
506 	if (size != sizeof(up))
507 		return -EINVAL;
508 	if (copy_from_user(&up, arg, sizeof(up)))
509 		return -EFAULT;
510 	if (!up.nr || up.resv || up.resv2)
511 		return -EINVAL;
512 	return __io_register_rsrc_update(ctx, type, &up, up.nr);
513 }
514 
515 __cold int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
516 			    unsigned int size, unsigned int type)
517 {
518 	struct io_uring_rsrc_register rr;
519 
520 	/* keep it extendible */
521 	if (size != sizeof(rr))
522 		return -EINVAL;
523 
524 	memset(&rr, 0, sizeof(rr));
525 	if (copy_from_user(&rr, arg, size))
526 		return -EFAULT;
527 	if (!rr.nr || rr.resv2)
528 		return -EINVAL;
529 	if (rr.flags & ~IORING_RSRC_REGISTER_SPARSE)
530 		return -EINVAL;
531 
532 	switch (type) {
533 	case IORING_RSRC_FILE:
534 		if (rr.flags & IORING_RSRC_REGISTER_SPARSE && rr.data)
535 			break;
536 		return io_sqe_files_register(ctx, u64_to_user_ptr(rr.data),
537 					     rr.nr, u64_to_user_ptr(rr.tags));
538 	case IORING_RSRC_BUFFER:
539 		if (rr.flags & IORING_RSRC_REGISTER_SPARSE && rr.data)
540 			break;
541 		return io_sqe_buffers_register(ctx, u64_to_user_ptr(rr.data),
542 					       rr.nr, u64_to_user_ptr(rr.tags));
543 	}
544 	return -EINVAL;
545 }
546 
547 int io_files_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
548 {
549 	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
550 
551 	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
552 		return -EINVAL;
553 	if (sqe->rw_flags || sqe->splice_fd_in)
554 		return -EINVAL;
555 
556 	up->offset = READ_ONCE(sqe->off);
557 	up->nr_args = READ_ONCE(sqe->len);
558 	if (!up->nr_args)
559 		return -EINVAL;
560 	up->arg = READ_ONCE(sqe->addr);
561 	return 0;
562 }
563 
564 static int io_files_update_with_index_alloc(struct io_kiocb *req,
565 					    unsigned int issue_flags)
566 {
567 	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
568 	__s32 __user *fds = u64_to_user_ptr(up->arg);
569 	unsigned int done;
570 	struct file *file;
571 	int ret, fd;
572 
573 	if (!req->ctx->file_data)
574 		return -ENXIO;
575 
576 	for (done = 0; done < up->nr_args; done++) {
577 		if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
578 			ret = -EFAULT;
579 			break;
580 		}
581 
582 		file = fget(fd);
583 		if (!file) {
584 			ret = -EBADF;
585 			break;
586 		}
587 		ret = io_fixed_fd_install(req, issue_flags, file,
588 					  IORING_FILE_INDEX_ALLOC);
589 		if (ret < 0)
590 			break;
591 		if (copy_to_user(&fds[done], &ret, sizeof(ret))) {
592 			__io_close_fixed(req->ctx, issue_flags, ret);
593 			ret = -EFAULT;
594 			break;
595 		}
596 	}
597 
598 	if (done)
599 		return done;
600 	return ret;
601 }
602 
603 int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
604 {
605 	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
606 	struct io_ring_ctx *ctx = req->ctx;
607 	struct io_uring_rsrc_update2 up2;
608 	int ret;
609 
610 	up2.offset = up->offset;
611 	up2.data = up->arg;
612 	up2.nr = 0;
613 	up2.tags = 0;
614 	up2.resv = 0;
615 	up2.resv2 = 0;
616 
617 	if (up->offset == IORING_FILE_INDEX_ALLOC) {
618 		ret = io_files_update_with_index_alloc(req, issue_flags);
619 	} else {
620 		io_ring_submit_lock(ctx, issue_flags);
621 		ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE,
622 						&up2, up->nr_args);
623 		io_ring_submit_unlock(ctx, issue_flags);
624 	}
625 
626 	if (ret < 0)
627 		req_set_fail(req);
628 	io_req_set_res(req, ret, 0);
629 	return IOU_OK;
630 }
631 
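/*
 * Queue @rsrc for a deferred put. The current rsrc node takes ownership of
 * the resource and its tag and is parked on rsrc_ref_list; a fresh node is
 * installed as ctx->rsrc_node for subsequent requests.
 */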
632 int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx, void *rsrc)
633 {
634 	struct io_ring_ctx *ctx = data->ctx;
635 	struct io_rsrc_node *node = ctx->rsrc_node;
636 	u64 *tag_slot = io_get_tag_slot(data, idx);
637 
638 	ctx->rsrc_node = io_rsrc_node_alloc(ctx);
639 	if (unlikely(!ctx->rsrc_node)) {
640 		ctx->rsrc_node = node;
641 		return -ENOMEM;
642 	}
643 
644 	node->item.rsrc = rsrc;
645 	node->item.tag = *tag_slot;
646 	*tag_slot = 0;
647 
648 	node->rsrc_data = data;
649 	list_add_tail(&node->node, &ctx->rsrc_ref_list);
650 	io_put_rsrc_node(ctx, node);
651 	return 0;
652 }
653 
654 void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
655 {
656 	int i;
657 
658 	for (i = 0; i < ctx->nr_user_files; i++) {
659 		struct file *file = io_file_from_index(&ctx->file_table, i);
660 
661 		/* skip scm accounted files, they'll be freed by ->ring_sock */
662 		if (!file || io_file_need_scm(file))
663 			continue;
664 		io_file_bitmap_clear(&ctx->file_table, i);
665 		fput(file);
666 	}
667 
668 #if defined(CONFIG_UNIX)
669 	if (ctx->ring_sock) {
670 		struct sock *sock = ctx->ring_sock->sk;
671 		struct sk_buff *skb;
672 
673 		while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
674 			kfree_skb(skb);
675 	}
676 #endif
677 	io_free_file_tables(&ctx->file_table);
678 	io_file_table_set_alloc_range(ctx, 0, 0);
679 	io_rsrc_data_free(ctx->file_data);
680 	ctx->file_data = NULL;
681 	ctx->nr_user_files = 0;
682 }
683 
684 int io_sqe_files_unregister(struct io_ring_ctx *ctx)
685 {
686 	unsigned nr = ctx->nr_user_files;
687 	int ret;
688 
689 	if (!ctx->file_data)
690 		return -ENXIO;
691 
692 	/*
693 	 * Quiesce may unlock ->uring_lock; while it's not held, prevent
694 	 * new requests from using the table.
695 	 */
696 	ctx->nr_user_files = 0;
697 	ret = io_rsrc_ref_quiesce(ctx->file_data, ctx);
698 	ctx->nr_user_files = nr;
699 	if (!ret)
700 		__io_sqe_files_unregister(ctx);
701 	return ret;
702 }
703 
704 /*
705  * Ensure the UNIX gc is aware of our file set, so we are certain that
706  * the io_uring can be safely unregistered on process exit, even if we have
707  * loops in the file referencing. We account only files that can hold other
708  * files because otherwise they can't form a loop and so are not interesting
709  * for GC.
710  */
711 int __io_scm_file_account(struct io_ring_ctx *ctx, struct file *file)
712 {
713 #if defined(CONFIG_UNIX)
714 	struct sock *sk = ctx->ring_sock->sk;
715 	struct sk_buff_head *head = &sk->sk_receive_queue;
716 	struct scm_fp_list *fpl;
717 	struct sk_buff *skb;
718 
719 	if (likely(!io_file_need_scm(file)))
720 		return 0;
721 
722 	/*
723 	 * See if we can merge this file into an existing skb SCM_RIGHTS
724 	 * file set. If there's no room, fall back to allocating a new skb
725 	 * and filling it in.
726 	 */
727 	spin_lock_irq(&head->lock);
728 	skb = skb_peek(head);
729 	if (skb && UNIXCB(skb).fp->count < SCM_MAX_FD)
730 		__skb_unlink(skb, head);
731 	else
732 		skb = NULL;
733 	spin_unlock_irq(&head->lock);
734 
735 	if (!skb) {
736 		fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
737 		if (!fpl)
738 			return -ENOMEM;
739 
740 		skb = alloc_skb(0, GFP_KERNEL);
741 		if (!skb) {
742 			kfree(fpl);
743 			return -ENOMEM;
744 		}
745 
746 		fpl->user = get_uid(current_user());
747 		fpl->max = SCM_MAX_FD;
748 		fpl->count = 0;
749 
750 		UNIXCB(skb).fp = fpl;
751 		skb->sk = sk;
752 		skb->scm_io_uring = 1;
753 		skb->destructor = unix_destruct_scm;
754 		refcount_add(skb->truesize, &sk->sk_wmem_alloc);
755 	}
756 
757 	fpl = UNIXCB(skb).fp;
758 	fpl->fp[fpl->count++] = get_file(file);
759 	unix_inflight(fpl->user, file);
760 	skb_queue_head(head, skb);
761 	fput(file);
762 #endif
763 	return 0;
764 }
765 
766 static __cold void io_rsrc_file_scm_put(struct io_ring_ctx *ctx, struct file *file)
767 {
768 #if defined(CONFIG_UNIX)
769 	struct sock *sock = ctx->ring_sock->sk;
770 	struct sk_buff_head list, *head = &sock->sk_receive_queue;
771 	struct sk_buff *skb;
772 	int i;
773 
774 	__skb_queue_head_init(&list);
775 
776 	/*
777 	 * Find the skb that holds this file in its SCM_RIGHTS. When found,
778 	 * remove this entry and rearrange the file array.
779 	 */
780 	skb = skb_dequeue(head);
781 	while (skb) {
782 		struct scm_fp_list *fp;
783 
784 		fp = UNIXCB(skb).fp;
785 		for (i = 0; i < fp->count; i++) {
786 			int left;
787 
788 			if (fp->fp[i] != file)
789 				continue;
790 
791 			unix_notinflight(fp->user, fp->fp[i]);
792 			left = fp->count - 1 - i;
793 			if (left) {
794 				memmove(&fp->fp[i], &fp->fp[i + 1],
795 						left * sizeof(struct file *));
796 			}
797 			fp->count--;
798 			if (!fp->count) {
799 				kfree_skb(skb);
800 				skb = NULL;
801 			} else {
802 				__skb_queue_tail(&list, skb);
803 			}
804 			fput(file);
805 			file = NULL;
806 			break;
807 		}
808 
809 		if (!file)
810 			break;
811 
812 		__skb_queue_tail(&list, skb);
813 
814 		skb = skb_dequeue(head);
815 	}
816 
817 	if (skb_peek(&list)) {
818 		spin_lock_irq(&head->lock);
819 		while ((skb = __skb_dequeue(&list)) != NULL)
820 			__skb_queue_tail(head, skb);
821 		spin_unlock_irq(&head->lock);
822 	}
823 #endif
824 }
825 
826 static void io_rsrc_file_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
827 {
828 	struct file *file = prsrc->file;
829 
830 	if (likely(!io_file_need_scm(file)))
831 		fput(file);
832 	else
833 		io_rsrc_file_scm_put(ctx, file);
834 }
835 
836 int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
837 			  unsigned nr_args, u64 __user *tags)
838 {
839 	__s32 __user *fds = (__s32 __user *) arg;
840 	struct file *file;
841 	int fd, ret;
842 	unsigned i;
843 
844 	if (ctx->file_data)
845 		return -EBUSY;
846 	if (!nr_args)
847 		return -EINVAL;
848 	if (nr_args > IORING_MAX_FIXED_FILES)
849 		return -EMFILE;
850 	if (nr_args > rlimit(RLIMIT_NOFILE))
851 		return -EMFILE;
852 	ret = io_rsrc_data_alloc(ctx, io_rsrc_file_put, tags, nr_args,
853 				 &ctx->file_data);
854 	if (ret)
855 		return ret;
856 
857 	if (!io_alloc_file_tables(&ctx->file_table, nr_args)) {
858 		io_rsrc_data_free(ctx->file_data);
859 		ctx->file_data = NULL;
860 		return -ENOMEM;
861 	}
862 
863 	for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
864 		struct io_fixed_file *file_slot;
865 
866 		if (fds && copy_from_user(&fd, &fds[i], sizeof(fd))) {
867 			ret = -EFAULT;
868 			goto fail;
869 		}
870 		/* allow sparse sets */
871 		if (!fds || fd == -1) {
872 			ret = -EINVAL;
873 			if (unlikely(*io_get_tag_slot(ctx->file_data, i)))
874 				goto fail;
875 			continue;
876 		}
877 
878 		file = fget(fd);
879 		ret = -EBADF;
880 		if (unlikely(!file))
881 			goto fail;
882 
883 		/*
884 		 * Don't allow io_uring instances to be registered. If UNIX
885 		 * isn't enabled, then this causes a reference cycle and this
886 		 * instance can never get freed. If UNIX is enabled we'll
887 		 * handle it just fine, but there's still no point in allowing
888 		 * a ring fd as it doesn't support regular read/write anyway.
889 		 */
890 		if (io_is_uring_fops(file)) {
891 			fput(file);
892 			goto fail;
893 		}
894 		ret = io_scm_file_account(ctx, file);
895 		if (ret) {
896 			fput(file);
897 			goto fail;
898 		}
899 		file_slot = io_fixed_file_slot(&ctx->file_table, i);
900 		io_fixed_file_set(file_slot, file);
901 		io_file_bitmap_set(&ctx->file_table, i);
902 	}
903 
904 	/* default it to the whole table */
905 	io_file_table_set_alloc_range(ctx, 0, ctx->nr_user_files);
906 	return 0;
907 fail:
908 	__io_sqe_files_unregister(ctx);
909 	return ret;
910 }
911 
912 static void io_rsrc_buf_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
913 {
914 	io_buffer_unmap(ctx, &prsrc->buf);
915 	prsrc->buf = NULL;
916 }
917 
918 void __io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
919 {
920 	unsigned int i;
921 
922 	for (i = 0; i < ctx->nr_user_bufs; i++)
923 		io_buffer_unmap(ctx, &ctx->user_bufs[i]);
924 	kfree(ctx->user_bufs);
925 	io_rsrc_data_free(ctx->buf_data);
926 	ctx->user_bufs = NULL;
927 	ctx->buf_data = NULL;
928 	ctx->nr_user_bufs = 0;
929 }
930 
931 int io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
932 {
933 	unsigned nr = ctx->nr_user_bufs;
934 	int ret;
935 
936 	if (!ctx->buf_data)
937 		return -ENXIO;
938 
939 	/*
940 	 * Quiesce may unlock ->uring_lock; while it's not held, prevent
941 	 * new requests from using the table.
942 	 */
943 	ctx->nr_user_bufs = 0;
944 	ret = io_rsrc_ref_quiesce(ctx->buf_data, ctx);
945 	ctx->nr_user_bufs = nr;
946 	if (!ret)
947 		__io_sqe_buffers_unregister(ctx);
948 	return ret;
949 }
950 
951 /*
952  * Not super efficient, but this only runs at registration time. And we do cache
953  * the last compound head, so generally we'll only do a full search if we don't
954  * match that one.
955  *
956  * We check if the given compound head page has already been accounted, to
957  * avoid double accounting it. This allows us to account the full size of the
958  * page, not just the constituent pages of a huge page.
959  */
960 static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
961 				  int nr_pages, struct page *hpage)
962 {
963 	int i, j;
964 
965 	/* check current page array */
966 	for (i = 0; i < nr_pages; i++) {
967 		if (!PageCompound(pages[i]))
968 			continue;
969 		if (compound_head(pages[i]) == hpage)
970 			return true;
971 	}
972 
973 	/* check previously registered pages */
974 	for (i = 0; i < ctx->nr_user_bufs; i++) {
975 		struct io_mapped_ubuf *imu = ctx->user_bufs[i];
976 
977 		for (j = 0; j < imu->nr_bvecs; j++) {
978 			if (!PageCompound(imu->bvec[j].bv_page))
979 				continue;
980 			if (compound_head(imu->bvec[j].bv_page) == hpage)
981 				return true;
982 		}
983 	}
984 
985 	return false;
986 }
987 
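/*
 * Account the pinned pages for @imu. Compound (huge) pages are charged
 * once for their full size, using *last_hpage and headpage_already_acct()
 * to avoid double accounting heads that were already seen.
 */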
988 static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
989 				 int nr_pages, struct io_mapped_ubuf *imu,
990 				 struct page **last_hpage)
991 {
992 	int i, ret;
993 
994 	imu->acct_pages = 0;
995 	for (i = 0; i < nr_pages; i++) {
996 		if (!PageCompound(pages[i])) {
997 			imu->acct_pages++;
998 		} else {
999 			struct page *hpage;
1000 
1001 			hpage = compound_head(pages[i]);
1002 			if (hpage == *last_hpage)
1003 				continue;
1004 			*last_hpage = hpage;
1005 			if (headpage_already_acct(ctx, pages, i, hpage))
1006 				continue;
1007 			imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;
1008 		}
1009 	}
1010 
1011 	if (!imu->acct_pages)
1012 		return 0;
1013 
1014 	ret = io_account_mem(ctx, imu->acct_pages);
1015 	if (ret)
1016 		imu->acct_pages = 0;
1017 	return ret;
1018 }
1019 
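/*
 * Pin the user pages backing [ubuf, ubuf + len) for long-term use.
 * Anonymous, shmem and hugetlb backed ranges are allowed; other
 * file-backed memory is rejected. Returns the page array (the caller must
 * kvfree() it) or an ERR_PTR on failure.
 */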
1020 struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages)
1021 {
1022 	unsigned long start, end, nr_pages;
1023 	struct vm_area_struct **vmas = NULL;
1024 	struct page **pages = NULL;
1025 	int i, pret, ret = -ENOMEM;
1026 
1027 	end = (ubuf + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1028 	start = ubuf >> PAGE_SHIFT;
1029 	nr_pages = end - start;
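	/*
	 * Example (4K pages): ubuf = 0x1800, len = 0x2000 spans three pages:
	 * start = 0x1800 >> 12 = 1, end = (0x1800 + 0x2000 + 0xfff) >> 12 = 4,
	 * so nr_pages = 3.
	 */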
1030 
1031 	pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
1032 	if (!pages)
1033 		goto done;
1034 
1035 	vmas = kvmalloc_array(nr_pages, sizeof(struct vm_area_struct *),
1036 			      GFP_KERNEL);
1037 	if (!vmas)
1038 		goto done;
1039 
1040 	ret = 0;
1041 	mmap_read_lock(current->mm);
1042 	pret = pin_user_pages(ubuf, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
1043 			      pages, vmas);
1044 	if (pret == nr_pages) {
1045 		struct file *file = vmas[0]->vm_file;
1046 
1047 		/* don't support file backed memory */
1048 		for (i = 0; i < nr_pages; i++) {
1049 			if (vmas[i]->vm_file != file) {
1050 				ret = -EINVAL;
1051 				break;
1052 			}
1053 			if (!file)
1054 				continue;
1055 			if (!vma_is_shmem(vmas[i]) && !is_file_hugepages(file)) {
1056 				ret = -EOPNOTSUPP;
1057 				break;
1058 			}
1059 		}
1060 		*npages = nr_pages;
1061 	} else {
1062 		ret = pret < 0 ? pret : -EFAULT;
1063 	}
1064 	mmap_read_unlock(current->mm);
1065 	if (ret) {
1066 		/*
1067 		 * if we did partial map, or found file backed vmas,
1068 		 * release any pages we did get
1069 		 */
1070 		if (pret > 0)
1071 			unpin_user_pages(pages, pret);
1072 		goto done;
1073 	}
1074 	ret = 0;
1075 done:
1076 	kvfree(vmas);
1077 	if (ret < 0) {
1078 		kvfree(pages);
1079 		pages = ERR_PTR(ret);
1080 	}
1081 	return pages;
1082 }
1083 
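/*
 * Pin and map a single user buffer described by @iov. The pinned pages are
 * accounted against RLIMIT_MEMLOCK, coalesced into one bvec if they all
 * belong to the same folio, and published through *pimu. A NULL iov_base
 * installs the dummy_ubuf placeholder for a sparse slot.
 */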
1084 static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
1085 				  struct io_mapped_ubuf **pimu,
1086 				  struct page **last_hpage)
1087 {
1088 	struct io_mapped_ubuf *imu = NULL;
1089 	struct page **pages = NULL;
1090 	unsigned long off;
1091 	size_t size;
1092 	int ret, nr_pages, i;
1093 	struct folio *folio = NULL;
1094 
1095 	*pimu = ctx->dummy_ubuf;
1096 	if (!iov->iov_base)
1097 		return 0;
1098 
1099 	ret = -ENOMEM;
1100 	pages = io_pin_pages((unsigned long) iov->iov_base, iov->iov_len,
1101 				&nr_pages);
1102 	if (IS_ERR(pages)) {
1103 		ret = PTR_ERR(pages);
1104 		pages = NULL;
1105 		goto done;
1106 	}
1107 
1108 	/* If it's a huge page, try to coalesce them into a single bvec entry */
1109 	if (nr_pages > 1) {
1110 		folio = page_folio(pages[0]);
1111 		for (i = 1; i < nr_pages; i++) {
1112 			if (page_folio(pages[i]) != folio) {
1113 				folio = NULL;
1114 				break;
1115 			}
1116 		}
1117 		if (folio) {
1118 			/*
1119 			 * The pages are bound to the folio, so this doesn't
1120 			 * actually unpin them; it drops all but one reference,
1121 			 * which is usually put by io_buffer_unmap().
1122 			 * Note: this needs a better helper.
1123 			 */
1124 			unpin_user_pages(&pages[1], nr_pages - 1);
1125 			nr_pages = 1;
1126 		}
1127 	}
1128 
1129 	imu = kvmalloc(struct_size(imu, bvec, nr_pages), GFP_KERNEL);
1130 	if (!imu)
1131 		goto done;
1132 
1133 	ret = io_buffer_account_pin(ctx, pages, nr_pages, imu, last_hpage);
1134 	if (ret) {
1135 		unpin_user_pages(pages, nr_pages);
1136 		goto done;
1137 	}
1138 
1139 	off = (unsigned long) iov->iov_base & ~PAGE_MASK;
1140 	size = iov->iov_len;
1141 	/* store original address for later verification */
1142 	imu->ubuf = (unsigned long) iov->iov_base;
1143 	imu->ubuf_end = imu->ubuf + iov->iov_len;
1144 	imu->nr_bvecs = nr_pages;
1145 	*pimu = imu;
1146 	ret = 0;
1147 
1148 	if (folio) {
1149 		bvec_set_page(&imu->bvec[0], pages[0], size, off);
1150 		goto done;
1151 	}
1152 	for (i = 0; i < nr_pages; i++) {
1153 		size_t vec_len;
1154 
1155 		vec_len = min_t(size_t, size, PAGE_SIZE - off);
1156 		bvec_set_page(&imu->bvec[i], pages[i], vec_len, off);
1157 		off = 0;
1158 		size -= vec_len;
1159 	}
1160 done:
1161 	if (ret)
1162 		kvfree(imu);
1163 	kvfree(pages);
1164 	return ret;
1165 }
1166 
1167 static int io_buffers_map_alloc(struct io_ring_ctx *ctx, unsigned int nr_args)
1168 {
1169 	ctx->user_bufs = kcalloc(nr_args, sizeof(*ctx->user_bufs), GFP_KERNEL);
1170 	return ctx->user_bufs ? 0 : -ENOMEM;
1171 }
1172 
1173 int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
1174 			    unsigned int nr_args, u64 __user *tags)
1175 {
1176 	struct page *last_hpage = NULL;
1177 	struct io_rsrc_data *data;
1178 	int i, ret;
1179 	struct iovec iov;
1180 
1181 	BUILD_BUG_ON(IORING_MAX_REG_BUFFERS >= (1u << 16));
1182 
1183 	if (ctx->user_bufs)
1184 		return -EBUSY;
1185 	if (!nr_args || nr_args > IORING_MAX_REG_BUFFERS)
1186 		return -EINVAL;
1187 	ret = io_rsrc_data_alloc(ctx, io_rsrc_buf_put, tags, nr_args, &data);
1188 	if (ret)
1189 		return ret;
1190 	ret = io_buffers_map_alloc(ctx, nr_args);
1191 	if (ret) {
1192 		io_rsrc_data_free(data);
1193 		return ret;
1194 	}
1195 
1196 	for (i = 0; i < nr_args; i++, ctx->nr_user_bufs++) {
1197 		if (arg) {
1198 			ret = io_copy_iov(ctx, &iov, arg, i);
1199 			if (ret)
1200 				break;
1201 			ret = io_buffer_validate(&iov);
1202 			if (ret)
1203 				break;
1204 		} else {
1205 			memset(&iov, 0, sizeof(iov));
1206 		}
1207 
1208 		if (!iov.iov_base && *io_get_tag_slot(data, i)) {
1209 			ret = -EINVAL;
1210 			break;
1211 		}
1212 
1213 		ret = io_sqe_buffer_register(ctx, &iov, &ctx->user_bufs[i],
1214 					     &last_hpage);
1215 		if (ret)
1216 			break;
1217 	}
1218 
1219 	WARN_ON_ONCE(ctx->buf_data);
1220 
1221 	ctx->buf_data = data;
1222 	if (ret)
1223 		__io_sqe_buffers_unregister(ctx);
1224 	return ret;
1225 }
1226 
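/*
 * Set up @iter to read from or write to a registered (fixed) buffer.
 * Validates that [buf_addr, buf_addr + len) lies fully inside the
 * registered range, then points the iterator at the right bvecs without
 * walking them one by one.
 */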
1227 int io_import_fixed(int ddir, struct iov_iter *iter,
1228 			   struct io_mapped_ubuf *imu,
1229 			   u64 buf_addr, size_t len)
1230 {
1231 	u64 buf_end;
1232 	size_t offset;
1233 
1234 	if (WARN_ON_ONCE(!imu))
1235 		return -EFAULT;
1236 	if (unlikely(check_add_overflow(buf_addr, (u64)len, &buf_end)))
1237 		return -EFAULT;
1238 	/* not inside the mapped region */
1239 	if (unlikely(buf_addr < imu->ubuf || buf_end > imu->ubuf_end))
1240 		return -EFAULT;
1241 
1242 	/*
1243 	 * buf_addr may not be the start of the registered buffer; set the
1244 	 * size appropriately and advance the iterator to the right offset.
1245 	 */
1246 	offset = buf_addr - imu->ubuf;
1247 	iov_iter_bvec(iter, ddir, imu->bvec, imu->nr_bvecs, offset + len);
1248 
1249 	if (offset) {
1250 		/*
1251 		 * Don't use iov_iter_advance() here, as it's really slow for
1252 		 * using the latter parts of a big fixed buffer - it iterates
1253 		 * over each segment manually. We can cheat a bit here, because
1254 		 * we know that:
1255 		 *
1256 		 * 1) it's a BVEC iter, we set it up
1257 		 * 2) all bvecs are PAGE_SIZE in size, except potentially the
1258 		 *    first and last bvec
1259 		 *
1260 		 * So just find our index, and adjust the iterator afterwards.
1261 		 * If the offset is within the first bvec (or is the whole
1262 		 * first bvec), just adjust the iterator directly. This makes
1263 		 * it easier since we can skip the first segment, which may
1264 		 * not be PAGE_SIZE aligned.
1265 		 */
1266 		const struct bio_vec *bvec = imu->bvec;
1267 
1268 		if (offset <= bvec->bv_len) {
1269 			/*
1270 			 * Note: huge page buffers consist of one large
1271 			 * bvec entry and should always take this path. The other
1272 			 * branch doesn't expect non PAGE_SIZE'd chunks.
1273 			 */
1274 			iter->bvec = bvec;
1275 			iter->nr_segs = bvec->bv_len;
1276 			iter->count -= offset;
1277 			iter->iov_offset = offset;
1278 		} else {
1279 			unsigned long seg_skip;
1280 
1281 			/* skip first vec */
1282 			offset -= bvec->bv_len;
1283 			seg_skip = 1 + (offset >> PAGE_SHIFT);
1284 
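			/*
			 * Example (4K pages): bv_len = 4096 and an original
			 * offset of 10000 leaves offset = 5904 here, so
			 * seg_skip = 1 + (5904 >> 12) = 2 and iov_offset
			 * becomes 5904 & 4095 = 1808, i.e. two full bvecs are
			 * skipped and we start 1808 bytes into the third.
			 */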
1285 			iter->bvec = bvec + seg_skip;
1286 			iter->nr_segs -= seg_skip;
1287 			iter->count -= bvec->bv_len + offset;
1288 			iter->iov_offset = offset & ~PAGE_MASK;
1289 		}
1290 	}
1291 
1292 	return 0;
1293 }
1294