xref: /openbmc/linux/io_uring/rsrc.c (revision 26147da3)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/kernel.h>
3 #include <linux/errno.h>
4 #include <linux/fs.h>
5 #include <linux/file.h>
6 #include <linux/mm.h>
7 #include <linux/slab.h>
8 #include <linux/nospec.h>
9 #include <linux/hugetlb.h>
10 #include <linux/compat.h>
11 #include <linux/io_uring.h>
12 
13 #include <uapi/linux/io_uring.h>
14 
15 #include "io_uring.h"
16 #include "openclose.h"
17 #include "rsrc.h"
18 
19 struct io_rsrc_update {
20 	struct file			*file;
21 	u64				arg;
22 	u32				nr_args;
23 	u32				offset;
24 };
25 
26 static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
27 				  struct io_mapped_ubuf **pimu,
28 				  struct page **last_hpage);
29 
30 /* only define max */
31 #define IORING_MAX_FIXED_FILES	(1U << 20)
32 #define IORING_MAX_REG_BUFFERS	(1U << 14)
33 
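/*
 * Charge @nr_pages of pinned memory against @user's RLIMIT_MEMLOCK. The
 * cmpxchg loop keeps the accounting lock-free; if the addition would go
 * over the limit, nothing is charged and -ENOMEM is returned.
 */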
34 int __io_account_mem(struct user_struct *user, unsigned long nr_pages)
35 {
36 	unsigned long page_limit, cur_pages, new_pages;
37 
38 	if (!nr_pages)
39 		return 0;
40 
41 	/* Don't allow more pages than we can safely lock */
42 	page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
43 
44 	cur_pages = atomic_long_read(&user->locked_vm);
45 	do {
46 		new_pages = cur_pages + nr_pages;
47 		if (new_pages > page_limit)
48 			return -ENOMEM;
49 	} while (!atomic_long_try_cmpxchg(&user->locked_vm,
50 					  &cur_pages, new_pages));
51 	return 0;
52 }
53 
54 static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
55 {
56 	if (ctx->user)
57 		__io_unaccount_mem(ctx->user, nr_pages);
58 
59 	if (ctx->mm_account)
60 		atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
61 }
62 
63 static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
64 {
65 	int ret;
66 
67 	if (ctx->user) {
68 		ret = __io_account_mem(ctx->user, nr_pages);
69 		if (ret)
70 			return ret;
71 	}
72 
73 	if (ctx->mm_account)
74 		atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);
75 
76 	return 0;
77 }
78 
79 static int io_copy_iov(struct io_ring_ctx *ctx, struct iovec *dst,
80 		       void __user *arg, unsigned index)
81 {
82 	struct iovec __user *src;
83 
84 #ifdef CONFIG_COMPAT
85 	if (ctx->compat) {
86 		struct compat_iovec __user *ciovs;
87 		struct compat_iovec ciov;
88 
89 		ciovs = (struct compat_iovec __user *) arg;
90 		if (copy_from_user(&ciov, &ciovs[index], sizeof(ciov)))
91 			return -EFAULT;
92 
93 		dst->iov_base = u64_to_user_ptr((u64)ciov.iov_base);
94 		dst->iov_len = ciov.iov_len;
95 		return 0;
96 	}
97 #endif
98 	src = (struct iovec __user *) arg;
99 	if (copy_from_user(dst, &src[index], sizeof(*dst)))
100 		return -EFAULT;
101 	return 0;
102 }
103 
104 static int io_buffer_validate(struct iovec *iov)
105 {
106 	unsigned long tmp, acct_len = iov->iov_len + (PAGE_SIZE - 1);
107 
108 	/*
109 	 * Don't impose further limits on the size and buffer
110 	 * constraints here, we'll -EINVAL later when IO is
111 	 * submitted if they are wrong.
112 	 */
113 	if (!iov->iov_base)
114 		return iov->iov_len ? -EFAULT : 0;
115 	if (!iov->iov_len)
116 		return -EFAULT;
117 
118 	/* arbitrary limit, but we need something */
119 	if (iov->iov_len > SZ_1G)
120 		return -EFAULT;
121 
122 	if (check_add_overflow((unsigned long)iov->iov_base, acct_len, &tmp))
123 		return -EOVERFLOW;
124 
125 	return 0;
126 }
127 
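/*
 * Tear down one registered buffer slot: unpin every page backing it,
 * return the accounted pages and free the mapping. The shared
 * ctx->dummy_ubuf placeholder is never freed, but the slot is cleared
 * either way.
 */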
128 static void io_buffer_unmap(struct io_ring_ctx *ctx, struct io_mapped_ubuf **slot)
129 {
130 	struct io_mapped_ubuf *imu = *slot;
131 	unsigned int i;
132 
133 	if (imu != ctx->dummy_ubuf) {
134 		for (i = 0; i < imu->nr_bvecs; i++)
135 			unpin_user_page(imu->bvec[i].bv_page);
136 		if (imu->acct_pages)
137 			io_unaccount_mem(ctx, imu->acct_pages);
138 		kvfree(imu);
139 	}
140 	*slot = NULL;
141 }
142 
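/*
 * Drop one queued resource: post the user-supplied tag as a CQE, if one
 * was attached, then release the resource itself through the data's
 * ->do_put callback (file or buffer specific).
 */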
143 static void io_rsrc_put_work_one(struct io_rsrc_data *rsrc_data,
144 				 struct io_rsrc_put *prsrc)
145 {
146 	struct io_ring_ctx *ctx = rsrc_data->ctx;
147 
148 	if (prsrc->tag)
149 		io_post_aux_cqe(ctx, prsrc->tag, 0, 0);
150 	rsrc_data->do_put(ctx, prsrc);
151 }
152 
153 static void __io_rsrc_put_work(struct io_rsrc_node *ref_node)
154 {
155 	struct io_rsrc_data *rsrc_data = ref_node->rsrc_data;
156 
157 	if (likely(!ref_node->empty))
158 		io_rsrc_put_work_one(rsrc_data, &ref_node->item);
159 
160 	io_rsrc_node_destroy(rsrc_data->ctx, ref_node);
161 }
162 
163 void io_rsrc_node_destroy(struct io_ring_ctx *ctx, struct io_rsrc_node *node)
164 {
165 	if (!io_alloc_cache_put(&ctx->rsrc_node_cache, &node->cache))
166 		kfree(node);
167 }
168 
169 void io_rsrc_node_ref_zero(struct io_rsrc_node *node)
170 	__must_hold(&node->rsrc_data->ctx->uring_lock)
171 {
172 	struct io_ring_ctx *ctx = node->rsrc_data->ctx;
173 
174 	while (!list_empty(&ctx->rsrc_ref_list)) {
175 		node = list_first_entry(&ctx->rsrc_ref_list,
176 					    struct io_rsrc_node, node);
177 		/* recycle ref nodes in order */
178 		if (node->refs)
179 			break;
180 		list_del(&node->node);
181 		__io_rsrc_put_work(node);
182 	}
183 	if (list_empty(&ctx->rsrc_ref_list) && unlikely(ctx->rsrc_quiesce))
184 		wake_up_all(&ctx->rsrc_quiesce_wq);
185 }
186 
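/*
 * Get a fresh rsrc node, recycled from the per-ctx cache when possible.
 * The node starts out with a single reference and no resource attached.
 */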
187 struct io_rsrc_node *io_rsrc_node_alloc(struct io_ring_ctx *ctx)
188 {
189 	struct io_rsrc_node *ref_node;
190 	struct io_cache_entry *entry;
191 
192 	entry = io_alloc_cache_get(&ctx->rsrc_node_cache);
193 	if (entry) {
194 		ref_node = container_of(entry, struct io_rsrc_node, cache);
195 	} else {
196 		ref_node = kzalloc(sizeof(*ref_node), GFP_KERNEL);
197 		if (!ref_node)
198 			return NULL;
199 	}
200 
201 	ref_node->rsrc_data = NULL;
202 	ref_node->empty = 0;
203 	ref_node->refs = 1;
204 	return ref_node;
205 }
206 
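/*
 * Quiesce the resource table before unregistering files or buffers:
 * retire the currently active rsrc node, switch the ctx over to a fresh
 * one, then wait (dropping ->uring_lock while sleeping) until every node
 * on rsrc_ref_list has dropped its references. A pending signal aborts
 * the wait with an error unless the list has already drained.
 */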
207 __cold static int io_rsrc_ref_quiesce(struct io_rsrc_data *data,
208 				      struct io_ring_ctx *ctx)
209 {
210 	struct io_rsrc_node *backup;
211 	DEFINE_WAIT(we);
212 	int ret;
213 
214 	/* As we may drop ->uring_lock, another task may have started quiesce */
215 	if (data->quiesce)
216 		return -ENXIO;
217 
218 	backup = io_rsrc_node_alloc(ctx);
219 	if (!backup)
220 		return -ENOMEM;
221 	ctx->rsrc_node->empty = true;
222 	ctx->rsrc_node->rsrc_data = data;
223 	list_add_tail(&ctx->rsrc_node->node, &ctx->rsrc_ref_list);
224 	io_put_rsrc_node(ctx, ctx->rsrc_node);
225 	ctx->rsrc_node = backup;
226 
227 	if (list_empty(&ctx->rsrc_ref_list))
228 		return 0;
229 
230 	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
231 		atomic_set(&ctx->cq_wait_nr, 1);
232 		smp_mb();
233 	}
234 
235 	ctx->rsrc_quiesce++;
236 	data->quiesce = true;
237 	do {
238 		prepare_to_wait(&ctx->rsrc_quiesce_wq, &we, TASK_INTERRUPTIBLE);
239 		mutex_unlock(&ctx->uring_lock);
240 
241 		ret = io_run_task_work_sig(ctx);
242 		if (ret < 0) {
243 			mutex_lock(&ctx->uring_lock);
244 			if (list_empty(&ctx->rsrc_ref_list))
245 				ret = 0;
246 			break;
247 		}
248 
249 		schedule();
250 		__set_current_state(TASK_RUNNING);
251 		mutex_lock(&ctx->uring_lock);
252 		ret = 0;
253 	} while (!list_empty(&ctx->rsrc_ref_list));
254 
255 	finish_wait(&ctx->rsrc_quiesce_wq, &we);
256 	data->quiesce = false;
257 	ctx->rsrc_quiesce--;
258 
259 	if (ctx->flags & IORING_SETUP_DEFER_TASKRUN) {
260 		atomic_set(&ctx->cq_wait_nr, 0);
261 		smp_mb();
262 	}
263 	return ret;
264 }
265 
266 static void io_free_page_table(void **table, size_t size)
267 {
268 	unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);
269 
270 	for (i = 0; i < nr_tables; i++)
271 		kfree(table[i]);
272 	kfree(table);
273 }
274 
275 static void io_rsrc_data_free(struct io_rsrc_data *data)
276 {
277 	size_t size = data->nr * sizeof(data->tags[0][0]);
278 
279 	if (data->tags)
280 		io_free_page_table((void **)data->tags, size);
281 	kfree(data);
282 }
283 
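/*
 * Allocate @size bytes as a table of PAGE_SIZE chunks so that a large
 * tag array doesn't need a single high-order allocation. Torn down
 * again with io_free_page_table().
 */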
284 static __cold void **io_alloc_page_table(size_t size)
285 {
286 	unsigned i, nr_tables = DIV_ROUND_UP(size, PAGE_SIZE);
287 	size_t init_size = size;
288 	void **table;
289 
290 	table = kcalloc(nr_tables, sizeof(*table), GFP_KERNEL_ACCOUNT);
291 	if (!table)
292 		return NULL;
293 
294 	for (i = 0; i < nr_tables; i++) {
295 		unsigned int this_size = min_t(size_t, size, PAGE_SIZE);
296 
297 		table[i] = kzalloc(this_size, GFP_KERNEL_ACCOUNT);
298 		if (!table[i]) {
299 			io_free_page_table(table, init_size);
300 			return NULL;
301 		}
302 		size -= this_size;
303 	}
304 	return table;
305 }
306 
307 __cold static int io_rsrc_data_alloc(struct io_ring_ctx *ctx,
308 				     rsrc_put_fn *do_put, u64 __user *utags,
309 				     unsigned nr, struct io_rsrc_data **pdata)
310 {
311 	struct io_rsrc_data *data;
312 	int ret = 0;
313 	unsigned i;
314 
315 	data = kzalloc(sizeof(*data), GFP_KERNEL);
316 	if (!data)
317 		return -ENOMEM;
318 	data->tags = (u64 **)io_alloc_page_table(nr * sizeof(data->tags[0][0]));
319 	if (!data->tags) {
320 		kfree(data);
321 		return -ENOMEM;
322 	}
323 
324 	data->nr = nr;
325 	data->ctx = ctx;
326 	data->do_put = do_put;
327 	if (utags) {
328 		ret = -EFAULT;
329 		for (i = 0; i < nr; i++) {
330 			u64 *tag_slot = io_get_tag_slot(data, i);
331 
332 			if (copy_from_user(tag_slot, &utags[i],
333 					   sizeof(*tag_slot)))
334 				goto fail;
335 		}
336 	}
337 	*pdata = data;
338 	return 0;
339 fail:
340 	io_rsrc_data_free(data);
341 	return ret;
342 }
343 
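/*
 * Apply a batch of fixed-file table updates. For each entry,
 * IORING_REGISTER_FILES_SKIP leaves the slot alone, fd == -1 clears it,
 * and any other fd replaces its contents. Replaced files are handed to
 * the rsrc node for deferred put, new files are SCM-accounted, and ring
 * fds are rejected. Returns the number of entries processed, or an
 * error if nothing was updated.
 */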
344 static int __io_sqe_files_update(struct io_ring_ctx *ctx,
345 				 struct io_uring_rsrc_update2 *up,
346 				 unsigned nr_args)
347 {
348 	u64 __user *tags = u64_to_user_ptr(up->tags);
349 	__s32 __user *fds = u64_to_user_ptr(up->data);
350 	struct io_rsrc_data *data = ctx->file_data;
351 	struct io_fixed_file *file_slot;
352 	struct file *file;
353 	int fd, i, err = 0;
354 	unsigned int done;
355 
356 	if (!ctx->file_data)
357 		return -ENXIO;
358 	if (up->offset + nr_args > ctx->nr_user_files)
359 		return -EINVAL;
360 
361 	for (done = 0; done < nr_args; done++) {
362 		u64 tag = 0;
363 
364 		if ((tags && copy_from_user(&tag, &tags[done], sizeof(tag))) ||
365 		    copy_from_user(&fd, &fds[done], sizeof(fd))) {
366 			err = -EFAULT;
367 			break;
368 		}
369 		if ((fd == IORING_REGISTER_FILES_SKIP || fd == -1) && tag) {
370 			err = -EINVAL;
371 			break;
372 		}
373 		if (fd == IORING_REGISTER_FILES_SKIP)
374 			continue;
375 
376 		i = array_index_nospec(up->offset + done, ctx->nr_user_files);
377 		file_slot = io_fixed_file_slot(&ctx->file_table, i);
378 
379 		if (file_slot->file_ptr) {
380 			file = (struct file *)(file_slot->file_ptr & FFS_MASK);
381 			err = io_queue_rsrc_removal(data, i, file);
382 			if (err)
383 				break;
384 			file_slot->file_ptr = 0;
385 			io_file_bitmap_clear(&ctx->file_table, i);
386 		}
387 		if (fd != -1) {
388 			file = fget(fd);
389 			if (!file) {
390 				err = -EBADF;
391 				break;
392 			}
393 			/*
394 			 * Don't allow io_uring instances to be registered. If
395 			 * UNIX isn't enabled, then this causes a reference
396 			 * cycle and this instance can never get freed. If UNIX
397 			 * is enabled we'll handle it just fine, but there's
398 			 * still no point in allowing a ring fd as it doesn't
399 			 * support regular read/write anyway.
400 			 */
401 			if (io_is_uring_fops(file)) {
402 				fput(file);
403 				err = -EBADF;
404 				break;
405 			}
406 			err = io_scm_file_account(ctx, file);
407 			if (err) {
408 				fput(file);
409 				break;
410 			}
411 			*io_get_tag_slot(data, i) = tag;
412 			io_fixed_file_set(file_slot, file);
413 			io_file_bitmap_set(&ctx->file_table, i);
414 		}
415 	}
416 	return done ? done : err;
417 }
418 
419 static int __io_sqe_buffers_update(struct io_ring_ctx *ctx,
420 				   struct io_uring_rsrc_update2 *up,
421 				   unsigned int nr_args)
422 {
423 	u64 __user *tags = u64_to_user_ptr(up->tags);
424 	struct iovec iov, __user *iovs = u64_to_user_ptr(up->data);
425 	struct page *last_hpage = NULL;
426 	__u32 done;
427 	int i, err;
428 
429 	if (!ctx->buf_data)
430 		return -ENXIO;
431 	if (up->offset + nr_args > ctx->nr_user_bufs)
432 		return -EINVAL;
433 
434 	for (done = 0; done < nr_args; done++) {
435 		struct io_mapped_ubuf *imu;
436 		u64 tag = 0;
437 
438 		err = io_copy_iov(ctx, &iov, iovs, done);
439 		if (err)
440 			break;
441 		if (tags && copy_from_user(&tag, &tags[done], sizeof(tag))) {
442 			err = -EFAULT;
443 			break;
444 		}
445 		err = io_buffer_validate(&iov);
446 		if (err)
447 			break;
448 		if (!iov.iov_base && tag) {
449 			err = -EINVAL;
450 			break;
451 		}
452 		err = io_sqe_buffer_register(ctx, &iov, &imu, &last_hpage);
453 		if (err)
454 			break;
455 
456 		i = array_index_nospec(up->offset + done, ctx->nr_user_bufs);
457 		if (ctx->user_bufs[i] != ctx->dummy_ubuf) {
458 			err = io_queue_rsrc_removal(ctx->buf_data, i,
459 						    ctx->user_bufs[i]);
460 			if (unlikely(err)) {
461 				io_buffer_unmap(ctx, &imu);
462 				break;
463 			}
464 			ctx->user_bufs[i] = ctx->dummy_ubuf;
465 		}
466 
467 		ctx->user_bufs[i] = imu;
468 		*io_get_tag_slot(ctx->buf_data, i) = tag;
469 	}
470 	return done ? done : err;
471 }
472 
473 static int __io_register_rsrc_update(struct io_ring_ctx *ctx, unsigned type,
474 				     struct io_uring_rsrc_update2 *up,
475 				     unsigned nr_args)
476 {
477 	__u32 tmp;
478 
479 	lockdep_assert_held(&ctx->uring_lock);
480 
481 	if (check_add_overflow(up->offset, nr_args, &tmp))
482 		return -EOVERFLOW;
483 
484 	switch (type) {
485 	case IORING_RSRC_FILE:
486 		return __io_sqe_files_update(ctx, up, nr_args);
487 	case IORING_RSRC_BUFFER:
488 		return __io_sqe_buffers_update(ctx, up, nr_args);
489 	}
490 	return -EINVAL;
491 }
492 
493 int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
494 			     unsigned nr_args)
495 {
496 	struct io_uring_rsrc_update2 up;
497 
498 	if (!nr_args)
499 		return -EINVAL;
500 	memset(&up, 0, sizeof(up));
501 	if (copy_from_user(&up, arg, sizeof(struct io_uring_rsrc_update)))
502 		return -EFAULT;
503 	if (up.resv || up.resv2)
504 		return -EINVAL;
505 	return __io_register_rsrc_update(ctx, IORING_RSRC_FILE, &up, nr_args);
506 }
507 
508 int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
509 			    unsigned size, unsigned type)
510 {
511 	struct io_uring_rsrc_update2 up;
512 
513 	if (size != sizeof(up))
514 		return -EINVAL;
515 	if (copy_from_user(&up, arg, sizeof(up)))
516 		return -EFAULT;
517 	if (!up.nr || up.resv || up.resv2)
518 		return -EINVAL;
519 	return __io_register_rsrc_update(ctx, type, &up, up.nr);
520 }
521 
522 __cold int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
523 			    unsigned int size, unsigned int type)
524 {
525 	struct io_uring_rsrc_register rr;
526 
527 	/* keep it extendible */
528 	if (size != sizeof(rr))
529 		return -EINVAL;
530 
531 	memset(&rr, 0, sizeof(rr));
532 	if (copy_from_user(&rr, arg, size))
533 		return -EFAULT;
534 	if (!rr.nr || rr.resv2)
535 		return -EINVAL;
536 	if (rr.flags & ~IORING_RSRC_REGISTER_SPARSE)
537 		return -EINVAL;
538 
539 	switch (type) {
540 	case IORING_RSRC_FILE:
541 		if (rr.flags & IORING_RSRC_REGISTER_SPARSE && rr.data)
542 			break;
543 		return io_sqe_files_register(ctx, u64_to_user_ptr(rr.data),
544 					     rr.nr, u64_to_user_ptr(rr.tags));
545 	case IORING_RSRC_BUFFER:
546 		if (rr.flags & IORING_RSRC_REGISTER_SPARSE && rr.data)
547 			break;
548 		return io_sqe_buffers_register(ctx, u64_to_user_ptr(rr.data),
549 					       rr.nr, u64_to_user_ptr(rr.tags));
550 	}
551 	return -EINVAL;
552 }
553 
554 int io_files_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
555 {
556 	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
557 
558 	if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT)))
559 		return -EINVAL;
560 	if (sqe->rw_flags || sqe->splice_fd_in)
561 		return -EINVAL;
562 
563 	up->offset = READ_ONCE(sqe->off);
564 	up->nr_args = READ_ONCE(sqe->len);
565 	if (!up->nr_args)
566 		return -EINVAL;
567 	up->arg = READ_ONCE(sqe->addr);
568 	return 0;
569 }
570 
571 static int io_files_update_with_index_alloc(struct io_kiocb *req,
572 					    unsigned int issue_flags)
573 {
574 	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
575 	__s32 __user *fds = u64_to_user_ptr(up->arg);
576 	unsigned int done;
577 	struct file *file;
578 	int ret, fd;
579 
580 	if (!req->ctx->file_data)
581 		return -ENXIO;
582 
583 	for (done = 0; done < up->nr_args; done++) {
584 		if (copy_from_user(&fd, &fds[done], sizeof(fd))) {
585 			ret = -EFAULT;
586 			break;
587 		}
588 
589 		file = fget(fd);
590 		if (!file) {
591 			ret = -EBADF;
592 			break;
593 		}
594 		ret = io_fixed_fd_install(req, issue_flags, file,
595 					  IORING_FILE_INDEX_ALLOC);
596 		if (ret < 0)
597 			break;
598 		if (copy_to_user(&fds[done], &ret, sizeof(ret))) {
599 			__io_close_fixed(req->ctx, issue_flags, ret);
600 			ret = -EFAULT;
601 			break;
602 		}
603 	}
604 
605 	if (done)
606 		return done;
607 	return ret;
608 }
609 
610 int io_files_update(struct io_kiocb *req, unsigned int issue_flags)
611 {
612 	struct io_rsrc_update *up = io_kiocb_to_cmd(req, struct io_rsrc_update);
613 	struct io_ring_ctx *ctx = req->ctx;
614 	struct io_uring_rsrc_update2 up2;
615 	int ret;
616 
617 	up2.offset = up->offset;
618 	up2.data = up->arg;
619 	up2.nr = 0;
620 	up2.tags = 0;
621 	up2.resv = 0;
622 	up2.resv2 = 0;
623 
624 	if (up->offset == IORING_FILE_INDEX_ALLOC) {
625 		ret = io_files_update_with_index_alloc(req, issue_flags);
626 	} else {
627 		io_ring_submit_lock(ctx, issue_flags);
628 		ret = __io_register_rsrc_update(ctx, IORING_RSRC_FILE,
629 						&up2, up->nr_args);
630 		io_ring_submit_unlock(ctx, issue_flags);
631 	}
632 
633 	if (ret < 0)
634 		req_set_fail(req);
635 	io_req_set_res(req, ret, 0);
636 	return IOU_OK;
637 }
638 
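/*
 * Defer freeing of a removed or replaced resource: stash it (and its
 * tag) in the currently active rsrc node, put that node on the ref list
 * and switch the ctx to a newly allocated node. The resource is only
 * put once all requests still referencing the old node have completed.
 */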
639 int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx, void *rsrc)
640 {
641 	struct io_ring_ctx *ctx = data->ctx;
642 	struct io_rsrc_node *node = ctx->rsrc_node;
643 	u64 *tag_slot = io_get_tag_slot(data, idx);
644 
645 	ctx->rsrc_node = io_rsrc_node_alloc(ctx);
646 	if (unlikely(!ctx->rsrc_node)) {
647 		ctx->rsrc_node = node;
648 		return -ENOMEM;
649 	}
650 
651 	node->item.rsrc = rsrc;
652 	node->item.tag = *tag_slot;
653 	*tag_slot = 0;
654 
655 	node->rsrc_data = data;
656 	list_add_tail(&node->node, &ctx->rsrc_ref_list);
657 	io_put_rsrc_node(ctx, node);
658 	return 0;
659 }
660 
661 void __io_sqe_files_unregister(struct io_ring_ctx *ctx)
662 {
663 	int i;
664 
665 	for (i = 0; i < ctx->nr_user_files; i++) {
666 		struct file *file = io_file_from_index(&ctx->file_table, i);
667 
668 		/* skip scm accounted files, they'll be freed by ->ring_sock */
669 		if (!file || io_file_need_scm(file))
670 			continue;
671 		io_file_bitmap_clear(&ctx->file_table, i);
672 		fput(file);
673 	}
674 
675 #if defined(CONFIG_UNIX)
676 	if (ctx->ring_sock) {
677 		struct sock *sock = ctx->ring_sock->sk;
678 		struct sk_buff *skb;
679 
680 		while ((skb = skb_dequeue(&sock->sk_receive_queue)) != NULL)
681 			kfree_skb(skb);
682 	}
683 #endif
684 	io_free_file_tables(&ctx->file_table);
685 	io_file_table_set_alloc_range(ctx, 0, 0);
686 	io_rsrc_data_free(ctx->file_data);
687 	ctx->file_data = NULL;
688 	ctx->nr_user_files = 0;
689 }
690 
691 int io_sqe_files_unregister(struct io_ring_ctx *ctx)
692 {
693 	unsigned nr = ctx->nr_user_files;
694 	int ret;
695 
696 	if (!ctx->file_data)
697 		return -ENXIO;
698 
699 	/*
700 	 * Quiesce may unlock ->uring_lock; while it's not held, prevent
701 	 * new requests from using the table.
702 	 */
703 	ctx->nr_user_files = 0;
704 	ret = io_rsrc_ref_quiesce(ctx->file_data, ctx);
705 	ctx->nr_user_files = nr;
706 	if (!ret)
707 		__io_sqe_files_unregister(ctx);
708 	return ret;
709 }
710 
711 /*
712  * Ensure the UNIX gc is aware of our file set, so we are certain that
713  * the io_uring can be safely unregistered on process exit, even if we have
714  * loops in the file referencing. We account only files that can hold other
715  * files because otherwise they can't form a loop and so are not interesting
716  * for GC.
717  */
718 int __io_scm_file_account(struct io_ring_ctx *ctx, struct file *file)
719 {
720 #if defined(CONFIG_UNIX)
721 	struct sock *sk = ctx->ring_sock->sk;
722 	struct sk_buff_head *head = &sk->sk_receive_queue;
723 	struct scm_fp_list *fpl;
724 	struct sk_buff *skb;
725 
726 	if (likely(!io_file_need_scm(file)))
727 		return 0;
728 
729 	/*
730 	 * See if we can merge this file into an existing skb SCM_RIGHTS
731 	 * file set. If there's no room, fall back to allocating a new skb
732 	 * and filling it in.
733 	 */
734 	spin_lock_irq(&head->lock);
735 	skb = skb_peek(head);
736 	if (skb && UNIXCB(skb).fp->count < SCM_MAX_FD)
737 		__skb_unlink(skb, head);
738 	else
739 		skb = NULL;
740 	spin_unlock_irq(&head->lock);
741 
742 	if (!skb) {
743 		fpl = kzalloc(sizeof(*fpl), GFP_KERNEL);
744 		if (!fpl)
745 			return -ENOMEM;
746 
747 		skb = alloc_skb(0, GFP_KERNEL);
748 		if (!skb) {
749 			kfree(fpl);
750 			return -ENOMEM;
751 		}
752 
753 		fpl->user = get_uid(current_user());
754 		fpl->max = SCM_MAX_FD;
755 		fpl->count = 0;
756 
757 		UNIXCB(skb).fp = fpl;
758 		skb->sk = sk;
759 		skb->scm_io_uring = 1;
760 		skb->destructor = unix_destruct_scm;
761 		refcount_add(skb->truesize, &sk->sk_wmem_alloc);
762 	}
763 
764 	fpl = UNIXCB(skb).fp;
765 	fpl->fp[fpl->count++] = get_file(file);
766 	unix_inflight(fpl->user, file);
767 	skb_queue_head(head, skb);
768 	fput(file);
769 #endif
770 	return 0;
771 }
772 
773 static __cold void io_rsrc_file_scm_put(struct io_ring_ctx *ctx, struct file *file)
774 {
775 #if defined(CONFIG_UNIX)
776 	struct sock *sock = ctx->ring_sock->sk;
777 	struct sk_buff_head list, *head = &sock->sk_receive_queue;
778 	struct sk_buff *skb;
779 	int i;
780 
781 	__skb_queue_head_init(&list);
782 
783 	/*
784 	 * Find the skb that holds this file in its SCM_RIGHTS. When found,
785 	 * remove this entry and rearrange the file array.
786 	 */
787 	skb = skb_dequeue(head);
788 	while (skb) {
789 		struct scm_fp_list *fp;
790 
791 		fp = UNIXCB(skb).fp;
792 		for (i = 0; i < fp->count; i++) {
793 			int left;
794 
795 			if (fp->fp[i] != file)
796 				continue;
797 
798 			unix_notinflight(fp->user, fp->fp[i]);
799 			left = fp->count - 1 - i;
800 			if (left) {
801 				memmove(&fp->fp[i], &fp->fp[i + 1],
802 						left * sizeof(struct file *));
803 			}
804 			fp->count--;
805 			if (!fp->count) {
806 				kfree_skb(skb);
807 				skb = NULL;
808 			} else {
809 				__skb_queue_tail(&list, skb);
810 			}
811 			fput(file);
812 			file = NULL;
813 			break;
814 		}
815 
816 		if (!file)
817 			break;
818 
819 		__skb_queue_tail(&list, skb);
820 
821 		skb = skb_dequeue(head);
822 	}
823 
824 	if (skb_peek(&list)) {
825 		spin_lock_irq(&head->lock);
826 		while ((skb = __skb_dequeue(&list)) != NULL)
827 			__skb_queue_tail(head, skb);
828 		spin_unlock_irq(&head->lock);
829 	}
830 #endif
831 }
832 
833 static void io_rsrc_file_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
834 {
835 	struct file *file = prsrc->file;
836 
837 	if (likely(!io_file_need_scm(file)))
838 		fput(file);
839 	else
840 		io_rsrc_file_scm_put(ctx, file);
841 }
842 
843 int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
844 			  unsigned nr_args, u64 __user *tags)
845 {
846 	__s32 __user *fds = (__s32 __user *) arg;
847 	struct file *file;
848 	int fd, ret;
849 	unsigned i;
850 
851 	if (ctx->file_data)
852 		return -EBUSY;
853 	if (!nr_args)
854 		return -EINVAL;
855 	if (nr_args > IORING_MAX_FIXED_FILES)
856 		return -EMFILE;
857 	if (nr_args > rlimit(RLIMIT_NOFILE))
858 		return -EMFILE;
859 	ret = io_rsrc_data_alloc(ctx, io_rsrc_file_put, tags, nr_args,
860 				 &ctx->file_data);
861 	if (ret)
862 		return ret;
863 
864 	if (!io_alloc_file_tables(&ctx->file_table, nr_args)) {
865 		io_rsrc_data_free(ctx->file_data);
866 		ctx->file_data = NULL;
867 		return -ENOMEM;
868 	}
869 
870 	for (i = 0; i < nr_args; i++, ctx->nr_user_files++) {
871 		struct io_fixed_file *file_slot;
872 
873 		if (fds && copy_from_user(&fd, &fds[i], sizeof(fd))) {
874 			ret = -EFAULT;
875 			goto fail;
876 		}
877 		/* allow sparse sets */
878 		if (!fds || fd == -1) {
879 			ret = -EINVAL;
880 			if (unlikely(*io_get_tag_slot(ctx->file_data, i)))
881 				goto fail;
882 			continue;
883 		}
884 
885 		file = fget(fd);
886 		ret = -EBADF;
887 		if (unlikely(!file))
888 			goto fail;
889 
890 		/*
891 		 * Don't allow io_uring instances to be registered. If UNIX
892 		 * isn't enabled, then this causes a reference cycle and this
893 		 * instance can never get freed. If UNIX is enabled we'll
894 		 * handle it just fine, but there's still no point in allowing
895 		 * a ring fd as it doesn't support regular read/write anyway.
896 		 */
897 		if (io_is_uring_fops(file)) {
898 			fput(file);
899 			goto fail;
900 		}
901 		ret = io_scm_file_account(ctx, file);
902 		if (ret) {
903 			fput(file);
904 			goto fail;
905 		}
906 		file_slot = io_fixed_file_slot(&ctx->file_table, i);
907 		io_fixed_file_set(file_slot, file);
908 		io_file_bitmap_set(&ctx->file_table, i);
909 	}
910 
911 	/* default it to the whole table */
912 	io_file_table_set_alloc_range(ctx, 0, ctx->nr_user_files);
913 	return 0;
914 fail:
915 	__io_sqe_files_unregister(ctx);
916 	return ret;
917 }
918 
919 static void io_rsrc_buf_put(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc)
920 {
921 	io_buffer_unmap(ctx, &prsrc->buf);
922 	prsrc->buf = NULL;
923 }
924 
925 void __io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
926 {
927 	unsigned int i;
928 
929 	for (i = 0; i < ctx->nr_user_bufs; i++)
930 		io_buffer_unmap(ctx, &ctx->user_bufs[i]);
931 	kfree(ctx->user_bufs);
932 	io_rsrc_data_free(ctx->buf_data);
933 	ctx->user_bufs = NULL;
934 	ctx->buf_data = NULL;
935 	ctx->nr_user_bufs = 0;
936 }
937 
938 int io_sqe_buffers_unregister(struct io_ring_ctx *ctx)
939 {
940 	unsigned nr = ctx->nr_user_bufs;
941 	int ret;
942 
943 	if (!ctx->buf_data)
944 		return -ENXIO;
945 
946 	/*
947 	 * Quiesce may unlock ->uring_lock; while it's not held, prevent
948 	 * new requests from using the table.
949 	 */
950 	ctx->nr_user_bufs = 0;
951 	ret = io_rsrc_ref_quiesce(ctx->buf_data, ctx);
952 	ctx->nr_user_bufs = nr;
953 	if (!ret)
954 		__io_sqe_buffers_unregister(ctx);
955 	return ret;
956 }
957 
958 /*
959  * Not super efficient, but this only happens at registration time. And we do cache
960  * the last compound head, so generally we'll only do a full search if we don't
961  * match that one.
962  *
963  * We check if the given compound head page has already been accounted, to
964  * avoid double accounting it. This allows us to account the full size of the
965  * page, not just the constituent pages of a huge page.
966  */
967 static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
968 				  int nr_pages, struct page *hpage)
969 {
970 	int i, j;
971 
972 	/* check current page array */
973 	for (i = 0; i < nr_pages; i++) {
974 		if (!PageCompound(pages[i]))
975 			continue;
976 		if (compound_head(pages[i]) == hpage)
977 			return true;
978 	}
979 
980 	/* check previously registered pages */
981 	for (i = 0; i < ctx->nr_user_bufs; i++) {
982 		struct io_mapped_ubuf *imu = ctx->user_bufs[i];
983 
984 		for (j = 0; j < imu->nr_bvecs; j++) {
985 			if (!PageCompound(imu->bvec[j].bv_page))
986 				continue;
987 			if (compound_head(imu->bvec[j].bv_page) == hpage)
988 				return true;
989 		}
990 	}
991 
992 	return false;
993 }
994 
995 static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
996 				 int nr_pages, struct io_mapped_ubuf *imu,
997 				 struct page **last_hpage)
998 {
999 	int i, ret;
1000 
1001 	imu->acct_pages = 0;
1002 	for (i = 0; i < nr_pages; i++) {
1003 		if (!PageCompound(pages[i])) {
1004 			imu->acct_pages++;
1005 		} else {
1006 			struct page *hpage;
1007 
1008 			hpage = compound_head(pages[i]);
1009 			if (hpage == *last_hpage)
1010 				continue;
1011 			*last_hpage = hpage;
1012 			if (headpage_already_acct(ctx, pages, i, hpage))
1013 				continue;
1014 			imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;
1015 		}
1016 	}
1017 
1018 	if (!imu->acct_pages)
1019 		return 0;
1020 
1021 	ret = io_account_mem(ctx, imu->acct_pages);
1022 	if (ret)
1023 		imu->acct_pages = 0;
1024 	return ret;
1025 }
1026 
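/*
 * Pin the user pages backing [ubuf, ubuf + len). All pages must belong
 * to the same file mapping, and file-backed memory is only allowed for
 * shmem and hugetlbfs. Returns the pinned page array (with the count in
 * @npages) on success, or an ERR_PTR() on failure.
 */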
1027 struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages)
1028 {
1029 	unsigned long start, end, nr_pages;
1030 	struct vm_area_struct **vmas = NULL;
1031 	struct page **pages = NULL;
1032 	int i, pret, ret = -ENOMEM;
1033 
1034 	end = (ubuf + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1035 	start = ubuf >> PAGE_SHIFT;
1036 	nr_pages = end - start;
1037 
1038 	pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
1039 	if (!pages)
1040 		goto done;
1041 
1042 	vmas = kvmalloc_array(nr_pages, sizeof(struct vm_area_struct *),
1043 			      GFP_KERNEL);
1044 	if (!vmas)
1045 		goto done;
1046 
1047 	ret = 0;
1048 	mmap_read_lock(current->mm);
1049 	pret = pin_user_pages(ubuf, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
1050 			      pages, vmas);
1051 	if (pret == nr_pages) {
1052 		struct file *file = vmas[0]->vm_file;
1053 
1054 		/* don't support file backed memory */
1055 		for (i = 0; i < nr_pages; i++) {
1056 			if (vmas[i]->vm_file != file) {
1057 				ret = -EINVAL;
1058 				break;
1059 			}
1060 			if (!file)
1061 				continue;
1062 			if (!vma_is_shmem(vmas[i]) && !is_file_hugepages(file)) {
1063 				ret = -EOPNOTSUPP;
1064 				break;
1065 			}
1066 		}
1067 		*npages = nr_pages;
1068 	} else {
1069 		ret = pret < 0 ? pret : -EFAULT;
1070 	}
1071 	mmap_read_unlock(current->mm);
1072 	if (ret) {
1073 		/*
1074 		 * if we did partial map, or found file backed vmas,
1075 		 * release any pages we did get
1076 		 */
1077 		if (pret > 0)
1078 			unpin_user_pages(pages, pret);
1079 		goto done;
1080 	}
1081 	ret = 0;
1082 done:
1083 	kvfree(vmas);
1084 	if (ret < 0) {
1085 		kvfree(pages);
1086 		pages = ERR_PTR(ret);
1087 	}
1088 	return pages;
1089 }
1090 
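/*
 * Register a single user buffer: pin its pages, account the pinned
 * memory and build the bvec table used at I/O time. A NULL iov_base
 * installs the dummy_ubuf placeholder for a sparse slot. If every page
 * belongs to the same folio (e.g. a huge page), the range is collapsed
 * into one bvec entry.
 */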
1091 static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
1092 				  struct io_mapped_ubuf **pimu,
1093 				  struct page **last_hpage)
1094 {
1095 	struct io_mapped_ubuf *imu = NULL;
1096 	struct page **pages = NULL;
1097 	unsigned long off;
1098 	size_t size;
1099 	int ret, nr_pages, i;
1100 	struct folio *folio = NULL;
1101 
1102 	*pimu = ctx->dummy_ubuf;
1103 	if (!iov->iov_base)
1104 		return 0;
1105 
1106 	ret = -ENOMEM;
1107 	pages = io_pin_pages((unsigned long) iov->iov_base, iov->iov_len,
1108 				&nr_pages);
1109 	if (IS_ERR(pages)) {
1110 		ret = PTR_ERR(pages);
1111 		pages = NULL;
1112 		goto done;
1113 	}
1114 
1115 	/* If it's backed by a huge page, try to coalesce the pages into a single bvec entry */
1116 	if (nr_pages > 1) {
1117 		folio = page_folio(pages[0]);
1118 		for (i = 1; i < nr_pages; i++) {
1119 			if (page_folio(pages[i]) != folio) {
1120 				folio = NULL;
1121 				break;
1122 			}
1123 		}
1124 		if (folio) {
1125 			/*
1126 			 * The pages are bound to the folio, so this doesn't
1127 			 * actually unpin them but drops all but one reference,
1128 			 * which is usually put down by io_buffer_unmap().
1129 			 * Note, needs a better helper.
1130 			 */
1131 			unpin_user_pages(&pages[1], nr_pages - 1);
1132 			nr_pages = 1;
1133 		}
1134 	}
1135 
1136 	imu = kvmalloc(struct_size(imu, bvec, nr_pages), GFP_KERNEL);
1137 	if (!imu)
1138 		goto done;
1139 
1140 	ret = io_buffer_account_pin(ctx, pages, nr_pages, imu, last_hpage);
1141 	if (ret) {
1142 		unpin_user_pages(pages, nr_pages);
1143 		goto done;
1144 	}
1145 
1146 	off = (unsigned long) iov->iov_base & ~PAGE_MASK;
1147 	size = iov->iov_len;
1148 	/* store original address for later verification */
1149 	imu->ubuf = (unsigned long) iov->iov_base;
1150 	imu->ubuf_end = imu->ubuf + iov->iov_len;
1151 	imu->nr_bvecs = nr_pages;
1152 	*pimu = imu;
1153 	ret = 0;
1154 
1155 	if (folio) {
1156 		bvec_set_page(&imu->bvec[0], pages[0], size, off);
1157 		goto done;
1158 	}
1159 	for (i = 0; i < nr_pages; i++) {
1160 		size_t vec_len;
1161 
1162 		vec_len = min_t(size_t, size, PAGE_SIZE - off);
1163 		bvec_set_page(&imu->bvec[i], pages[i], vec_len, off);
1164 		off = 0;
1165 		size -= vec_len;
1166 	}
1167 done:
1168 	if (ret)
1169 		kvfree(imu);
1170 	kvfree(pages);
1171 	return ret;
1172 }
1173 
1174 static int io_buffers_map_alloc(struct io_ring_ctx *ctx, unsigned int nr_args)
1175 {
1176 	ctx->user_bufs = kcalloc(nr_args, sizeof(*ctx->user_bufs), GFP_KERNEL);
1177 	return ctx->user_bufs ? 0 : -ENOMEM;
1178 }
1179 
1180 int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
1181 			    unsigned int nr_args, u64 __user *tags)
1182 {
1183 	struct page *last_hpage = NULL;
1184 	struct io_rsrc_data *data;
1185 	int i, ret;
1186 	struct iovec iov;
1187 
1188 	BUILD_BUG_ON(IORING_MAX_REG_BUFFERS >= (1u << 16));
1189 
1190 	if (ctx->user_bufs)
1191 		return -EBUSY;
1192 	if (!nr_args || nr_args > IORING_MAX_REG_BUFFERS)
1193 		return -EINVAL;
1194 	ret = io_rsrc_data_alloc(ctx, io_rsrc_buf_put, tags, nr_args, &data);
1195 	if (ret)
1196 		return ret;
1197 	ret = io_buffers_map_alloc(ctx, nr_args);
1198 	if (ret) {
1199 		io_rsrc_data_free(data);
1200 		return ret;
1201 	}
1202 
1203 	for (i = 0; i < nr_args; i++, ctx->nr_user_bufs++) {
1204 		if (arg) {
1205 			ret = io_copy_iov(ctx, &iov, arg, i);
1206 			if (ret)
1207 				break;
1208 			ret = io_buffer_validate(&iov);
1209 			if (ret)
1210 				break;
1211 		} else {
1212 			memset(&iov, 0, sizeof(iov));
1213 		}
1214 
1215 		if (!iov.iov_base && *io_get_tag_slot(data, i)) {
1216 			ret = -EINVAL;
1217 			break;
1218 		}
1219 
1220 		ret = io_sqe_buffer_register(ctx, &iov, &ctx->user_bufs[i],
1221 					     &last_hpage);
1222 		if (ret)
1223 			break;
1224 	}
1225 
1226 	WARN_ON_ONCE(ctx->buf_data);
1227 
1228 	ctx->buf_data = data;
1229 	if (ret)
1230 		__io_sqe_buffers_unregister(ctx);
1231 	return ret;
1232 }
1233 
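/*
 * Set up @iter over a registered (fixed) buffer for the given range.
 * The range must fall entirely inside the registered region; the
 * iterator is then pointed at the right bvec and offset directly, which
 * is much cheaper than iov_iter_advance() for large buffers.
 */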
1234 int io_import_fixed(int ddir, struct iov_iter *iter,
1235 			   struct io_mapped_ubuf *imu,
1236 			   u64 buf_addr, size_t len)
1237 {
1238 	u64 buf_end;
1239 	size_t offset;
1240 
1241 	if (WARN_ON_ONCE(!imu))
1242 		return -EFAULT;
1243 	if (unlikely(check_add_overflow(buf_addr, (u64)len, &buf_end)))
1244 		return -EFAULT;
1245 	/* not inside the mapped region */
1246 	if (unlikely(buf_addr < imu->ubuf || buf_end > imu->ubuf_end))
1247 		return -EFAULT;
1248 
1249 	/*
1250 	 * Might not be the start of the buffer; set the size appropriately
1251 	 * and advance to the beginning.
1252 	 */
1253 	offset = buf_addr - imu->ubuf;
1254 	iov_iter_bvec(iter, ddir, imu->bvec, imu->nr_bvecs, offset + len);
1255 
1256 	if (offset) {
1257 		/*
1258 		 * Don't use iov_iter_advance() here, as it's really slow for
1259 		 * using the latter parts of a big fixed buffer - it iterates
1260 		 * over each segment manually. We can cheat a bit here, because
1261 		 * we know that:
1262 		 *
1263 		 * 1) it's a BVEC iter, we set it up
1264 		 * 2) all bvecs are PAGE_SIZE in size, except potentially the
1265 		 *    first and last bvec
1266 		 *
1267 		 * So just find our index, and adjust the iterator afterwards.
1268 		 * If the offset is within the first bvec (or is the whole first
1269 		 * bvec), handle it directly. That makes the other branch easier,
1270 		 * since it can then just skip the first segment, which may not
1271 		 * be PAGE_SIZE aligned.
1272 		 */
1273 		const struct bio_vec *bvec = imu->bvec;
1274 
1275 		if (offset <= bvec->bv_len) {
1276 			/*
1277 			 * Note that huge page buffers consist of one large
1278 			 * bvec entry and should always go this way. The other
1279 			 * branch doesn't expect non-PAGE_SIZE'd chunks.
1280 			 */
1281 			iter->bvec = bvec;
1282 			iter->nr_segs = bvec->bv_len;
1283 			iter->count -= offset;
1284 			iter->iov_offset = offset;
1285 		} else {
1286 			unsigned long seg_skip;
1287 
1288 			/* skip first vec */
1289 			offset -= bvec->bv_len;
1290 			seg_skip = 1 + (offset >> PAGE_SHIFT);
1291 
1292 			iter->bvec = bvec + seg_skip;
1293 			iter->nr_segs -= seg_skip;
1294 			iter->count -= bvec->bv_len + offset;
1295 			iter->iov_offset = offset & ~PAGE_MASK;
1296 		}
1297 	}
1298 
1299 	return 0;
1300 }
1301