xref: /openbmc/linux/fs/remap_range.c (revision 0b26ca68)
1 // SPDX-License-Identifier: GPL-2.0-only
2 #include <linux/slab.h>
3 #include <linux/stat.h>
4 #include <linux/sched/xacct.h>
5 #include <linux/fcntl.h>
6 #include <linux/file.h>
7 #include <linux/uio.h>
8 #include <linux/fsnotify.h>
9 #include <linux/security.h>
10 #include <linux/export.h>
11 #include <linux/syscalls.h>
12 #include <linux/pagemap.h>
13 #include <linux/splice.h>
14 #include <linux/compat.h>
15 #include <linux/mount.h>
16 #include <linux/fs.h>
17 #include "internal.h"
18 
19 #include <linux/uaccess.h>
20 #include <asm/unistd.h>
21 
22 /*
23  * Performs necessary checks before doing a clone.
24  *
25  * May adjust the number of bytes to clone via the @req_count argument.
26  * Returns the error code that the caller should return, or zero if the
27  * clone should be allowed.
28  */
29 static int generic_remap_checks(struct file *file_in, loff_t pos_in,
30 				struct file *file_out, loff_t pos_out,
31 				loff_t *req_count, unsigned int remap_flags)
32 {
33 	struct inode *inode_in = file_in->f_mapping->host;
34 	struct inode *inode_out = file_out->f_mapping->host;
35 	uint64_t count = *req_count;
36 	uint64_t bcount;
37 	loff_t size_in, size_out;
38 	loff_t bs = inode_out->i_sb->s_blocksize;
39 	int ret;
40 
41 	/* The start of both ranges must be aligned to an fs block. */
42 	if (!IS_ALIGNED(pos_in, bs) || !IS_ALIGNED(pos_out, bs))
43 		return -EINVAL;
44 
45 	/* Ensure offsets don't wrap. */
46 	if (pos_in + count < pos_in || pos_out + count < pos_out)
47 		return -EINVAL;
48 
49 	size_in = i_size_read(inode_in);
50 	size_out = i_size_read(inode_out);
51 
52 	/* Dedupe requires both ranges to be within EOF. */
53 	if ((remap_flags & REMAP_FILE_DEDUP) &&
54 	    (pos_in >= size_in || pos_in + count > size_in ||
55 	     pos_out >= size_out || pos_out + count > size_out))
56 		return -EINVAL;
57 
58 	/* Ensure the infile range is within the infile. */
59 	if (pos_in >= size_in)
60 		return -EINVAL;
61 	count = min(count, size_in - (uint64_t)pos_in);
62 
63 	ret = generic_write_check_limits(file_out, pos_out, &count);
64 	if (ret)
65 		return ret;
66 
67 	/*
68 	 * If the user wanted us to link to the infile's EOF, round up to the
69 	 * next block boundary for this check.
70 	 *
71 	 * Otherwise, make sure the count is also block-aligned, having
72 	 * already confirmed the starting offsets' block alignment.
73 	 */
74 	if (pos_in + count == size_in) {
75 		bcount = ALIGN(size_in, bs) - pos_in;
76 	} else {
77 		if (!IS_ALIGNED(count, bs))
78 			count = ALIGN_DOWN(count, bs);
79 		bcount = count;
80 	}
81 
82 	/* Don't allow overlapped cloning within the same file. */
83 	if (inode_in == inode_out &&
84 	    pos_out + bcount > pos_in &&
85 	    pos_out < pos_in + bcount)
86 		return -EINVAL;
87 
88 	/*
89 	 * If we had to shorten the request and the caller can't cope with a
90 	 * shortened request, bounce it back to userspace.
91 	 */
92 	if (*req_count != count && !(remap_flags & REMAP_FILE_CAN_SHORTEN))
93 		return -EINVAL;
94 
95 	*req_count = count;
96 	return 0;
97 }
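
/*
 * Worked example of the checks above (hypothetical values, not part of the
 * original file): with a 4096-byte block size, size_in = 10000, pos_in = 8192
 * and count = 1808, the request ends exactly at the source EOF, so
 * bcount = ALIGN(10000, 4096) - 8192 = 4096 and the partial EOF block may be
 * remapped.  With count = 1000 the request ends mid-file, count is rounded
 * down to ALIGN_DOWN(1000, 4096) = 0, and the caller gets -EINVAL unless it
 * passed REMAP_FILE_CAN_SHORTEN (in which case *req_count becomes 0).
 */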
98 
99 static int remap_verify_area(struct file *file, loff_t pos, loff_t len,
100 			     bool write)
101 {
102 	struct inode *inode = file_inode(file);
103 
104 	if (unlikely(pos < 0 || len < 0))
105 		return -EINVAL;
106 
107 	if (unlikely((loff_t)(pos + len) < 0))
108 		return -EINVAL;
109 
110 	if (unlikely(inode->i_flctx && mandatory_lock(inode))) {
111 		loff_t end = len ? pos + len - 1 : OFFSET_MAX;
112 		int retval;
113 
114 		retval = locks_mandatory_area(inode, file, pos, end,
115 				write ? F_WRLCK : F_RDLCK);
116 		if (retval < 0)
117 			return retval;
118 	}
119 
120 	return security_file_permission(file, write ? MAY_WRITE : MAY_READ);
121 }
122 
123 /*
124  * Ensure that we don't remap a partial EOF block in the middle of something
125  * else.  Assume that the offsets have already been checked for block
126  * alignment.
127  *
128  * For clone we only link a partial EOF block if it ends at or past the
129  * destination file's EOF.  For deduplication we accept a partial EOF block
130  * only if it ends exactly at the destination's EOF (never mid-file).
131  *
132  * Shorten the request if possible.
133  */
134 static int generic_remap_check_len(struct inode *inode_in,
135 				   struct inode *inode_out,
136 				   loff_t pos_out,
137 				   loff_t *len,
138 				   unsigned int remap_flags)
139 {
140 	u64 blkmask = i_blocksize(inode_in) - 1;
141 	loff_t new_len = *len;
142 
143 	if ((*len & blkmask) == 0)
144 		return 0;
145 
146 	if (pos_out + *len < i_size_read(inode_out))
147 		new_len &= ~blkmask;
148 
149 	if (new_len == *len)
150 		return 0;
151 
152 	if (remap_flags & REMAP_FILE_CAN_SHORTEN) {
153 		*len = new_len;
154 		return 0;
155 	}
156 
157 	return (remap_flags & REMAP_FILE_DEDUP) ? -EBADE : -EINVAL;
158 }
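
/*
 * Worked example (hypothetical values, not part of the original file): with a
 * 4096-byte source block size, blkmask is 4095.  A 6000-byte request whose end
 * falls short of the destination EOF is trimmed to new_len = 6000 & ~4095 =
 * 4096; the caller then gets 4096 bytes if REMAP_FILE_CAN_SHORTEN was passed,
 * and -EINVAL (-EBADE for dedupe) otherwise.  A 6000-byte request that ends at
 * or beyond the destination EOF keeps its partial tail block unchanged.
 */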
159 
160 /* Read a page's worth of file data into the page cache. */
161 static struct page *vfs_dedupe_get_page(struct inode *inode, loff_t offset)
162 {
163 	struct page *page;
164 
165 	page = read_mapping_page(inode->i_mapping, offset >> PAGE_SHIFT, NULL);
166 	if (IS_ERR(page))
167 		return page;
168 	if (!PageUptodate(page)) {
169 		put_page(page);
170 		return ERR_PTR(-EIO);
171 	}
172 	return page;
173 }
174 
175 /*
176  * Lock two pages, ensuring that we lock in offset order if the pages are from
177  * the same file.
178  */
179 static void vfs_lock_two_pages(struct page *page1, struct page *page2)
180 {
181 	/* Always lock in order of increasing index. */
182 	if (page1->index > page2->index)
183 		swap(page1, page2);
184 
185 	lock_page(page1);
186 	if (page1 != page2)
187 		lock_page(page2);
188 }
189 
190 /* Unlock two pages, being careful not to unlock the same page twice. */
191 static void vfs_unlock_two_pages(struct page *page1, struct page *page2)
192 {
193 	unlock_page(page1);
194 	if (page1 != page2)
195 		unlock_page(page2);
196 }
197 
198 /*
199  * Compare extents of two files to see if they are the same.
200  * Caller must have locked both inodes to prevent write races.
201  */
202 static int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
203 					 struct inode *dest, loff_t destoff,
204 					 loff_t len, bool *is_same)
205 {
206 	loff_t src_poff;
207 	loff_t dest_poff;
208 	void *src_addr;
209 	void *dest_addr;
210 	struct page *src_page;
211 	struct page *dest_page;
212 	loff_t cmp_len;
213 	bool same;
214 	int error;
215 
216 	error = -EINVAL;
217 	same = true;
218 	while (len) {
219 		src_poff = srcoff & (PAGE_SIZE - 1);
220 		dest_poff = destoff & (PAGE_SIZE - 1);
221 		cmp_len = min(PAGE_SIZE - src_poff,
222 			      PAGE_SIZE - dest_poff);
223 		cmp_len = min(cmp_len, len);
224 		if (cmp_len <= 0)
225 			goto out_error;
226 
227 		src_page = vfs_dedupe_get_page(src, srcoff);
228 		if (IS_ERR(src_page)) {
229 			error = PTR_ERR(src_page);
230 			goto out_error;
231 		}
232 		dest_page = vfs_dedupe_get_page(dest, destoff);
233 		if (IS_ERR(dest_page)) {
234 			error = PTR_ERR(dest_page);
235 			put_page(src_page);
236 			goto out_error;
237 		}
238 
239 		vfs_lock_two_pages(src_page, dest_page);
240 
241 		/*
242 		 * Now that we've locked both pages, make sure they're still
243 		 * mapped to the file data we're interested in.  If not,
244 		 * someone is invalidating pages on us and we lose.
245 		 */
246 		if (!PageUptodate(src_page) || !PageUptodate(dest_page) ||
247 		    src_page->mapping != src->i_mapping ||
248 		    dest_page->mapping != dest->i_mapping) {
249 			same = false;
250 			goto unlock;
251 		}
252 
253 		src_addr = kmap_atomic(src_page);
254 		dest_addr = kmap_atomic(dest_page);
255 
256 		flush_dcache_page(src_page);
257 		flush_dcache_page(dest_page);
258 
259 		if (memcmp(src_addr + src_poff, dest_addr + dest_poff, cmp_len))
260 			same = false;
261 
262 		kunmap_atomic(dest_addr);
263 		kunmap_atomic(src_addr);
264 unlock:
265 		vfs_unlock_two_pages(src_page, dest_page);
266 		put_page(dest_page);
267 		put_page(src_page);
268 
269 		if (!same)
270 			break;
271 
272 		srcoff += cmp_len;
273 		destoff += cmp_len;
274 		len -= cmp_len;
275 	}
276 
277 	*is_same = same;
278 	return 0;
279 
280 out_error:
281 	return error;
282 }
283 
284 /*
285  * Check that the two inodes are eligible for cloning, the ranges make
286  * sense, and then flush all dirty data.  Caller must ensure that the
287  * inodes have been locked against any other modifications.
288  *
289  * If there's an error, then the usual negative error code is returned.
290  * Otherwise returns 0 with *len set to the possibly shortened request length.
291  */
292 int generic_remap_file_range_prep(struct file *file_in, loff_t pos_in,
293 				  struct file *file_out, loff_t pos_out,
294 				  loff_t *len, unsigned int remap_flags)
295 {
296 	struct inode *inode_in = file_inode(file_in);
297 	struct inode *inode_out = file_inode(file_out);
298 	bool same_inode = (inode_in == inode_out);
299 	int ret;
300 
301 	/* Don't touch certain kinds of inodes */
302 	if (IS_IMMUTABLE(inode_out))
303 		return -EPERM;
304 
305 	if (IS_SWAPFILE(inode_in) || IS_SWAPFILE(inode_out))
306 		return -ETXTBSY;
307 
308 	/* Don't reflink dirs, pipes, sockets... */
309 	if (S_ISDIR(inode_in->i_mode) || S_ISDIR(inode_out->i_mode))
310 		return -EISDIR;
311 	if (!S_ISREG(inode_in->i_mode) || !S_ISREG(inode_out->i_mode))
312 		return -EINVAL;
313 
314 	/* Zero-length dedupe exits immediately; zero-length reflink goes to EOF. */
315 	if (*len == 0) {
316 		loff_t isize = i_size_read(inode_in);
317 
318 		if ((remap_flags & REMAP_FILE_DEDUP) || pos_in == isize)
319 			return 0;
320 		if (pos_in > isize)
321 			return -EINVAL;
322 		*len = isize - pos_in;
323 		if (*len == 0)
324 			return 0;
325 	}
326 
327 	/* Check that we don't violate system file offset limits. */
328 	ret = generic_remap_checks(file_in, pos_in, file_out, pos_out, len,
329 			remap_flags);
330 	if (ret)
331 		return ret;
332 
333 	/* Wait for the completion of any pending IOs on both files */
334 	inode_dio_wait(inode_in);
335 	if (!same_inode)
336 		inode_dio_wait(inode_out);
337 
338 	ret = filemap_write_and_wait_range(inode_in->i_mapping,
339 			pos_in, pos_in + *len - 1);
340 	if (ret)
341 		return ret;
342 
343 	ret = filemap_write_and_wait_range(inode_out->i_mapping,
344 			pos_out, pos_out + *len - 1);
345 	if (ret)
346 		return ret;
347 
348 	/*
349 	 * For a dedupe request, verify that both ranges contain identical data.
350 	 */
351 	if (remap_flags & REMAP_FILE_DEDUP) {
352 		bool is_same = false;
353 
354 		ret = vfs_dedupe_file_range_compare(inode_in, pos_in,
355 				inode_out, pos_out, *len, &is_same);
356 		if (ret)
357 			return ret;
358 		if (!is_same)
359 			return -EBADE;
360 	}
361 
362 	ret = generic_remap_check_len(inode_in, inode_out, pos_out, len,
363 			remap_flags);
364 	if (ret)
365 		return ret;
366 
367 	/* If the operation can't alter the file contents, we're done. */
368 	if (!(remap_flags & REMAP_FILE_DEDUP))
369 		ret = file_modified(file_out);
370 
371 	return ret;
372 }
373 EXPORT_SYMBOL(generic_remap_file_range_prep);
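
/*
 * Illustrative sketch, not part of the original file: a filesystem's
 * ->remap_file_range() method is expected to lock both inodes, call
 * generic_remap_file_range_prep() to validate (and possibly shorten) the
 * request, do its own extent remapping, and return the number of bytes it
 * remapped.  The hypothetical function below shows that shape; the actual
 * block-sharing step is filesystem specific and elided here.
 */
static loff_t example_remap_file_range(struct file *file_in, loff_t pos_in,
				       struct file *file_out, loff_t pos_out,
				       loff_t len, unsigned int remap_flags)
{
	struct inode *inode_in = file_inode(file_in);
	struct inode *inode_out = file_inode(file_out);
	loff_t ret;

	if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
		return -EINVAL;

	/* Serialize against other writers on both inodes. */
	lock_two_nondirectories(inode_in, inode_out);

	ret = generic_remap_file_range_prep(file_in, pos_in, file_out, pos_out,
					    &len, remap_flags);
	if (ret < 0 || len == 0)
		goto out_unlock;

	/* Filesystem-specific block sharing would happen here. */
	ret = len;

out_unlock:
	unlock_two_nondirectories(inode_in, inode_out);
	return ret;
}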
374 
375 loff_t do_clone_file_range(struct file *file_in, loff_t pos_in,
376 			   struct file *file_out, loff_t pos_out,
377 			   loff_t len, unsigned int remap_flags)
378 {
379 	loff_t ret;
380 
381 	WARN_ON_ONCE(remap_flags & REMAP_FILE_DEDUP);
382 
383 	/*
384 	 * FICLONE/FICLONERANGE ioctls enforce that src and dest files are on
385 	 * the same mount. Practically, they only need to be on the same file
386 	 * system.
387 	 */
388 	if (file_inode(file_in)->i_sb != file_inode(file_out)->i_sb)
389 		return -EXDEV;
390 
391 	ret = generic_file_rw_checks(file_in, file_out);
392 	if (ret < 0)
393 		return ret;
394 
395 	if (!file_in->f_op->remap_file_range)
396 		return -EOPNOTSUPP;
397 
398 	ret = remap_verify_area(file_in, pos_in, len, false);
399 	if (ret)
400 		return ret;
401 
402 	ret = remap_verify_area(file_out, pos_out, len, true);
403 	if (ret)
404 		return ret;
405 
406 	ret = file_in->f_op->remap_file_range(file_in, pos_in,
407 			file_out, pos_out, len, remap_flags);
408 	if (ret < 0)
409 		return ret;
410 
411 	fsnotify_access(file_in);
412 	fsnotify_modify(file_out);
413 	return ret;
414 }
415 EXPORT_SYMBOL(do_clone_file_range);
416 
417 loff_t vfs_clone_file_range(struct file *file_in, loff_t pos_in,
418 			    struct file *file_out, loff_t pos_out,
419 			    loff_t len, unsigned int remap_flags)
420 {
421 	loff_t ret;
422 
423 	file_start_write(file_out);
424 	ret = do_clone_file_range(file_in, pos_in, file_out, pos_out, len,
425 				  remap_flags);
426 	file_end_write(file_out);
427 
428 	return ret;
429 }
430 EXPORT_SYMBOL(vfs_clone_file_range);
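
/*
 * Illustrative sketch, not part of the original file: the FICLONE and
 * FICLONERANGE ioctl handler in fs/ioctl.c reaches vfs_clone_file_range()
 * roughly like the hypothetical helper below.  A zero @olen means "clone to
 * EOF", in which case a shortened result is not treated as an error.
 */
static int example_ioctl_file_clone(struct file *dst_file, unsigned long srcfd,
				    u64 off, u64 olen, u64 destoff)
{
	struct fd src_file = fdget(srcfd);
	loff_t cloned;
	int ret;

	if (!src_file.file)
		return -EBADF;

	/* The ioctls insist on the stricter same-mount rule. */
	ret = -EXDEV;
	if (src_file.file->f_path.mnt != dst_file->f_path.mnt)
		goto fdput;

	cloned = vfs_clone_file_range(src_file.file, off, dst_file, destoff,
				      olen, 0);
	if (cloned < 0)
		ret = cloned;
	else if (olen && cloned != olen)
		ret = -EINVAL;
	else
		ret = 0;
fdput:
	fdput(src_file);
	return ret;
}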
431 
432 /* Check whether we are allowed to dedupe the destination file */
433 static bool allow_file_dedupe(struct file *file)
434 {
435 	if (capable(CAP_SYS_ADMIN))
436 		return true;
437 	if (file->f_mode & FMODE_WRITE)
438 		return true;
439 	if (uid_eq(current_fsuid(), file_inode(file)->i_uid))
440 		return true;
441 	if (!inode_permission(file_inode(file), MAY_WRITE))
442 		return true;
443 	return false;
444 }
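
/*
 * Illustrative note, not part of the original file: a process that opened the
 * destination read-only can still dedupe into it if it owns the file, has
 * CAP_SYS_ADMIN, or would pass a MAY_WRITE permission check; dedupe never
 * changes the destination's contents, only how its blocks are shared on disk.
 */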
445 
446 loff_t vfs_dedupe_file_range_one(struct file *src_file, loff_t src_pos,
447 				 struct file *dst_file, loff_t dst_pos,
448 				 loff_t len, unsigned int remap_flags)
449 {
450 	loff_t ret;
451 
452 	WARN_ON_ONCE(remap_flags & ~(REMAP_FILE_DEDUP |
453 				     REMAP_FILE_CAN_SHORTEN));
454 
455 	ret = mnt_want_write_file(dst_file);
456 	if (ret)
457 		return ret;
458 
459 	/*
460 	 * This is redundant if called from vfs_dedupe_file_range(), but other
461 	 * callers need it and it's not performance sensitive...
462 	 */
463 	ret = remap_verify_area(src_file, src_pos, len, false);
464 	if (ret)
465 		goto out_drop_write;
466 
467 	ret = remap_verify_area(dst_file, dst_pos, len, true);
468 	if (ret)
469 		goto out_drop_write;
470 
471 	ret = -EPERM;
472 	if (!allow_file_dedupe(dst_file))
473 		goto out_drop_write;
474 
475 	ret = -EXDEV;
476 	if (src_file->f_path.mnt != dst_file->f_path.mnt)
477 		goto out_drop_write;
478 
479 	ret = -EISDIR;
480 	if (S_ISDIR(file_inode(dst_file)->i_mode))
481 		goto out_drop_write;
482 
483 	ret = -EINVAL;
484 	if (!dst_file->f_op->remap_file_range)
485 		goto out_drop_write;
486 
487 	if (len == 0) {
488 		ret = 0;
489 		goto out_drop_write;
490 	}
491 
492 	ret = dst_file->f_op->remap_file_range(src_file, src_pos, dst_file,
493 			dst_pos, len, remap_flags | REMAP_FILE_DEDUP);
494 out_drop_write:
495 	mnt_drop_write_file(dst_file);
496 
497 	return ret;
498 }
499 EXPORT_SYMBOL(vfs_dedupe_file_range_one);
500 
501 int vfs_dedupe_file_range(struct file *file, struct file_dedupe_range *same)
502 {
503 	struct file_dedupe_range_info *info;
504 	struct inode *src = file_inode(file);
505 	u64 off;
506 	u64 len;
507 	int i;
508 	int ret;
509 	u16 count = same->dest_count;
510 	loff_t deduped;
511 
512 	if (!(file->f_mode & FMODE_READ))
513 		return -EINVAL;
514 
515 	if (same->reserved1 || same->reserved2)
516 		return -EINVAL;
517 
518 	off = same->src_offset;
519 	len = same->src_length;
520 
521 	if (S_ISDIR(src->i_mode))
522 		return -EISDIR;
523 
524 	if (!S_ISREG(src->i_mode))
525 		return -EINVAL;
526 
527 	if (!file->f_op->remap_file_range)
528 		return -EOPNOTSUPP;
529 
530 	ret = remap_verify_area(file, off, len, false);
531 	if (ret < 0)
532 		return ret;
533 	ret = 0;
534 
535 	if (off + len > i_size_read(src))
536 		return -EINVAL;
537 
538 	/* Arbitrary 1G limit on a single dedupe request; it can be raised. */
539 	len = min_t(u64, len, 1 << 30);
540 
541 	/* pre-format output fields to sane values */
542 	for (i = 0; i < count; i++) {
543 		same->info[i].bytes_deduped = 0ULL;
544 		same->info[i].status = FILE_DEDUPE_RANGE_SAME;
545 	}
546 
547 	for (i = 0, info = same->info; i < count; i++, info++) {
548 		struct fd dst_fd = fdget(info->dest_fd);
549 		struct file *dst_file = dst_fd.file;
550 
551 		if (!dst_file) {
552 			info->status = -EBADF;
553 			goto next_loop;
554 		}
555 
556 		if (info->reserved) {
557 			info->status = -EINVAL;
558 			goto next_fdput;
559 		}
560 
561 		deduped = vfs_dedupe_file_range_one(file, off, dst_file,
562 						    info->dest_offset, len,
563 						    REMAP_FILE_CAN_SHORTEN);
564 		if (deduped == -EBADE)
565 			info->status = FILE_DEDUPE_RANGE_DIFFERS;
566 		else if (deduped < 0)
567 			info->status = deduped;
568 		else
569 			info->bytes_deduped = len;
570 
571 next_fdput:
572 		fdput(dst_fd);
573 next_loop:
574 		if (fatal_signal_pending(current))
575 			break;
576 	}
577 	return ret;
578 }
579 EXPORT_SYMBOL(vfs_dedupe_file_range);
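
/*
 * Illustrative sketch, not part of the original file: the FIDEDUPERANGE ioctl
 * handler in fs/ioctl.c drives vfs_dedupe_file_range() roughly like the
 * hypothetical helper below.  The request is variable-length (a header plus
 * dest_count info entries), so it is copied in whole, deduped, and the
 * per-destination status and byte counts are copied back for userspace to
 * inspect.  Error handling is abbreviated.
 */
static int example_ioctl_file_dedupe_range(struct file *file,
					   struct file_dedupe_range __user *argp)
{
	struct file_dedupe_range *same;
	unsigned long size;
	u16 count;
	int ret;

	if (get_user(count, &argp->dest_count))
		return -EFAULT;

	/* Header plus one file_dedupe_range_info per destination file. */
	size = offsetof(struct file_dedupe_range, info[count]);
	same = memdup_user(argp, size);
	if (IS_ERR(same))
		return PTR_ERR(same);

	same->dest_count = count;
	ret = vfs_dedupe_file_range(file, same);
	if (ret)
		goto out;

	/* Hand the per-destination results back to userspace. */
	if (copy_to_user(argp, same, size))
		ret = -EFAULT;
out:
	kfree(same);
	return ret;
}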
580