// SPDX-License-Identifier: GPL-2.0-only
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/sched/xacct.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/fsnotify.h>
#include <linux/security.h>
#include <linux/export.h>
#include <linux/syscalls.h>
#include <linux/pagemap.h>
#include <linux/splice.h>
#include <linux/compat.h>
#include <linux/mount.h>
#include <linux/fs.h>
#include <linux/dax.h>
#include "internal.h"

#include <linux/uaccess.h>
#include <asm/unistd.h>

/*
 * Performs necessary checks before doing a clone.
 *
 * Can adjust the number of bytes to clone via the @req_count argument.
 * Returns the appropriate error code that the caller should return, or
 * zero if the clone should be allowed.
 */
static int generic_remap_checks(struct file *file_in, loff_t pos_in,
				struct file *file_out, loff_t pos_out,
				loff_t *req_count, unsigned int remap_flags)
{
	struct inode *inode_in = file_in->f_mapping->host;
	struct inode *inode_out = file_out->f_mapping->host;
	uint64_t count = *req_count;
	uint64_t bcount;
	loff_t size_in, size_out;
	loff_t bs = inode_out->i_sb->s_blocksize;
	int ret;

	/* The start of both ranges must be aligned to an fs block. */
	if (!IS_ALIGNED(pos_in, bs) || !IS_ALIGNED(pos_out, bs))
		return -EINVAL;

	/* Ensure offsets don't wrap. */
	if (pos_in + count < pos_in || pos_out + count < pos_out)
		return -EINVAL;

	size_in = i_size_read(inode_in);
	size_out = i_size_read(inode_out);

	/* Dedupe requires both ranges to be within EOF. */
	if ((remap_flags & REMAP_FILE_DEDUP) &&
	    (pos_in >= size_in || pos_in + count > size_in ||
	     pos_out >= size_out || pos_out + count > size_out))
		return -EINVAL;

	/* Ensure the infile range is within the infile. */
	if (pos_in >= size_in)
		return -EINVAL;
	count = min(count, size_in - (uint64_t)pos_in);

	ret = generic_write_check_limits(file_out, pos_out, &count);
	if (ret)
		return ret;

	/*
	 * If the user wanted us to link to the infile's EOF, round up to the
	 * next block boundary for this check.
	 *
	 * Otherwise, make sure the count is also block-aligned, having
	 * already confirmed the starting offsets' block alignment.
	 */
	if (pos_in + count == size_in &&
	    (!(remap_flags & REMAP_FILE_DEDUP) || pos_out + count == size_out)) {
		bcount = ALIGN(size_in, bs) - pos_in;
	} else {
		if (!IS_ALIGNED(count, bs))
			count = ALIGN_DOWN(count, bs);
		bcount = count;
	}

	/* Don't allow overlapped cloning within the same file. */
	if (inode_in == inode_out &&
	    pos_out + bcount > pos_in &&
	    pos_out < pos_in + bcount)
		return -EINVAL;

	/*
	 * If we shortened the request but the caller can't deal with that,
	 * bounce the request back to userspace.
	 */
	if (*req_count != count && !(remap_flags & REMAP_FILE_CAN_SHORTEN))
		return -EINVAL;

	*req_count = count;
	return 0;
}

/* Sanity-check a remap range and ask the security layer for permission. */
static int remap_verify_area(struct file *file, loff_t pos, loff_t len,
			     bool write)
{
	if (unlikely(pos < 0 || len < 0))
		return -EINVAL;

	if (unlikely((loff_t) (pos + len) < 0))
		return -EINVAL;

	return security_file_permission(file, write ? MAY_WRITE : MAY_READ);
}

/*
 * Ensure that we don't remap a partial EOF block in the middle of something
 * else.  Assume that the offsets have already been checked for block
 * alignment.
 *
 * For clone we only link a partial EOF block above or at the destination file's
 * EOF.  For deduplication we accept a partial EOF block only if it ends at the
 * destination file's EOF (cannot link it into the middle of a file).
 *
 * Shorten the request if possible.
 */
static int generic_remap_check_len(struct inode *inode_in,
				   struct inode *inode_out,
				   loff_t pos_out,
				   loff_t *len,
				   unsigned int remap_flags)
{
	u64 blkmask = i_blocksize(inode_in) - 1;
	loff_t new_len = *len;

	if ((*len & blkmask) == 0)
		return 0;

	if (pos_out + *len < i_size_read(inode_out))
		new_len &= ~blkmask;

	if (new_len == *len)
		return 0;

	if (remap_flags & REMAP_FILE_CAN_SHORTEN) {
		*len = new_len;
		return 0;
	}

	return (remap_flags & REMAP_FILE_DEDUP) ? -EBADE : -EINVAL;
}

/* Read a page's worth of file data into the page cache. */
static struct folio *vfs_dedupe_get_folio(struct file *file, loff_t pos)
{
	return read_mapping_folio(file->f_mapping, pos >> PAGE_SHIFT, file);
}

/*
 * Lock two folios, ensuring that we lock in offset order if the folios
 * are from the same file.
 */
static void vfs_lock_two_folios(struct folio *folio1, struct folio *folio2)
{
	/* Always lock in order of increasing index. */
	if (folio1->index > folio2->index)
		swap(folio1, folio2);

	folio_lock(folio1);
	if (folio1 != folio2)
		folio_lock(folio2);
}

/* Unlock two folios, being careful not to unlock the same folio twice. */
static void vfs_unlock_two_folios(struct folio *folio1, struct folio *folio2)
{
	folio_unlock(folio1);
	if (folio1 != folio2)
		folio_unlock(folio2);
}

/*
 * Compare extents of two files to see if they are the same.
 * Caller must have locked both inodes to prevent write races.
 */
static int vfs_dedupe_file_range_compare(struct file *src, loff_t srcoff,
					 struct file *dest, loff_t dstoff,
					 loff_t len, bool *is_same)
{
	bool same = true;
	int error = -EINVAL;

	while (len) {
		struct folio *src_folio, *dst_folio;
		void *src_addr, *dst_addr;
		loff_t cmp_len = min(PAGE_SIZE - offset_in_page(srcoff),
				     PAGE_SIZE - offset_in_page(dstoff));

		cmp_len = min(cmp_len, len);
		if (cmp_len <= 0)
			goto out_error;

		src_folio = vfs_dedupe_get_folio(src, srcoff);
		if (IS_ERR(src_folio)) {
			error = PTR_ERR(src_folio);
			goto out_error;
		}
		dst_folio = vfs_dedupe_get_folio(dest, dstoff);
		if (IS_ERR(dst_folio)) {
			error = PTR_ERR(dst_folio);
			folio_put(src_folio);
			goto out_error;
		}

		vfs_lock_two_folios(src_folio, dst_folio);

		/*
		 * Now that we've locked both folios, make sure they're still
		 * mapped to the file data we're interested in.  If not,
		 * someone is invalidating pages on us and we lose.
		 */
		if (!folio_test_uptodate(src_folio) || !folio_test_uptodate(dst_folio) ||
		    src_folio->mapping != src->f_mapping ||
		    dst_folio->mapping != dest->f_mapping) {
			same = false;
			goto unlock;
		}

		src_addr = kmap_local_folio(src_folio,
					    offset_in_folio(src_folio, srcoff));
		dst_addr = kmap_local_folio(dst_folio,
					    offset_in_folio(dst_folio, dstoff));

		flush_dcache_folio(src_folio);
		flush_dcache_folio(dst_folio);

		if (memcmp(src_addr, dst_addr, cmp_len))
			same = false;

		kunmap_local(dst_addr);
		kunmap_local(src_addr);
unlock:
		vfs_unlock_two_folios(src_folio, dst_folio);
		folio_put(dst_folio);
		folio_put(src_folio);

		if (!same)
			break;

		srcoff += cmp_len;
		dstoff += cmp_len;
		len -= cmp_len;
	}

	*is_same = same;
	return 0;

out_error:
	return error;
}

/*
 * Check that the two inodes are eligible for cloning, the ranges make
 * sense, and then flush all dirty data.  Caller must ensure that the
 * inodes have been locked against any other modifications.
 *
 * If there's an error, then the usual negative error code is returned.
 * Otherwise returns 0 with *len set to the request length.
 */
int
__generic_remap_file_range_prep(struct file *file_in, loff_t pos_in,
				struct file *file_out, loff_t pos_out,
				loff_t *len, unsigned int remap_flags,
				const struct iomap_ops *dax_read_ops)
{
	struct inode *inode_in = file_inode(file_in);
	struct inode *inode_out = file_inode(file_out);
	bool same_inode = (inode_in == inode_out);
	int ret;

	/* Don't touch certain kinds of inodes */
	if (IS_IMMUTABLE(inode_out))
		return -EPERM;

	if (IS_SWAPFILE(inode_in) || IS_SWAPFILE(inode_out))
		return -ETXTBSY;

	/* Don't reflink dirs, pipes, sockets... */
	if (S_ISDIR(inode_in->i_mode) || S_ISDIR(inode_out->i_mode))
		return -EISDIR;
	if (!S_ISREG(inode_in->i_mode) || !S_ISREG(inode_out->i_mode))
		return -EINVAL;

	/* Zero length dedupe exits immediately; reflink goes to EOF. */
	if (*len == 0) {
		loff_t isize = i_size_read(inode_in);

		if ((remap_flags & REMAP_FILE_DEDUP) || pos_in == isize)
			return 0;
		if (pos_in > isize)
			return -EINVAL;
		*len = isize - pos_in;
		if (*len == 0)
			return 0;
	}

	/* Check that we don't violate system file offset limits. */
	ret = generic_remap_checks(file_in, pos_in, file_out, pos_out, len,
				   remap_flags);
	if (ret)
		return ret;

	/* Wait for the completion of any pending IOs on both files */
	inode_dio_wait(inode_in);
	if (!same_inode)
		inode_dio_wait(inode_out);

	ret = filemap_write_and_wait_range(inode_in->i_mapping,
					   pos_in, pos_in + *len - 1);
	if (ret)
		return ret;

	ret = filemap_write_and_wait_range(inode_out->i_mapping,
					   pos_out, pos_out + *len - 1);
	if (ret)
		return ret;

	/*
	 * Check that the extents are the same.
	 */
	if (remap_flags & REMAP_FILE_DEDUP) {
		bool is_same = false;

		if (*len == 0)
			return 0;

		if (!IS_DAX(inode_in))
			ret = vfs_dedupe_file_range_compare(file_in, pos_in,
					file_out, pos_out, *len, &is_same);
		else if (dax_read_ops)
			ret = dax_dedupe_file_range_compare(inode_in, pos_in,
					inode_out, pos_out, *len, &is_same,
					dax_read_ops);
		else
			return -EINVAL;
		if (ret)
			return ret;
		if (!is_same)
			return -EBADE;
	}

	ret = generic_remap_check_len(inode_in, inode_out, pos_out, len,
				      remap_flags);
	if (ret)
		return ret;

	/*
	 * If the operation can't alter the file contents, we're done.
	 * Otherwise, note that file_out is being modified.
	 */
	if (!(remap_flags & REMAP_FILE_DEDUP))
		ret = file_modified(file_out);

	return ret;
}

int generic_remap_file_range_prep(struct file *file_in, loff_t pos_in,
				  struct file *file_out, loff_t pos_out,
				  loff_t *len, unsigned int remap_flags)
{
	return __generic_remap_file_range_prep(file_in, pos_in, file_out,
					       pos_out, len, remap_flags, NULL);
}
EXPORT_SYMBOL(generic_remap_file_range_prep);

/*
 * Clone (reflink) a range of bytes from file_in to file_out.  Both files
 * must live on the same superblock and the filesystem must implement
 * ->remap_file_range().
 */
loff_t do_clone_file_range(struct file *file_in, loff_t pos_in,
			   struct file *file_out, loff_t pos_out,
			   loff_t len, unsigned int remap_flags)
{
	loff_t ret;

	WARN_ON_ONCE(remap_flags & REMAP_FILE_DEDUP);

	if (file_inode(file_in)->i_sb != file_inode(file_out)->i_sb)
		return -EXDEV;

	ret = generic_file_rw_checks(file_in, file_out);
	if (ret < 0)
		return ret;

	if (!file_in->f_op->remap_file_range)
		return -EOPNOTSUPP;

	ret = remap_verify_area(file_in, pos_in, len, false);
	if (ret)
		return ret;

	ret = remap_verify_area(file_out, pos_out, len, true);
	if (ret)
		return ret;

	ret = file_in->f_op->remap_file_range(file_in, pos_in,
			file_out, pos_out, len, remap_flags);
	if (ret < 0)
		return ret;

	fsnotify_access(file_in);
	fsnotify_modify(file_out);
	return ret;
}
EXPORT_SYMBOL(do_clone_file_range);

/* do_clone_file_range() with write access taken on the output file. */
loff_t vfs_clone_file_range(struct file *file_in, loff_t pos_in,
			    struct file *file_out, loff_t pos_out,
			    loff_t len, unsigned int remap_flags)
{
	loff_t ret;

	file_start_write(file_out);
	ret = do_clone_file_range(file_in, pos_in, file_out, pos_out, len,
				  remap_flags);
	file_end_write(file_out);

	return ret;
}
EXPORT_SYMBOL(vfs_clone_file_range);

/* Check whether we are allowed to dedupe the destination file */
static bool allow_file_dedupe(struct file *file)
{
	struct user_namespace *mnt_userns = file_mnt_user_ns(file);
	struct inode *inode = file_inode(file);

	if (capable(CAP_SYS_ADMIN))
		return true;
	if (file->f_mode & FMODE_WRITE)
		return true;
	if (uid_eq(current_fsuid(), i_uid_into_mnt(mnt_userns, inode)))
		return true;
	if (!inode_permission(mnt_userns, inode, MAY_WRITE))
		return true;
	return false;
}

/*
 * Deduplicate a single source/destination range pair, forcing
 * REMAP_FILE_DEDUP on the underlying ->remap_file_range() call.
 */
loff_t vfs_dedupe_file_range_one(struct file *src_file, loff_t src_pos,
				 struct file *dst_file, loff_t dst_pos,
				 loff_t len, unsigned int remap_flags)
{
	loff_t ret;

	WARN_ON_ONCE(remap_flags & ~(REMAP_FILE_DEDUP |
				     REMAP_FILE_CAN_SHORTEN));

	ret = mnt_want_write_file(dst_file);
	if (ret)
		return ret;

	/*
	 * This is redundant if called from vfs_dedupe_file_range(), but other
	 * callers need it and it's not performance sensitive...
	 */
	ret = remap_verify_area(src_file, src_pos, len, false);
	if (ret)
		goto out_drop_write;

	ret = remap_verify_area(dst_file, dst_pos, len, true);
	if (ret)
		goto out_drop_write;

	ret = -EPERM;
	if (!allow_file_dedupe(dst_file))
		goto out_drop_write;

	ret = -EXDEV;
	if (file_inode(src_file)->i_sb != file_inode(dst_file)->i_sb)
		goto out_drop_write;

	ret = -EISDIR;
	if (S_ISDIR(file_inode(dst_file)->i_mode))
		goto out_drop_write;

	ret = -EINVAL;
	if (!dst_file->f_op->remap_file_range)
		goto out_drop_write;

	if (len == 0) {
		ret = 0;
		goto out_drop_write;
	}

	ret = dst_file->f_op->remap_file_range(src_file, src_pos, dst_file,
			dst_pos, len, remap_flags | REMAP_FILE_DEDUP);
out_drop_write:
	mnt_drop_write_file(dst_file);

	return ret;
}
EXPORT_SYMBOL(vfs_dedupe_file_range_one);

/*
 * Deduplicate the source range against each destination described in
 * @same->info[]; per-destination results are returned in the info array.
 */
int vfs_dedupe_file_range(struct file *file, struct file_dedupe_range *same)
{
	struct file_dedupe_range_info *info;
	struct inode *src = file_inode(file);
	u64 off;
	u64 len;
	int i;
	int ret;
	u16 count = same->dest_count;
	loff_t deduped;

	if (!(file->f_mode & FMODE_READ))
		return -EINVAL;

	if (same->reserved1 || same->reserved2)
		return -EINVAL;

	off = same->src_offset;
	len = same->src_length;

	if (S_ISDIR(src->i_mode))
		return -EISDIR;

	if (!S_ISREG(src->i_mode))
		return -EINVAL;

	if (!file->f_op->remap_file_range)
		return -EOPNOTSUPP;

	ret = remap_verify_area(file, off, len, false);
	if (ret < 0)
		return ret;
	ret = 0;

	if (off + len > i_size_read(src))
		return -EINVAL;

	/* Arbitrary 1G limit on a single dedupe request, can be raised. */
	len = min_t(u64, len, 1 << 30);

	/* pre-format output fields to sane values */
	for (i = 0; i < count; i++) {
		same->info[i].bytes_deduped = 0ULL;
		same->info[i].status = FILE_DEDUPE_RANGE_SAME;
	}

	for (i = 0, info = same->info; i < count; i++, info++) {
		struct fd dst_fd = fdget(info->dest_fd);
		struct file *dst_file = dst_fd.file;

		if (!dst_file) {
			info->status = -EBADF;
			goto next_loop;
		}

		if (info->reserved) {
			info->status = -EINVAL;
			goto next_fdput;
		}

		deduped = vfs_dedupe_file_range_one(file, off, dst_file,
						    info->dest_offset, len,
						    REMAP_FILE_CAN_SHORTEN);
		if (deduped == -EBADE)
			info->status = FILE_DEDUPE_RANGE_DIFFERS;
		else if (deduped < 0)
			info->status = deduped;
		else
			info->bytes_deduped = len;

next_fdput:
		fdput(dst_fd);
next_loop:
		if (fatal_signal_pending(current))
			break;
	}
	return ret;
}
EXPORT_SYMBOL(vfs_dedupe_file_range);