// SPDX-License-Identifier: GPL-2.0

#include <linux/blkdev.h>
#include <linux/iversion.h>
#include "compression.h"
#include "ctree.h"
#include "delalloc-space.h"
#include "reflink.h"
#include "transaction.h"

#define BTRFS_MAX_DEDUPE_LEN	SZ_16M

static int clone_finish_inode_update(struct btrfs_trans_handle *trans,
				     struct inode *inode,
				     u64 endoff,
				     const u64 destoff,
				     const u64 olen,
				     int no_time_update)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret;

	inode_inc_iversion(inode);
	if (!no_time_update)
		inode->i_mtime = inode->i_ctime = current_time(inode);
	/*
	 * We round up to the block size at eof when determining which
	 * extents to clone above, but shouldn't round up the file size.
	 */
	if (endoff > destoff + olen)
		endoff = destoff + olen;
	if (endoff > inode->i_size) {
		i_size_write(inode, endoff);
		btrfs_inode_safe_disk_i_size_write(inode, 0);
	}

	ret = btrfs_update_inode(trans, root, inode);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
		goto out;
	}
	ret = btrfs_end_transaction(trans);
out:
	return ret;
}
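
/*
 * Copy the data of an inline extent into the page at file offset @file_offset
 * of the destination inode, decompressing it first when @comp_type is not
 * BTRFS_COMPRESS_NONE. The caller has already flushed and locked the relevant
 * ranges of both inodes, which is what makes the delalloc reservation below
 * safe (see the comment in the function body).
 */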

static int copy_inline_to_page(struct inode *inode,
			       const u64 file_offset,
			       char *inline_data,
			       const u64 size,
			       const u64 datal,
			       const u8 comp_type)
{
	const u64 block_size = btrfs_inode_sectorsize(inode);
	const u64 range_end = file_offset + block_size - 1;
	const size_t inline_size = size - btrfs_file_extent_calc_inline_size(0);
	char *data_start = inline_data + btrfs_file_extent_calc_inline_size(0);
	struct extent_changeset *data_reserved = NULL;
	struct page *page = NULL;
	int ret;

	ASSERT(IS_ALIGNED(file_offset, block_size));

	/*
	 * We have flushed and locked the ranges of the source and destination
	 * inodes, we also have locked the inodes, so we are safe to do a
	 * reservation here. Also we must not do the reservation while holding
	 * a transaction open, otherwise we would deadlock.
	 */
	ret = btrfs_delalloc_reserve_space(inode, &data_reserved, file_offset,
					   block_size);
	if (ret)
		goto out;

	page = find_or_create_page(inode->i_mapping, file_offset >> PAGE_SHIFT,
				   btrfs_alloc_write_mask(inode->i_mapping));
	if (!page) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	set_page_extent_mapped(page);
	clear_extent_bit(&BTRFS_I(inode)->io_tree, file_offset, range_end,
			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
			 0, 0, NULL);
	ret = btrfs_set_extent_delalloc(inode, file_offset, range_end, 0, NULL);
	if (ret)
		goto out_unlock;

	if (comp_type == BTRFS_COMPRESS_NONE) {
		char *map;

		map = kmap(page);
		memcpy(map, data_start, datal);
		flush_dcache_page(page);
		kunmap(page);
	} else {
		ret = btrfs_decompress(comp_type, data_start, page, 0,
				       inline_size, datal);
		if (ret)
			goto out_unlock;
		flush_dcache_page(page);
	}

	/*
	 * If our inline data is smaller than the block/page size, then the
	 * remainder of the block/page is equivalent to zeroes. We had something
	 * like the following done:
	 *
	 * $ xfs_io -f -c "pwrite -S 0xab 0 500" file
	 * $ sync  # (or fsync)
	 * $ xfs_io -c "falloc 0 4K" file
	 * $ xfs_io -c "pwrite -S 0xcd 4K 4K" file
	 *
	 * So what's in the range [500, 4095] corresponds to zeroes.
	 */
	if (datal < block_size) {
		char *map;

		map = kmap(page);
		memset(map + datal, 0, block_size - datal);
		flush_dcache_page(page);
		kunmap(page);
	}

	SetPageUptodate(page);
	ClearPageChecked(page);
	set_page_dirty(page);
out_unlock:
	if (page) {
		unlock_page(page);
		put_page(page);
	}
	if (ret)
		btrfs_delalloc_release_space(inode, data_reserved, file_offset,
					     block_size, true);
	btrfs_delalloc_release_extents(BTRFS_I(inode), block_size);
out:
	extent_changeset_free(data_reserved);

	return ret;
}
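
/*
 * The three outcomes handled by clone_copy_inline_extent() below: when the
 * destination offset is not 0, when the destination has an implicit hole or a
 * non-inline extent at offset 0, or when its i_size is larger than the inline
 * data, the data is copied into the destination page. Only when none of that
 * applies is the source inline extent inserted verbatim into a leaf of the
 * destination root.
 */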

/*
 * Deal with cloning of inline extents. We try to copy the inline extent from
 * the source inode to the destination inode when possible. When not possible
 * we copy the inline extent's data into the respective page of the
 * destination inode.
 */
static int clone_copy_inline_extent(struct inode *dst,
				    struct btrfs_path *path,
				    struct btrfs_key *new_key,
				    const u64 drop_start,
				    const u64 datal,
				    const u64 size,
				    const u8 comp_type,
				    char *inline_data,
				    struct btrfs_trans_handle **trans_out)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(dst->i_sb);
	struct btrfs_root *root = BTRFS_I(dst)->root;
	const u64 aligned_end = ALIGN(new_key->offset + datal,
				      fs_info->sectorsize);
	struct btrfs_trans_handle *trans = NULL;
	int ret;
	struct btrfs_key key;

	if (new_key->offset > 0) {
		ret = copy_inline_to_page(dst, new_key->offset, inline_data,
					  size, datal, comp_type);
		goto out;
	}

	key.objectid = btrfs_ino(BTRFS_I(dst));
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0) {
		return ret;
	} else if (ret > 0) {
		if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				return ret;
			else if (ret > 0)
				goto copy_inline_extent;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid == btrfs_ino(BTRFS_I(dst)) &&
		    key.type == BTRFS_EXTENT_DATA_KEY) {
			/*
			 * There's an implicit hole at file offset 0, copy the
			 * inline extent's data to the page.
			 */
			ASSERT(key.offset > 0);
			ret = copy_inline_to_page(dst, new_key->offset,
						  inline_data, size, datal,
						  comp_type);
			goto out;
		}
	} else if (i_size_read(dst) <= datal) {
		struct btrfs_file_extent_item *ei;

		ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_file_extent_item);
		/*
		 * If it's an inline extent replace it with the source inline
		 * extent, otherwise copy the source inline extent data into
		 * the respective page at the destination inode.
		 */
		if (btrfs_file_extent_type(path->nodes[0], ei) ==
		    BTRFS_FILE_EXTENT_INLINE)
			goto copy_inline_extent;

		ret = copy_inline_to_page(dst, new_key->offset, inline_data,
					  size, datal, comp_type);
		goto out;
	}

copy_inline_extent:
	ret = 0;
	/*
	 * We have no extent items, or we have an extent at offset 0 which may
	 * or may not be inlined. All these cases are dealt with in the same
	 * way.
	 */
	if (i_size_read(dst) > datal) {
		/*
		 * At the destination offset 0 we have either a hole, a regular
		 * extent or an inline extent larger than the one we want to
		 * clone. Deal with all these cases by copying the inline
		 * extent data into the respective page at the destination
		 * inode.
		 */
		ret = copy_inline_to_page(dst, new_key->offset, inline_data,
					  size, datal, comp_type);
		goto out;
	}

	btrfs_release_path(path);
	/*
	 * If we end up here it means we are copying the inline extent into a
	 * leaf of the destination inode. We know we will drop or adjust at
	 * most one extent item in the destination root.
	 *
	 * 1 unit - adjusting old extent (we may have to split it)
	 * 1 unit - add new extent
	 * 1 unit - inode update
	 */
	trans = btrfs_start_transaction(root, 3);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		goto out;
	}
	ret = btrfs_drop_extents(trans, root, dst, drop_start, aligned_end, 1);
	if (ret)
		goto out;
	ret = btrfs_insert_empty_item(trans, root, path, new_key, size);
	if (ret)
		goto out;

	write_extent_buffer(path->nodes[0], inline_data,
			    btrfs_item_ptr_offset(path->nodes[0],
						  path->slots[0]),
			    size);
	inode_add_bytes(dst, datal);
	set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(dst)->runtime_flags);
	ret = btrfs_inode_set_file_extent_range(BTRFS_I(dst), 0, aligned_end);
out:
	if (!ret && !trans) {
		/*
		 * No transaction here means we copied the inline extent into a
		 * page of the destination inode.
		 *
		 * 1 unit to update inode item
		 */
		trans = btrfs_start_transaction(root, 1);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			trans = NULL;
		}
	}
	if (ret && trans) {
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
	}
	if (!ret)
		*trans_out = trans;

	return ret;
}
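
/*
 * Note that on success clone_copy_inline_extent() hands back an open
 * transaction through @trans_out. btrfs_clone() below is then responsible
 * for committing the inode update and ending that transaction via
 * clone_finish_inode_update().
 */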

/**
 * btrfs_clone() - clone a range from one file to another
 *
 * @src: Inode to clone from
 * @inode: Inode to clone to
 * @off: Offset within source to start clone from
 * @olen: Original length, passed by user, of range to clone
 * @olen_aligned: Block-aligned value of olen
 * @destoff: Offset within @inode to start clone
 * @no_time_update: If set, do not update mtime/ctime on the target inode
 */
static int btrfs_clone(struct inode *src, struct inode *inode,
		       const u64 off, const u64 olen, const u64 olen_aligned,
		       const u64 destoff, int no_time_update)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_path *path = NULL;
	struct extent_buffer *leaf;
	struct btrfs_trans_handle *trans;
	char *buf = NULL;
	struct btrfs_key key;
	u32 nritems;
	int slot;
	int ret;
	const u64 len = olen_aligned;
	u64 last_dest_end = destoff;

	ret = -ENOMEM;
	buf = kvmalloc(fs_info->nodesize, GFP_KERNEL);
	if (!buf)
		return ret;

	path = btrfs_alloc_path();
	if (!path) {
		kvfree(buf);
		return ret;
	}

	path->reada = READA_FORWARD;
	/* Clone data */
	key.objectid = btrfs_ino(BTRFS_I(src));
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = off;

	while (1) {
		u64 next_key_min_offset = key.offset + 1;
		struct btrfs_file_extent_item *extent;
		int type;
		u32 size;
		struct btrfs_key new_key;
		u64 disko = 0, diskl = 0;
		u64 datao = 0, datal = 0;
		u8 comp;
		u64 drop_start;

		/* Note the key will change type as we walk through the tree */
		path->leave_spinning = 1;
		ret = btrfs_search_slot(NULL, BTRFS_I(src)->root, &key, path,
					0, 0);
		if (ret < 0)
			goto out;
		/*
		 * On the first search, if no extent item starting at offset
		 * off was found but the previous item is an extent item, that
		 * extent may overlap our target range, so process it.
		 */
		if (key.offset == off && ret > 0 && path->slots[0] > 0) {
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0] - 1);
			if (key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}

		nritems = btrfs_header_nritems(path->nodes[0]);
process_slot:
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(BTRFS_I(src)->root, path);
			if (ret < 0)
				goto out;
			if (ret > 0)
				break;
			nritems = btrfs_header_nritems(path->nodes[0]);
		}
		leaf = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.type > BTRFS_EXTENT_DATA_KEY ||
		    key.objectid != btrfs_ino(BTRFS_I(src)))
			break;

		ASSERT(key.type == BTRFS_EXTENT_DATA_KEY);

		extent = btrfs_item_ptr(leaf, slot,
					struct btrfs_file_extent_item);
		comp = btrfs_file_extent_compression(leaf, extent);
		type = btrfs_file_extent_type(leaf, extent);
		if (type == BTRFS_FILE_EXTENT_REG ||
		    type == BTRFS_FILE_EXTENT_PREALLOC) {
			disko = btrfs_file_extent_disk_bytenr(leaf, extent);
			diskl = btrfs_file_extent_disk_num_bytes(leaf, extent);
			datao = btrfs_file_extent_offset(leaf, extent);
			datal = btrfs_file_extent_num_bytes(leaf, extent);
		} else if (type == BTRFS_FILE_EXTENT_INLINE) {
			/* Take upper bound, may be compressed */
			datal = btrfs_file_extent_ram_bytes(leaf, extent);
		}

		/*
		 * The first search might have left us at an extent item that
		 * ends before our target range's start, which can happen if
		 * we have holes and the NO_HOLES feature enabled.
		 */
		if (key.offset + datal <= off) {
			path->slots[0]++;
			goto process_slot;
		} else if (key.offset >= off + len) {
			break;
		}
		next_key_min_offset = key.offset + datal;
		size = btrfs_item_size_nr(leaf, slot);
		read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf, slot),
				   size);

		btrfs_release_path(path);
		path->leave_spinning = 0;

		memcpy(&new_key, &key, sizeof(new_key));
		new_key.objectid = btrfs_ino(BTRFS_I(inode));
		if (off <= key.offset)
			new_key.offset = key.offset + destoff - off;
		else
			new_key.offset = destoff;

		/*
		 * Deal with a hole that doesn't have an extent item that
		 * represents it (NO_HOLES feature enabled).
		 * This hole is either in the middle of the cloning range or at
		 * the beginning (fully or partially overlapping it).
		 */
		if (new_key.offset != last_dest_end)
			drop_start = last_dest_end;
		else
			drop_start = new_key.offset;

		if (type == BTRFS_FILE_EXTENT_REG ||
		    type == BTRFS_FILE_EXTENT_PREALLOC) {
			struct btrfs_clone_extent_info clone_info;

			/*
			 *    a  | --- range to clone ---|  b
			 * | ------------- extent ------------- |
			 */

			/* Subtract range b */
			if (key.offset + datal > off + len)
				datal = off + len - key.offset;

			/* Subtract range a */
			if (off > key.offset) {
				datao += off - key.offset;
				datal -= off - key.offset;
			}
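
			/*
			 * For example, with a 4K sector size, cloning the
			 * range [8K, 16K) from an extent item at offset 4K
			 * with datal = 16K first trims range b (4K + 16K >
			 * 16K, so datal becomes 12K) and then range a
			 * (8K > 4K, so datao grows by 4K and datal drops to
			 * 8K), leaving exactly the requested 8K.
			 */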

			clone_info.disk_offset = disko;
			clone_info.disk_len = diskl;
			clone_info.data_offset = datao;
			clone_info.data_len = datal;
			clone_info.file_offset = new_key.offset;
			clone_info.extent_buf = buf;
			clone_info.item_size = size;
			ret = btrfs_punch_hole_range(inode, path, drop_start,
					new_key.offset + datal - 1, &clone_info,
					&trans);
			if (ret)
				goto out;
		} else if (type == BTRFS_FILE_EXTENT_INLINE) {
			/*
			 * Inline extents always have to start at file offset 0
			 * and can never be bigger than the sector size. We can
			 * never clone only parts of an inline extent, since all
			 * reflink operations must start at a sector size aligned
			 * offset, and the length must be aligned too or end at
			 * the i_size (which implies the whole inlined data).
			 */
			ASSERT(key.offset == 0);
			ASSERT(datal <= fs_info->sectorsize);
			if (key.offset != 0 || datal > fs_info->sectorsize) {
				ret = -EUCLEAN;
				goto out;
			}

			ret = clone_copy_inline_extent(inode, path, &new_key,
						       drop_start, datal, size,
						       comp, buf, &trans);
			if (ret)
				goto out;
		}

		btrfs_release_path(path);

		last_dest_end = ALIGN(new_key.offset + datal,
				      fs_info->sectorsize);
		ret = clone_finish_inode_update(trans, inode, last_dest_end,
						destoff, olen, no_time_update);
		if (ret)
			goto out;
		if (new_key.offset + datal >= destoff + len)
			break;

		btrfs_release_path(path);
		key.offset = next_key_min_offset;

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}
	}
	ret = 0;

	if (last_dest_end < destoff + len) {
		/*
		 * We have an implicit hole that fully or partially overlaps our
		 * cloning range at its end. This means that we either have the
		 * NO_HOLES feature enabled or the implicit hole happened due to
		 * mixing buffered and direct IO writes against this file.
		 */
		btrfs_release_path(path);
		path->leave_spinning = 0;

		ret = btrfs_punch_hole_range(inode, path, last_dest_end,
					     destoff + len - 1, NULL, &trans);
		if (ret)
			goto out;

		ret = clone_finish_inode_update(trans, inode, destoff + len,
						destoff, olen, no_time_update);
	}

out:
	btrfs_free_path(path);
	kvfree(buf);
	return ret;
}

static void btrfs_double_extent_unlock(struct inode *inode1, u64 loff1,
				       struct inode *inode2, u64 loff2, u64 len)
{
	unlock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1);
	unlock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1);
}

static void btrfs_double_extent_lock(struct inode *inode1, u64 loff1,
				     struct inode *inode2, u64 loff2, u64 len)
{
	if (inode1 < inode2) {
		swap(inode1, inode2);
		swap(loff1, loff2);
	} else if (inode1 == inode2 && loff2 < loff1) {
		swap(loff1, loff2);
	}
	lock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1);
	lock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1);
}
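
/*
 * The swaps in btrfs_double_extent_lock() above establish a consistent lock
 * order: the two ranges are always locked in the same inode order and, within
 * one inode, by ascending offset, so concurrent remap operations on the same
 * pair of inodes can not deadlock on each other's extent range locks.
 */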

static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 len,
				   struct inode *dst, u64 dst_loff)
{
	const u64 bs = BTRFS_I(src)->root->fs_info->sb->s_blocksize;
	int ret;

	/*
	 * Lock destination range to serialize with concurrent readpages() and
	 * source range to serialize with relocation.
	 */
	btrfs_double_extent_lock(src, loff, dst, dst_loff, len);
	ret = btrfs_clone(src, dst, loff, len, ALIGN(len, bs), dst_loff, 1);
	btrfs_double_extent_unlock(src, loff, dst, dst_loff, len);

	return ret;
}

static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
			     struct inode *dst, u64 dst_loff)
{
	int ret;
	u64 i, tail_len, chunk_count;
	struct btrfs_root *root_dst = BTRFS_I(dst)->root;

	spin_lock(&root_dst->root_item_lock);
	if (root_dst->send_in_progress) {
		btrfs_warn_rl(root_dst->fs_info,
			      "cannot deduplicate to root %llu while send operations are using it (%d in progress)",
			      root_dst->root_key.objectid,
			      root_dst->send_in_progress);
		spin_unlock(&root_dst->root_item_lock);
		return -EAGAIN;
	}
	root_dst->dedupe_in_progress++;
	spin_unlock(&root_dst->root_item_lock);

	tail_len = olen % BTRFS_MAX_DEDUPE_LEN;
	chunk_count = div_u64(olen, BTRFS_MAX_DEDUPE_LEN);

	for (i = 0; i < chunk_count; i++) {
		ret = btrfs_extent_same_range(src, loff, BTRFS_MAX_DEDUPE_LEN,
					      dst, dst_loff);
		if (ret)
			goto out;

		loff += BTRFS_MAX_DEDUPE_LEN;
		dst_loff += BTRFS_MAX_DEDUPE_LEN;
	}

	if (tail_len > 0)
		ret = btrfs_extent_same_range(src, loff, tail_len, dst, dst_loff);
out:
	spin_lock(&root_dst->root_item_lock);
	root_dst->dedupe_in_progress--;
	spin_unlock(&root_dst->root_item_lock);

	return ret;
}
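
/*
 * For example, deduplicating a 40M range with a BTRFS_MAX_DEDUPE_LEN of 16M
 * is processed by btrfs_extent_same() above as two full 16M chunks followed
 * by an 8M tail (40M % 16M), each chunk being locked, cloned and unlocked
 * independently by btrfs_extent_same_range().
 */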

static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
				      u64 off, u64 olen, u64 destoff)
{
	struct inode *inode = file_inode(file);
	struct inode *src = file_inode(file_src);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	int ret;
	int wb_ret;
	u64 len = olen;
	u64 bs = fs_info->sb->s_blocksize;

	/*
	 * VFS's generic_remap_file_range_prep() protects us from cloning the
	 * eof block into the middle of a file, which would result in corruption
	 * if the file size is not blocksize aligned. So we don't need to check
	 * for that case here.
	 */
	if (off + len == src->i_size)
		len = ALIGN(src->i_size, bs) - off;

	if (destoff > inode->i_size) {
		const u64 wb_start = ALIGN_DOWN(inode->i_size, bs);

		ret = btrfs_cont_expand(inode, inode->i_size, destoff);
		if (ret)
			return ret;
		/*
		 * We may have truncated the last block if the inode's size is
		 * not sector size aligned, so we need to wait for writeback to
		 * complete before proceeding further, otherwise we can race
		 * with cloning and attempt to increment a reference to an
		 * extent that no longer exists (writeback completed right after
		 * we found the previous extent covering eof and before we
		 * attempted to increment its reference count).
		 */
		ret = btrfs_wait_ordered_range(inode, wb_start,
					       destoff - wb_start);
		if (ret)
			return ret;
	}

	/*
	 * Lock destination range to serialize with concurrent readpages() and
	 * source range to serialize with relocation.
	 */
	btrfs_double_extent_lock(src, off, inode, destoff, len);
	ret = btrfs_clone(src, inode, off, olen, len, destoff, 0);
	btrfs_double_extent_unlock(src, off, inode, destoff, len);

	/*
	 * We may have copied an inline extent into a page of the destination
	 * range, so wait for writeback to complete before truncating pages
	 * from the page cache. This is a rare case.
	 */
	wb_ret = btrfs_wait_ordered_range(inode, destoff, len);
	ret = ret ? ret : wb_ret;
	/*
	 * Truncate page cache pages so that future reads will see the cloned
	 * data immediately and not the previous data.
	 */
	truncate_inode_pages_range(&inode->i_data,
				   round_down(destoff, PAGE_SIZE),
				   round_up(destoff + len, PAGE_SIZE) - 1);

	return ret;
}
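
/*
 * btrfs_remap_file_range_prep() below performs the btrfs specific checks and
 * flushing needed on top of the generic VFS helper: it rejects clones to
 * read-only roots and across mounts or filesystems, refuses to mix NODATASUM
 * and checksummed inodes, and flushes delalloc and waits for ordered extents
 * so that the file extent items to be cloned are in the fs tree.
 */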

static int btrfs_remap_file_range_prep(struct file *file_in, loff_t pos_in,
				       struct file *file_out, loff_t pos_out,
				       loff_t *len, unsigned int remap_flags)
{
	struct inode *inode_in = file_inode(file_in);
	struct inode *inode_out = file_inode(file_out);
	u64 bs = BTRFS_I(inode_out)->root->fs_info->sb->s_blocksize;
	bool same_inode = inode_out == inode_in;
	u64 wb_len;
	int ret;

	if (!(remap_flags & REMAP_FILE_DEDUP)) {
		struct btrfs_root *root_out = BTRFS_I(inode_out)->root;

		if (btrfs_root_readonly(root_out))
			return -EROFS;

		if (file_in->f_path.mnt != file_out->f_path.mnt ||
		    inode_in->i_sb != inode_out->i_sb)
			return -EXDEV;
	}

	/* Don't make the dst file partly checksummed */
	if ((BTRFS_I(inode_in)->flags & BTRFS_INODE_NODATASUM) !=
	    (BTRFS_I(inode_out)->flags & BTRFS_INODE_NODATASUM)) {
		return -EINVAL;
	}

	/*
	 * Now that the inodes are locked, we need to start writeback ourselves
	 * and can not rely on the writeback from the VFS's generic helper
	 * generic_remap_file_range_prep() because:
	 *
	 * 1) For compression we must call filemap_fdatawrite_range() twice
	 *    (btrfs_fdatawrite_range() does it for us), and the generic helper
	 *    only calls it once;
	 *
	 * 2) filemap_fdatawrite_range(), called by the generic helper, only
	 *    waits for the writeback to complete, i.e. for IO to be done, and
	 *    not for the ordered extents to complete. We need to wait for them
	 *    to complete so that new file extent items are in the fs tree.
	 */
	if (*len == 0 && !(remap_flags & REMAP_FILE_DEDUP))
		wb_len = ALIGN(inode_in->i_size, bs) - ALIGN_DOWN(pos_in, bs);
	else
		wb_len = ALIGN(*len, bs);

	/*
	 * Since we don't lock ranges, wait for ongoing lockless dio writes (as
	 * any in progress could create their ordered extents after we wait for
	 * existing ordered extents below).
	 */
	inode_dio_wait(inode_in);
	if (!same_inode)
		inode_dio_wait(inode_out);

	/*
	 * Workaround to make sure NOCOW buffered writes reach disk as NOCOW.
	 *
	 * Btrfs' back references do not have a block level granularity, they
	 * work at the whole extent level.
	 * NOCOW buffered writes without data space reserved may not be able
	 * to fall back to CoW due to lack of data space, thus could cause
	 * data loss.
	 *
	 * Here we take a shortcut by flushing the whole inode, so that all
	 * NOCOW writes reach disk as NOCOW before we increase the reference
	 * of the extent. We could do better by only flushing NOCOW data, but
	 * that needs extra accounting.
	 *
	 * Also we don't need to check ASYNC_EXTENT, as async extents will be
	 * CoWed anyway, not affecting the NOCOW part.
	 */
	ret = filemap_flush(inode_in->i_mapping);
	if (ret < 0)
		return ret;

	ret = btrfs_wait_ordered_range(inode_in, ALIGN_DOWN(pos_in, bs),
				       wb_len);
	if (ret < 0)
		return ret;
	ret = btrfs_wait_ordered_range(inode_out, ALIGN_DOWN(pos_out, bs),
				       wb_len);
	if (ret < 0)
		return ret;

	return generic_remap_file_range_prep(file_in, pos_in, file_out, pos_out,
					     len, remap_flags);
}

loff_t btrfs_remap_file_range(struct file *src_file, loff_t off,
			      struct file *dst_file, loff_t destoff,
			      loff_t len, unsigned int remap_flags)
{
	struct inode *src_inode = file_inode(src_file);
	struct inode *dst_inode = file_inode(dst_file);
	bool same_inode = dst_inode == src_inode;
	int ret;

	if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
		return -EINVAL;

	if (same_inode)
		inode_lock(src_inode);
	else
		lock_two_nondirectories(src_inode, dst_inode);

	ret = btrfs_remap_file_range_prep(src_file, off, dst_file, destoff,
					  &len, remap_flags);
	if (ret < 0 || len == 0)
		goto out_unlock;

	if (remap_flags & REMAP_FILE_DEDUP)
		ret = btrfs_extent_same(src_inode, off, len, dst_inode, destoff);
	else
		ret = btrfs_clone_files(dst_file, src_file, off, len, destoff);

out_unlock:
	if (same_inode)
		inode_unlock(src_inode);
	else
		unlock_two_nondirectories(src_inode, dst_inode);

	return ret < 0 ? ret : len;
}
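
/*
 * On success btrfs_remap_file_range() returns the number of bytes cloned or
 * deduplicated, which may be less than requested since
 * generic_remap_file_range_prep() is allowed to shorten the range for a
 * REMAP_FILE_ADVISORY (REMAP_FILE_CAN_SHORTEN) request.
 */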