// SPDX-License-Identifier: GPL-2.0

#include <linux/blkdev.h>
#include <linux/iversion.h>
#include "compression.h"
#include "ctree.h"
#include "delalloc-space.h"
#include "reflink.h"
#include "transaction.h"

#define BTRFS_MAX_DEDUPE_LEN	SZ_16M

static int clone_finish_inode_update(struct btrfs_trans_handle *trans,
				     struct inode *inode,
				     u64 endoff,
				     const u64 destoff,
				     const u64 olen,
				     int no_time_update)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret;

	inode_inc_iversion(inode);
	if (!no_time_update)
		inode->i_mtime = inode->i_ctime = current_time(inode);
	/*
	 * We round up to the block size at eof when determining which
	 * extents to clone above, but shouldn't round up the file size.
	 */
	if (endoff > destoff + olen)
		endoff = destoff + olen;
	if (endoff > inode->i_size) {
		i_size_write(inode, endoff);
		btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
	}

	ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
		goto out;
	}
	ret = btrfs_end_transaction(trans);
out:
	return ret;
}

static int copy_inline_to_page(struct btrfs_inode *inode,
			       const u64 file_offset,
			       char *inline_data,
			       const u64 size,
			       const u64 datal,
			       const u8 comp_type)
{
	const u64 block_size = btrfs_inode_sectorsize(inode);
	const u64 range_end = file_offset + block_size - 1;
	const size_t inline_size = size - btrfs_file_extent_calc_inline_size(0);
	char *data_start = inline_data + btrfs_file_extent_calc_inline_size(0);
	struct extent_changeset *data_reserved = NULL;
	struct page *page = NULL;
	struct address_space *mapping = inode->vfs_inode.i_mapping;
	int ret;

	ASSERT(IS_ALIGNED(file_offset, block_size));

	/*
	 * We have flushed and locked the ranges of the source and destination
	 * inodes, we also have locked the inodes, so we are safe to do a
	 * reservation here. Also we must not do the reservation while holding
	 * a transaction open, otherwise we would deadlock.
	 */
	ret = btrfs_delalloc_reserve_space(inode, &data_reserved, file_offset,
					   block_size);
	if (ret)
		goto out;

	page = find_or_create_page(mapping, file_offset >> PAGE_SHIFT,
				   btrfs_alloc_write_mask(mapping));
	if (!page) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	ret = set_page_extent_mapped(page);
	if (ret < 0)
		goto out_unlock;

	clear_extent_bit(&inode->io_tree, file_offset, range_end,
			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
			 0, 0, NULL);
	ret = btrfs_set_extent_delalloc(inode, file_offset, range_end, 0, NULL);
	if (ret)
		goto out_unlock;

	/*
	 * After dirtying the page our caller will need to start a transaction,
	 * and if we are low on metadata free space, that can cause flushing of
	 * delalloc for all inodes in order to get metadata space released.
	 * However we are holding the range locked for the whole duration of
	 * the clone/dedupe operation, so we may deadlock if that happens and no
	 * other task releases enough space. So mark this inode as not flushable
	 * to avoid such a deadlock. We will clear that flag when we finish
	 * cloning all extents, since a transaction is started after finding
	 * each extent to clone.
	 */
	set_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &inode->runtime_flags);

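	/*
	 * Copy the inline extent's data into the page: a plain memcpy for
	 * uncompressed extents, otherwise decompress straight into the page.
	 */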
	if (comp_type == BTRFS_COMPRESS_NONE) {
		memcpy_to_page(page, 0, data_start, datal);
		flush_dcache_page(page);
	} else {
		ret = btrfs_decompress(comp_type, data_start, page, 0,
				       inline_size, datal);
		if (ret)
			goto out_unlock;
		flush_dcache_page(page);
	}

	/*
	 * If our inline data is smaller than the block/page size, then the
	 * remainder of the block/page is equivalent to zeroes. We had something
	 * like the following done:
	 *
	 * $ xfs_io -f -c "pwrite -S 0xab 0 500" file
	 * $ sync  # (or fsync)
	 * $ xfs_io -c "falloc 0 4K" file
	 * $ xfs_io -c "pwrite -S 0xcd 4K 4K"
	 *
	 * So what's in the range [500, 4095] corresponds to zeroes.
	 */
	if (datal < block_size) {
		memzero_page(page, datal, block_size - datal);
		flush_dcache_page(page);
	}

	SetPageUptodate(page);
	ClearPageChecked(page);
	set_page_dirty(page);
out_unlock:
	if (page) {
		unlock_page(page);
		put_page(page);
	}
	if (ret)
		btrfs_delalloc_release_space(inode, data_reserved, file_offset,
					     block_size, true);
	btrfs_delalloc_release_extents(inode, block_size);
out:
	extent_changeset_free(data_reserved);

	return ret;
}

/*
 * Deal with cloning of inline extents. We try to copy the inline extent from
 * the source inode to the destination inode when possible. When not possible
 * we copy the inline extent's data into the respective page of the destination
 * inode.
 */
static int clone_copy_inline_extent(struct inode *dst,
				    struct btrfs_path *path,
				    struct btrfs_key *new_key,
				    const u64 drop_start,
				    const u64 datal,
				    const u64 size,
				    const u8 comp_type,
				    char *inline_data,
				    struct btrfs_trans_handle **trans_out)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(dst->i_sb);
	struct btrfs_root *root = BTRFS_I(dst)->root;
	const u64 aligned_end = ALIGN(new_key->offset + datal,
				      fs_info->sectorsize);
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_drop_extents_args drop_args = { 0 };
	int ret;
	struct btrfs_key key;

	if (new_key->offset > 0) {
		ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
					  inline_data, size, datal, comp_type);
		goto out;
	}

	key.objectid = btrfs_ino(BTRFS_I(dst));
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0) {
		return ret;
	} else if (ret > 0) {
		if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				return ret;
			else if (ret > 0)
				goto copy_inline_extent;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid == btrfs_ino(BTRFS_I(dst)) &&
		    key.type == BTRFS_EXTENT_DATA_KEY) {
			/*
			 * There's an implicit hole at file offset 0, copy the
			 * inline extent's data to the page.
			 */
			ASSERT(key.offset > 0);
			goto copy_to_page;
		}
	} else if (i_size_read(dst) <= datal) {
		struct btrfs_file_extent_item *ei;

		ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_file_extent_item);
		/*
		 * If it's an inline extent replace it with the source inline
		 * extent, otherwise copy the source inline extent data into
		 * the respective page at the destination inode.
		 */
		if (btrfs_file_extent_type(path->nodes[0], ei) ==
		    BTRFS_FILE_EXTENT_INLINE)
			goto copy_inline_extent;

		goto copy_to_page;
	}

copy_inline_extent:
	/*
	 * We have no extent items, or we have an extent at offset 0 which may
	 * or may not be inlined. All these cases are dealt with the same way.
	 */
	if (i_size_read(dst) > datal) {
		/*
		 * At the destination offset 0 we have either a hole, a regular
		 * extent or an inline extent larger than the one we want to
		 * clone. Deal with all these cases by copying the inline extent
		 * data into the respective page at the destination inode.
		 */
		goto copy_to_page;
	}

	/*
	 * Release path before starting a new transaction so we don't hold locks
	 * that would confuse lockdep.
	 */
	btrfs_release_path(path);
	/*
	 * If we end up here it means we are copying the inline extent into a
	 * leaf of the destination inode. We know we will drop or adjust at most
	 * one extent item in the destination root.
	 *
	 * 1 unit - adjusting old extent (we may have to split it)
	 * 1 unit - add new extent
	 * 1 unit - inode update
	 */
	trans = btrfs_start_transaction(root, 3);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		goto out;
	}
	drop_args.path = path;
	drop_args.start = drop_start;
	drop_args.end = aligned_end;
	drop_args.drop_cache = true;
	ret = btrfs_drop_extents(trans, root, BTRFS_I(dst), &drop_args);
	if (ret)
		goto out;
	ret = btrfs_insert_empty_item(trans, root, path, new_key, size);
	if (ret)
		goto out;

	write_extent_buffer(path->nodes[0], inline_data,
			    btrfs_item_ptr_offset(path->nodes[0],
						  path->slots[0]),
			    size);
	btrfs_update_inode_bytes(BTRFS_I(dst), datal, drop_args.bytes_found);
	set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(dst)->runtime_flags);
	ret = btrfs_inode_set_file_extent_range(BTRFS_I(dst), 0, aligned_end);
out:
	if (!ret && !trans) {
		/*
		 * No transaction here means we copied the inline extent into a
		 * page of the destination inode.
		 *
		 * 1 unit to update inode item
		 */
		trans = btrfs_start_transaction(root, 1);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			trans = NULL;
		}
	}
	if (ret && trans) {
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
	}
	if (!ret)
		*trans_out = trans;

	return ret;

copy_to_page:
	/*
	 * Release our path because we don't need it anymore and also because
	 * copy_inline_to_page() needs to reserve data and metadata, which may
	 * need to flush delalloc when we are low on available space and
	 * therefore cause a deadlock if writeback of an inline extent needs to
	 * write to the same leaf or an ordered extent completion needs to write
	 * to the same leaf.
	 */
	btrfs_release_path(path);

	ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
				  inline_data, size, datal, comp_type);
	goto out;
}

/**
 * btrfs_clone() - clone a range from one inode to another
 *
 * @src: Inode to clone from
 * @inode: Inode to clone to
 * @off: Offset within source to start clone from
 * @olen: Original length, passed by user, of range to clone
 * @olen_aligned: Block-aligned value of olen
 * @destoff: Offset within @inode to start clone
 * @no_time_update: Whether to update mtime/ctime on the target inode
 */
static int btrfs_clone(struct inode *src, struct inode *inode,
		       const u64 off, const u64 olen, const u64 olen_aligned,
		       const u64 destoff, int no_time_update)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_path *path = NULL;
	struct extent_buffer *leaf;
	struct btrfs_trans_handle *trans;
	char *buf = NULL;
	struct btrfs_key key;
	u32 nritems;
	int slot;
	int ret;
	const u64 len = olen_aligned;
	u64 last_dest_end = destoff;

	ret = -ENOMEM;
	buf = kvmalloc(fs_info->nodesize, GFP_KERNEL);
	if (!buf)
		return ret;

	path = btrfs_alloc_path();
	if (!path) {
		kvfree(buf);
		return ret;
	}

	path->reada = READA_FORWARD;
	/* Clone data */
	key.objectid = btrfs_ino(BTRFS_I(src));
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = off;

	while (1) {
		u64 next_key_min_offset = key.offset + 1;
		struct btrfs_file_extent_item *extent;
		u64 extent_gen;
		int type;
		u32 size;
		struct btrfs_key new_key;
		u64 disko = 0, diskl = 0;
		u64 datao = 0, datal = 0;
		u8 comp;
		u64 drop_start;

		/* Note the key will change type as we walk through the tree */
		ret = btrfs_search_slot(NULL, BTRFS_I(src)->root, &key, path,
					0, 0);
		if (ret < 0)
			goto out;
		/*
		 * First search: if no extent item that starts at offset off was
		 * found but the previous item is an extent item, it's possible
		 * it might overlap our target range, therefore process it.
		 */
		if (key.offset == off && ret > 0 && path->slots[0] > 0) {
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0] - 1);
			if (key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}

		nritems = btrfs_header_nritems(path->nodes[0]);
process_slot:
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(BTRFS_I(src)->root, path);
			if (ret < 0)
				goto out;
			if (ret > 0)
				break;
			nritems = btrfs_header_nritems(path->nodes[0]);
		}
		leaf = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.type > BTRFS_EXTENT_DATA_KEY ||
		    key.objectid != btrfs_ino(BTRFS_I(src)))
			break;

		ASSERT(key.type == BTRFS_EXTENT_DATA_KEY);

		extent = btrfs_item_ptr(leaf, slot,
					struct btrfs_file_extent_item);
		extent_gen = btrfs_file_extent_generation(leaf, extent);
		comp = btrfs_file_extent_compression(leaf, extent);
		type = btrfs_file_extent_type(leaf, extent);
		if (type == BTRFS_FILE_EXTENT_REG ||
		    type == BTRFS_FILE_EXTENT_PREALLOC) {
			disko = btrfs_file_extent_disk_bytenr(leaf, extent);
			diskl = btrfs_file_extent_disk_num_bytes(leaf, extent);
			datao = btrfs_file_extent_offset(leaf, extent);
			datal = btrfs_file_extent_num_bytes(leaf, extent);
		} else if (type == BTRFS_FILE_EXTENT_INLINE) {
			/* Take upper bound, may be compressed */
			datal = btrfs_file_extent_ram_bytes(leaf, extent);
		}

		/*
		 * The first search might have left us at an extent item that
		 * ends before our target range's start, which can happen if we
		 * have holes and the NO_HOLES feature enabled.
		 */
		if (key.offset + datal <= off) {
			path->slots[0]++;
			goto process_slot;
		} else if (key.offset >= off + len) {
			break;
		}
		next_key_min_offset = key.offset + datal;
		size = btrfs_item_size_nr(leaf, slot);
		read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf, slot),
				   size);

		btrfs_release_path(path);

		memcpy(&new_key, &key, sizeof(new_key));
		new_key.objectid = btrfs_ino(BTRFS_I(inode));
		if (off <= key.offset)
			new_key.offset = key.offset + destoff - off;
		else
			new_key.offset = destoff;

		/*
		 * Deal with a hole that doesn't have an extent item that
		 * represents it (NO_HOLES feature enabled).
		 * This hole is either in the middle of the cloning range or at
		 * the beginning (fully overlaps it or partially overlaps it).
		 */
		if (new_key.offset != last_dest_end)
			drop_start = last_dest_end;
		else
			drop_start = new_key.offset;

		if (type == BTRFS_FILE_EXTENT_REG ||
		    type == BTRFS_FILE_EXTENT_PREALLOC) {
			struct btrfs_replace_extent_info clone_info;

			/*
			 *    a  | --- range to clone ---|  b
			 * | ------------- extent ------------- |
			 */

			/* Subtract range b */
			if (key.offset + datal > off + len)
				datal = off + len - key.offset;

			/* Subtract range a */
			if (off > key.offset) {
				datao += off - key.offset;
				datal -= off - key.offset;
			}

			clone_info.disk_offset = disko;
			clone_info.disk_len = diskl;
			clone_info.data_offset = datao;
			clone_info.data_len = datal;
			clone_info.file_offset = new_key.offset;
			clone_info.extent_buf = buf;
			clone_info.is_new_extent = false;
			ret = btrfs_replace_file_extents(BTRFS_I(inode), path,
					drop_start, new_key.offset + datal - 1,
					&clone_info, &trans);
			if (ret)
				goto out;
		} else if (type == BTRFS_FILE_EXTENT_INLINE) {
			/*
			 * Inline extents always have to start at file offset 0
			 * and can never be bigger than the sector size. We can
			 * never clone only parts of an inline extent, since all
			 * reflink operations must start at a sector size aligned
			 * offset, and the length must be aligned too or end at
			 * the i_size (which implies the whole inlined data).
			 */
			ASSERT(key.offset == 0);
			ASSERT(datal <= fs_info->sectorsize);
			if (key.offset != 0 || datal > fs_info->sectorsize)
				return -EUCLEAN;

			ret = clone_copy_inline_extent(inode, path, &new_key,
						       drop_start, datal, size,
						       comp, buf, &trans);
			if (ret)
				goto out;
		}

		btrfs_release_path(path);

		/*
		 * If this is a new extent, update the last_reflink_trans of
		 * both inodes. This is used by fsync to make sure it does not
		 * log multiple checksum items with overlapping ranges. For
		 * older extents we don't need to do it since inode logging
		 * skips the checksums for older extents. Also ignore holes and
		 * inline extents because they don't have checksums in the csum
		 * tree.
		 */
		if (extent_gen == trans->transid && disko > 0) {
			BTRFS_I(src)->last_reflink_trans = trans->transid;
			BTRFS_I(inode)->last_reflink_trans = trans->transid;
		}

		last_dest_end = ALIGN(new_key.offset + datal,
				      fs_info->sectorsize);
		ret = clone_finish_inode_update(trans, inode, last_dest_end,
						destoff, olen, no_time_update);
		if (ret)
			goto out;
		if (new_key.offset + datal >= destoff + len)
			break;

		btrfs_release_path(path);
		key.offset = next_key_min_offset;

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}

		cond_resched();
	}
	ret = 0;

	if (last_dest_end < destoff + len) {
		/*
		 * We have an implicit hole that fully or partially overlaps our
		 * cloning range at its end. This means that we either have the
		 * NO_HOLES feature enabled or the implicit hole happened due to
		 * mixing buffered and direct IO writes against this file.
		 */
		btrfs_release_path(path);

		/*
		 * When using NO_HOLES and we are cloning a range that covers
		 * only a hole (no extents) into a range beyond the current
		 * i_size, punching a hole in the target range will not create
		 * an extent map defining a hole, because the range starts at or
		 * beyond current i_size. If the file previously had an i_size
		 * greater than the new i_size set by this clone operation, we
		 * need to make sure the next fsync is a full fsync, so that it
		 * detects and logs a hole covering a range from the current
		 * i_size to the new i_size. If the clone range covers extents,
		 * besides a hole, then we know the full sync flag was already
		 * set by previous calls to btrfs_replace_file_extents() that
		 * replaced file extent items.
		 */
		if (last_dest_end >= i_size_read(inode))
			set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
				&BTRFS_I(inode)->runtime_flags);

		ret = btrfs_replace_file_extents(BTRFS_I(inode), path,
				last_dest_end, destoff + len - 1, NULL, &trans);
		if (ret)
			goto out;

		ret = clone_finish_inode_update(trans, inode, destoff + len,
						destoff, olen, no_time_update);
	}

out:
	btrfs_free_path(path);
	kvfree(buf);
	clear_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &BTRFS_I(inode)->runtime_flags);

	return ret;
}

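/* Unlock the extent io tree ranges locked by btrfs_double_extent_lock(). */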
static void btrfs_double_extent_unlock(struct inode *inode1, u64 loff1,
				       struct inode *inode2, u64 loff2, u64 len)
{
	unlock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1);
	unlock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1);
}

static void btrfs_double_extent_lock(struct inode *inode1, u64 loff1,
				     struct inode *inode2, u64 loff2, u64 len)
{
	if (inode1 < inode2) {
		swap(inode1, inode2);
		swap(loff1, loff2);
	} else if (inode1 == inode2 && loff2 < loff1) {
		swap(loff1, loff2);
	}
	lock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1);
	lock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1);
}

static void btrfs_double_mmap_lock(struct inode *inode1, struct inode *inode2)
{
	if (inode1 < inode2)
		swap(inode1, inode2);
	down_write(&BTRFS_I(inode1)->i_mmap_lock);
	down_write_nested(&BTRFS_I(inode2)->i_mmap_lock, SINGLE_DEPTH_NESTING);
}

static void btrfs_double_mmap_unlock(struct inode *inode1, struct inode *inode2)
{
	up_write(&BTRFS_I(inode1)->i_mmap_lock);
	up_write(&BTRFS_I(inode2)->i_mmap_lock);
}

static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 len,
				   struct inode *dst, u64 dst_loff)
{
	const u64 bs = BTRFS_I(src)->root->fs_info->sb->s_blocksize;
	int ret;

	/*
	 * Lock destination range to serialize with concurrent readpages() and
	 * source range to serialize with relocation.
	 */
	btrfs_double_extent_lock(src, loff, dst, dst_loff, len);
	ret = btrfs_clone(src, dst, loff, len, ALIGN(len, bs), dst_loff, 1);
	btrfs_double_extent_unlock(src, loff, dst, dst_loff, len);

	return ret;
}

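/*
 * Deduplicate the whole range by cloning it in chunks of at most
 * BTRFS_MAX_DEDUPE_LEN, refusing to run while a send operation is using the
 * destination root.
 */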
static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
			     struct inode *dst, u64 dst_loff)
{
	int ret;
	u64 i, tail_len, chunk_count;
	struct btrfs_root *root_dst = BTRFS_I(dst)->root;

	spin_lock(&root_dst->root_item_lock);
	if (root_dst->send_in_progress) {
		btrfs_warn_rl(root_dst->fs_info,
			      "cannot deduplicate to root %llu while send operations are using it (%d in progress)",
			      root_dst->root_key.objectid,
			      root_dst->send_in_progress);
		spin_unlock(&root_dst->root_item_lock);
		return -EAGAIN;
	}
	root_dst->dedupe_in_progress++;
	spin_unlock(&root_dst->root_item_lock);

	tail_len = olen % BTRFS_MAX_DEDUPE_LEN;
	chunk_count = div_u64(olen, BTRFS_MAX_DEDUPE_LEN);

	for (i = 0; i < chunk_count; i++) {
		ret = btrfs_extent_same_range(src, loff, BTRFS_MAX_DEDUPE_LEN,
					      dst, dst_loff);
		if (ret)
			goto out;

		loff += BTRFS_MAX_DEDUPE_LEN;
		dst_loff += BTRFS_MAX_DEDUPE_LEN;
	}

	if (tail_len > 0)
		ret = btrfs_extent_same_range(src, loff, tail_len, dst, dst_loff);
out:
	spin_lock(&root_dst->root_item_lock);
	root_dst->dedupe_in_progress--;
	spin_unlock(&root_dst->root_item_lock);

	return ret;
}

static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
				      u64 off, u64 olen, u64 destoff)
{
	struct inode *inode = file_inode(file);
	struct inode *src = file_inode(file_src);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	int ret;
	int wb_ret;
	u64 len = olen;
	u64 bs = fs_info->sb->s_blocksize;

	/*
	 * VFS's generic_remap_file_range_prep() protects us from cloning the
	 * eof block into the middle of a file, which would result in corruption
	 * if the file size is not blocksize aligned. So we don't need to check
	 * for that case here.
	 */
	if (off + len == src->i_size)
		len = ALIGN(src->i_size, bs) - off;

	if (destoff > inode->i_size) {
		const u64 wb_start = ALIGN_DOWN(inode->i_size, bs);

		ret = btrfs_cont_expand(BTRFS_I(inode), inode->i_size, destoff);
		if (ret)
			return ret;
		/*
		 * We may have truncated the last block if the inode's size is
		 * not sector size aligned, so we need to wait for writeback to
		 * complete before proceeding further, otherwise we can race
		 * with cloning and attempt to increment a reference to an
		 * extent that no longer exists (writeback completed right after
		 * we found the previous extent covering eof and before we
		 * attempted to increment its reference count).
		 */
		ret = btrfs_wait_ordered_range(inode, wb_start,
					       destoff - wb_start);
		if (ret)
			return ret;
	}

	/*
	 * Lock destination range to serialize with concurrent readpages() and
	 * source range to serialize with relocation.
	 */
	btrfs_double_extent_lock(src, off, inode, destoff, len);
	ret = btrfs_clone(src, inode, off, olen, len, destoff, 0);
	btrfs_double_extent_unlock(src, off, inode, destoff, len);

	/*
	 * We may have copied an inline extent into a page of the destination
	 * range, so wait for writeback to complete before truncating pages
	 * from the page cache. This is a rare case.
	 */
	wb_ret = btrfs_wait_ordered_range(inode, destoff, len);
	ret = ret ? ret : wb_ret;
	/*
	 * Truncate page cache pages so that future reads will see the cloned
	 * data immediately and not the previous data.
	 */
	truncate_inode_pages_range(&inode->i_data,
				   round_down(destoff, PAGE_SIZE),
				   round_up(destoff + len, PAGE_SIZE) - 1);

	return ret;
}

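/*
 * Btrfs specific checks and writeback done before handing the remap over to
 * generic_remap_file_range_prep(): flush and wait for ordered extents on both
 * ranges and for any in-flight direct IO writes.
 */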
static int btrfs_remap_file_range_prep(struct file *file_in, loff_t pos_in,
				       struct file *file_out, loff_t pos_out,
				       loff_t *len, unsigned int remap_flags)
{
	struct inode *inode_in = file_inode(file_in);
	struct inode *inode_out = file_inode(file_out);
	u64 bs = BTRFS_I(inode_out)->root->fs_info->sb->s_blocksize;
	bool same_inode = inode_out == inode_in;
	u64 wb_len;
	int ret;

	if (!(remap_flags & REMAP_FILE_DEDUP)) {
		struct btrfs_root *root_out = BTRFS_I(inode_out)->root;

		if (btrfs_root_readonly(root_out))
			return -EROFS;

		if (file_in->f_path.mnt != file_out->f_path.mnt ||
		    inode_in->i_sb != inode_out->i_sb)
			return -EXDEV;
	}

	/* Don't make the dst file partly checksummed */
	if ((BTRFS_I(inode_in)->flags & BTRFS_INODE_NODATASUM) !=
	    (BTRFS_I(inode_out)->flags & BTRFS_INODE_NODATASUM)) {
		return -EINVAL;
	}

	/*
	 * Now that the inodes are locked, we need to start writeback ourselves
	 * and can not rely on the writeback from the VFS's generic helper
	 * generic_remap_file_range_prep() because:
	 *
	 * 1) For compression we must call filemap_fdatawrite_range() twice on
	 *    the range (btrfs_fdatawrite_range() does it for us), and the
	 *    generic helper only calls it once;
	 *
	 * 2) filemap_fdatawrite_range(), called by the generic helper, only
	 *    waits for the writeback to complete, i.e. for IO to be done, and
	 *    not for the ordered extents to complete. We need to wait for them
	 *    to complete so that new file extent items are in the fs tree.
	 */
	if (*len == 0 && !(remap_flags & REMAP_FILE_DEDUP))
		wb_len = ALIGN(inode_in->i_size, bs) - ALIGN_DOWN(pos_in, bs);
	else
		wb_len = ALIGN(*len, bs);

	/*
	 * Since we don't lock ranges, wait for ongoing lockless dio writes (as
	 * any in progress could create its ordered extents after we wait for
	 * existing ordered extents below).
	 */
	inode_dio_wait(inode_in);
	if (!same_inode)
		inode_dio_wait(inode_out);

	/*
	 * Workaround to make sure NOCOW buffered writes reach disk as NOCOW.
	 *
	 * Btrfs' back references do not have a block level granularity, they
	 * work at the whole extent level.
	 * A NOCOW buffered write without data space reserved may not be able
	 * to fall back to CoW due to lack of data space, and thus could cause
	 * data loss.
	 *
	 * Here we take a shortcut by flushing the whole inode, so that all
	 * NOCOW writes should reach disk as NOCOW before we increase the
	 * reference of the extent. We could do better by only flushing NOCOW
	 * data, but that needs extra accounting.
	 *
	 * Also we don't need to check ASYNC_EXTENT, as async extents will be
	 * CoWed anyway, not affecting the NOCOW part.
	 */
	ret = filemap_flush(inode_in->i_mapping);
	if (ret < 0)
		return ret;

	ret = btrfs_wait_ordered_range(inode_in, ALIGN_DOWN(pos_in, bs),
				       wb_len);
	if (ret < 0)
		return ret;
	ret = btrfs_wait_ordered_range(inode_out, ALIGN_DOWN(pos_out, bs),
				       wb_len);
	if (ret < 0)
		return ret;

	return generic_remap_file_range_prep(file_in, pos_in, file_out, pos_out,
					     len, remap_flags);
}

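/* Return true if writes to this file must be synchronous (O_SYNC, O_DSYNC or S_SYNC). */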
static bool file_sync_write(const struct file *file)
{
	if (file->f_flags & (__O_SYNC | O_DSYNC))
		return true;
	if (IS_SYNC(file_inode(file)))
		return true;

	return false;
}

loff_t btrfs_remap_file_range(struct file *src_file, loff_t off,
			      struct file *dst_file, loff_t destoff, loff_t len,
			      unsigned int remap_flags)
{
	struct inode *src_inode = file_inode(src_file);
	struct inode *dst_inode = file_inode(dst_file);
	bool same_inode = dst_inode == src_inode;
	int ret;

	if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
		return -EINVAL;

	if (same_inode) {
		btrfs_inode_lock(src_inode, BTRFS_ILOCK_MMAP);
	} else {
		lock_two_nondirectories(src_inode, dst_inode);
		btrfs_double_mmap_lock(src_inode, dst_inode);
	}

	ret = btrfs_remap_file_range_prep(src_file, off, dst_file, destoff,
					  &len, remap_flags);
	if (ret < 0 || len == 0)
		goto out_unlock;

	if (remap_flags & REMAP_FILE_DEDUP)
		ret = btrfs_extent_same(src_inode, off, len, dst_inode, destoff);
	else
		ret = btrfs_clone_files(dst_file, src_file, off, len, destoff);

out_unlock:
	if (same_inode) {
		btrfs_inode_unlock(src_inode, BTRFS_ILOCK_MMAP);
	} else {
		btrfs_double_mmap_unlock(src_inode, dst_inode);
		unlock_two_nondirectories(src_inode, dst_inode);
	}

	/*
	 * If either the source or the destination file was opened with O_SYNC,
	 * O_DSYNC or has the S_SYNC attribute, fsync both the destination and
	 * source files/ranges, so that after a successful return (0) followed
	 * by a power failure, the reflinked data is still readable from both
	 * files/ranges.
	 */
	if (ret == 0 && len > 0 &&
	    (file_sync_write(src_file) || file_sync_write(dst_file))) {
		ret = btrfs_sync_file(src_file, off, off + len - 1, 0);
		if (ret == 0)
			ret = btrfs_sync_file(dst_file, destoff,
					      destoff + len - 1, 0);
	}

	return ret < 0 ? ret : len;
}