1 // SPDX-License-Identifier: GPL-2.0 2 3 #include <linux/blkdev.h> 4 #include <linux/iversion.h> 5 #include "ctree.h" 6 #include "fs.h" 7 #include "messages.h" 8 #include "compression.h" 9 #include "delalloc-space.h" 10 #include "disk-io.h" 11 #include "reflink.h" 12 #include "transaction.h" 13 #include "subpage.h" 14 #include "accessors.h" 15 #include "file-item.h" 16 #include "file.h" 17 #include "super.h" 18 19 #define BTRFS_MAX_DEDUPE_LEN SZ_16M 20 21 static int clone_finish_inode_update(struct btrfs_trans_handle *trans, 22 struct inode *inode, 23 u64 endoff, 24 const u64 destoff, 25 const u64 olen, 26 int no_time_update) 27 { 28 struct btrfs_root *root = BTRFS_I(inode)->root; 29 int ret; 30 31 inode_inc_iversion(inode); 32 if (!no_time_update) { 33 inode->i_mtime = inode_set_ctime_current(inode); 34 } 35 /* 36 * We round up to the block size at eof when determining which 37 * extents to clone above, but shouldn't round up the file size. 38 */ 39 if (endoff > destoff + olen) 40 endoff = destoff + olen; 41 if (endoff > inode->i_size) { 42 i_size_write(inode, endoff); 43 btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0); 44 } 45 46 ret = btrfs_update_inode(trans, root, BTRFS_I(inode)); 47 if (ret) { 48 btrfs_abort_transaction(trans, ret); 49 btrfs_end_transaction(trans); 50 goto out; 51 } 52 ret = btrfs_end_transaction(trans); 53 out: 54 return ret; 55 } 56 57 static int copy_inline_to_page(struct btrfs_inode *inode, 58 const u64 file_offset, 59 char *inline_data, 60 const u64 size, 61 const u64 datal, 62 const u8 comp_type) 63 { 64 struct btrfs_fs_info *fs_info = inode->root->fs_info; 65 const u32 block_size = fs_info->sectorsize; 66 const u64 range_end = file_offset + block_size - 1; 67 const size_t inline_size = size - btrfs_file_extent_calc_inline_size(0); 68 char *data_start = inline_data + btrfs_file_extent_calc_inline_size(0); 69 struct extent_changeset *data_reserved = NULL; 70 struct page *page = NULL; 71 struct address_space *mapping = inode->vfs_inode.i_mapping; 72 int ret; 73 74 ASSERT(IS_ALIGNED(file_offset, block_size)); 75 76 /* 77 * We have flushed and locked the ranges of the source and destination 78 * inodes, we also have locked the inodes, so we are safe to do a 79 * reservation here. Also we must not do the reservation while holding 80 * a transaction open, otherwise we would deadlock. 81 */ 82 ret = btrfs_delalloc_reserve_space(inode, &data_reserved, file_offset, 83 block_size); 84 if (ret) 85 goto out; 86 87 page = find_or_create_page(mapping, file_offset >> PAGE_SHIFT, 88 btrfs_alloc_write_mask(mapping)); 89 if (!page) { 90 ret = -ENOMEM; 91 goto out_unlock; 92 } 93 94 ret = set_page_extent_mapped(page); 95 if (ret < 0) 96 goto out_unlock; 97 98 clear_extent_bit(&inode->io_tree, file_offset, range_end, 99 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 100 NULL); 101 ret = btrfs_set_extent_delalloc(inode, file_offset, range_end, 0, NULL); 102 if (ret) 103 goto out_unlock; 104 105 /* 106 * After dirtying the page our caller will need to start a transaction, 107 * and if we are low on metadata free space, that can cause flushing of 108 * delalloc for all inodes in order to get metadata space released. 109 * However we are holding the range locked for the whole duration of 110 * the clone/dedupe operation, so we may deadlock if that happens and no 111 * other task releases enough space. So mark this inode as not being 112 * possible to flush to avoid such deadlock. We will clear that flag 113 * when we finish cloning all extents, since a transaction is started 114 * after finding each extent to clone. 115 */ 116 set_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &inode->runtime_flags); 117 118 if (comp_type == BTRFS_COMPRESS_NONE) { 119 memcpy_to_page(page, offset_in_page(file_offset), data_start, 120 datal); 121 } else { 122 ret = btrfs_decompress(comp_type, data_start, page, 123 offset_in_page(file_offset), 124 inline_size, datal); 125 if (ret) 126 goto out_unlock; 127 flush_dcache_page(page); 128 } 129 130 /* 131 * If our inline data is smaller then the block/page size, then the 132 * remaining of the block/page is equivalent to zeroes. We had something 133 * like the following done: 134 * 135 * $ xfs_io -f -c "pwrite -S 0xab 0 500" file 136 * $ sync # (or fsync) 137 * $ xfs_io -c "falloc 0 4K" file 138 * $ xfs_io -c "pwrite -S 0xcd 4K 4K" 139 * 140 * So what's in the range [500, 4095] corresponds to zeroes. 141 */ 142 if (datal < block_size) 143 memzero_page(page, datal, block_size - datal); 144 145 btrfs_page_set_uptodate(fs_info, page, file_offset, block_size); 146 btrfs_page_clear_checked(fs_info, page, file_offset, block_size); 147 btrfs_page_set_dirty(fs_info, page, file_offset, block_size); 148 out_unlock: 149 if (page) { 150 unlock_page(page); 151 put_page(page); 152 } 153 if (ret) 154 btrfs_delalloc_release_space(inode, data_reserved, file_offset, 155 block_size, true); 156 btrfs_delalloc_release_extents(inode, block_size); 157 out: 158 extent_changeset_free(data_reserved); 159 160 return ret; 161 } 162 163 /* 164 * Deal with cloning of inline extents. We try to copy the inline extent from 165 * the source inode to destination inode when possible. When not possible we 166 * copy the inline extent's data into the respective page of the inode. 167 */ 168 static int clone_copy_inline_extent(struct inode *dst, 169 struct btrfs_path *path, 170 struct btrfs_key *new_key, 171 const u64 drop_start, 172 const u64 datal, 173 const u64 size, 174 const u8 comp_type, 175 char *inline_data, 176 struct btrfs_trans_handle **trans_out) 177 { 178 struct btrfs_fs_info *fs_info = btrfs_sb(dst->i_sb); 179 struct btrfs_root *root = BTRFS_I(dst)->root; 180 const u64 aligned_end = ALIGN(new_key->offset + datal, 181 fs_info->sectorsize); 182 struct btrfs_trans_handle *trans = NULL; 183 struct btrfs_drop_extents_args drop_args = { 0 }; 184 int ret; 185 struct btrfs_key key; 186 187 if (new_key->offset > 0) { 188 ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset, 189 inline_data, size, datal, comp_type); 190 goto out; 191 } 192 193 key.objectid = btrfs_ino(BTRFS_I(dst)); 194 key.type = BTRFS_EXTENT_DATA_KEY; 195 key.offset = 0; 196 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 197 if (ret < 0) { 198 return ret; 199 } else if (ret > 0) { 200 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) { 201 ret = btrfs_next_leaf(root, path); 202 if (ret < 0) 203 return ret; 204 else if (ret > 0) 205 goto copy_inline_extent; 206 } 207 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); 208 if (key.objectid == btrfs_ino(BTRFS_I(dst)) && 209 key.type == BTRFS_EXTENT_DATA_KEY) { 210 /* 211 * There's an implicit hole at file offset 0, copy the 212 * inline extent's data to the page. 213 */ 214 ASSERT(key.offset > 0); 215 goto copy_to_page; 216 } 217 } else if (i_size_read(dst) <= datal) { 218 struct btrfs_file_extent_item *ei; 219 220 ei = btrfs_item_ptr(path->nodes[0], path->slots[0], 221 struct btrfs_file_extent_item); 222 /* 223 * If it's an inline extent replace it with the source inline 224 * extent, otherwise copy the source inline extent data into 225 * the respective page at the destination inode. 226 */ 227 if (btrfs_file_extent_type(path->nodes[0], ei) == 228 BTRFS_FILE_EXTENT_INLINE) 229 goto copy_inline_extent; 230 231 goto copy_to_page; 232 } 233 234 copy_inline_extent: 235 /* 236 * We have no extent items, or we have an extent at offset 0 which may 237 * or may not be inlined. All these cases are dealt the same way. 238 */ 239 if (i_size_read(dst) > datal) { 240 /* 241 * At the destination offset 0 we have either a hole, a regular 242 * extent or an inline extent larger then the one we want to 243 * clone. Deal with all these cases by copying the inline extent 244 * data into the respective page at the destination inode. 245 */ 246 goto copy_to_page; 247 } 248 249 /* 250 * Release path before starting a new transaction so we don't hold locks 251 * that would confuse lockdep. 252 */ 253 btrfs_release_path(path); 254 /* 255 * If we end up here it means were copy the inline extent into a leaf 256 * of the destination inode. We know we will drop or adjust at most one 257 * extent item in the destination root. 258 * 259 * 1 unit - adjusting old extent (we may have to split it) 260 * 1 unit - add new extent 261 * 1 unit - inode update 262 */ 263 trans = btrfs_start_transaction(root, 3); 264 if (IS_ERR(trans)) { 265 ret = PTR_ERR(trans); 266 trans = NULL; 267 goto out; 268 } 269 drop_args.path = path; 270 drop_args.start = drop_start; 271 drop_args.end = aligned_end; 272 drop_args.drop_cache = true; 273 ret = btrfs_drop_extents(trans, root, BTRFS_I(dst), &drop_args); 274 if (ret) 275 goto out; 276 ret = btrfs_insert_empty_item(trans, root, path, new_key, size); 277 if (ret) 278 goto out; 279 280 write_extent_buffer(path->nodes[0], inline_data, 281 btrfs_item_ptr_offset(path->nodes[0], 282 path->slots[0]), 283 size); 284 btrfs_update_inode_bytes(BTRFS_I(dst), datal, drop_args.bytes_found); 285 btrfs_set_inode_full_sync(BTRFS_I(dst)); 286 ret = btrfs_inode_set_file_extent_range(BTRFS_I(dst), 0, aligned_end); 287 out: 288 if (!ret && !trans) { 289 /* 290 * No transaction here means we copied the inline extent into a 291 * page of the destination inode. 292 * 293 * 1 unit to update inode item 294 */ 295 trans = btrfs_start_transaction(root, 1); 296 if (IS_ERR(trans)) { 297 ret = PTR_ERR(trans); 298 trans = NULL; 299 } 300 } 301 if (ret && trans) { 302 btrfs_abort_transaction(trans, ret); 303 btrfs_end_transaction(trans); 304 } 305 if (!ret) 306 *trans_out = trans; 307 308 return ret; 309 310 copy_to_page: 311 /* 312 * Release our path because we don't need it anymore and also because 313 * copy_inline_to_page() needs to reserve data and metadata, which may 314 * need to flush delalloc when we are low on available space and 315 * therefore cause a deadlock if writeback of an inline extent needs to 316 * write to the same leaf or an ordered extent completion needs to write 317 * to the same leaf. 318 */ 319 btrfs_release_path(path); 320 321 ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset, 322 inline_data, size, datal, comp_type); 323 goto out; 324 } 325 326 /* 327 * Clone a range from inode file to another. 328 * 329 * @src: Inode to clone from 330 * @inode: Inode to clone to 331 * @off: Offset within source to start clone from 332 * @olen: Original length, passed by user, of range to clone 333 * @olen_aligned: Block-aligned value of olen 334 * @destoff: Offset within @inode to start clone 335 * @no_time_update: Whether to update mtime/ctime on the target inode 336 */ 337 static int btrfs_clone(struct inode *src, struct inode *inode, 338 const u64 off, const u64 olen, const u64 olen_aligned, 339 const u64 destoff, int no_time_update) 340 { 341 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 342 struct btrfs_path *path = NULL; 343 struct extent_buffer *leaf; 344 struct btrfs_trans_handle *trans; 345 char *buf = NULL; 346 struct btrfs_key key; 347 u32 nritems; 348 int slot; 349 int ret; 350 const u64 len = olen_aligned; 351 u64 last_dest_end = destoff; 352 u64 prev_extent_end = off; 353 354 ret = -ENOMEM; 355 buf = kvmalloc(fs_info->nodesize, GFP_KERNEL); 356 if (!buf) 357 return ret; 358 359 path = btrfs_alloc_path(); 360 if (!path) { 361 kvfree(buf); 362 return ret; 363 } 364 365 path->reada = READA_FORWARD; 366 /* Clone data */ 367 key.objectid = btrfs_ino(BTRFS_I(src)); 368 key.type = BTRFS_EXTENT_DATA_KEY; 369 key.offset = off; 370 371 while (1) { 372 struct btrfs_file_extent_item *extent; 373 u64 extent_gen; 374 int type; 375 u32 size; 376 struct btrfs_key new_key; 377 u64 disko = 0, diskl = 0; 378 u64 datao = 0, datal = 0; 379 u8 comp; 380 u64 drop_start; 381 382 /* Note the key will change type as we walk through the tree */ 383 ret = btrfs_search_slot(NULL, BTRFS_I(src)->root, &key, path, 384 0, 0); 385 if (ret < 0) 386 goto out; 387 /* 388 * First search, if no extent item that starts at offset off was 389 * found but the previous item is an extent item, it's possible 390 * it might overlap our target range, therefore process it. 391 */ 392 if (key.offset == off && ret > 0 && path->slots[0] > 0) { 393 btrfs_item_key_to_cpu(path->nodes[0], &key, 394 path->slots[0] - 1); 395 if (key.type == BTRFS_EXTENT_DATA_KEY) 396 path->slots[0]--; 397 } 398 399 nritems = btrfs_header_nritems(path->nodes[0]); 400 process_slot: 401 if (path->slots[0] >= nritems) { 402 ret = btrfs_next_leaf(BTRFS_I(src)->root, path); 403 if (ret < 0) 404 goto out; 405 if (ret > 0) 406 break; 407 nritems = btrfs_header_nritems(path->nodes[0]); 408 } 409 leaf = path->nodes[0]; 410 slot = path->slots[0]; 411 412 btrfs_item_key_to_cpu(leaf, &key, slot); 413 if (key.type > BTRFS_EXTENT_DATA_KEY || 414 key.objectid != btrfs_ino(BTRFS_I(src))) 415 break; 416 417 ASSERT(key.type == BTRFS_EXTENT_DATA_KEY); 418 419 extent = btrfs_item_ptr(leaf, slot, 420 struct btrfs_file_extent_item); 421 extent_gen = btrfs_file_extent_generation(leaf, extent); 422 comp = btrfs_file_extent_compression(leaf, extent); 423 type = btrfs_file_extent_type(leaf, extent); 424 if (type == BTRFS_FILE_EXTENT_REG || 425 type == BTRFS_FILE_EXTENT_PREALLOC) { 426 disko = btrfs_file_extent_disk_bytenr(leaf, extent); 427 diskl = btrfs_file_extent_disk_num_bytes(leaf, extent); 428 datao = btrfs_file_extent_offset(leaf, extent); 429 datal = btrfs_file_extent_num_bytes(leaf, extent); 430 } else if (type == BTRFS_FILE_EXTENT_INLINE) { 431 /* Take upper bound, may be compressed */ 432 datal = btrfs_file_extent_ram_bytes(leaf, extent); 433 } 434 435 /* 436 * The first search might have left us at an extent item that 437 * ends before our target range's start, can happen if we have 438 * holes and NO_HOLES feature enabled. 439 * 440 * Subsequent searches may leave us on a file range we have 441 * processed before - this happens due to a race with ordered 442 * extent completion for a file range that is outside our source 443 * range, but that range was part of a file extent item that 444 * also covered a leading part of our source range. 445 */ 446 if (key.offset + datal <= prev_extent_end) { 447 path->slots[0]++; 448 goto process_slot; 449 } else if (key.offset >= off + len) { 450 break; 451 } 452 453 prev_extent_end = key.offset + datal; 454 size = btrfs_item_size(leaf, slot); 455 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf, slot), 456 size); 457 458 btrfs_release_path(path); 459 460 memcpy(&new_key, &key, sizeof(new_key)); 461 new_key.objectid = btrfs_ino(BTRFS_I(inode)); 462 if (off <= key.offset) 463 new_key.offset = key.offset + destoff - off; 464 else 465 new_key.offset = destoff; 466 467 /* 468 * Deal with a hole that doesn't have an extent item that 469 * represents it (NO_HOLES feature enabled). 470 * This hole is either in the middle of the cloning range or at 471 * the beginning (fully overlaps it or partially overlaps it). 472 */ 473 if (new_key.offset != last_dest_end) 474 drop_start = last_dest_end; 475 else 476 drop_start = new_key.offset; 477 478 if (type == BTRFS_FILE_EXTENT_REG || 479 type == BTRFS_FILE_EXTENT_PREALLOC) { 480 struct btrfs_replace_extent_info clone_info; 481 482 /* 483 * a | --- range to clone ---| b 484 * | ------------- extent ------------- | 485 */ 486 487 /* Subtract range b */ 488 if (key.offset + datal > off + len) 489 datal = off + len - key.offset; 490 491 /* Subtract range a */ 492 if (off > key.offset) { 493 datao += off - key.offset; 494 datal -= off - key.offset; 495 } 496 497 clone_info.disk_offset = disko; 498 clone_info.disk_len = diskl; 499 clone_info.data_offset = datao; 500 clone_info.data_len = datal; 501 clone_info.file_offset = new_key.offset; 502 clone_info.extent_buf = buf; 503 clone_info.is_new_extent = false; 504 clone_info.update_times = !no_time_update; 505 ret = btrfs_replace_file_extents(BTRFS_I(inode), path, 506 drop_start, new_key.offset + datal - 1, 507 &clone_info, &trans); 508 if (ret) 509 goto out; 510 } else { 511 ASSERT(type == BTRFS_FILE_EXTENT_INLINE); 512 /* 513 * Inline extents always have to start at file offset 0 514 * and can never be bigger then the sector size. We can 515 * never clone only parts of an inline extent, since all 516 * reflink operations must start at a sector size aligned 517 * offset, and the length must be aligned too or end at 518 * the i_size (which implies the whole inlined data). 519 */ 520 ASSERT(key.offset == 0); 521 ASSERT(datal <= fs_info->sectorsize); 522 if (WARN_ON(type != BTRFS_FILE_EXTENT_INLINE) || 523 WARN_ON(key.offset != 0) || 524 WARN_ON(datal > fs_info->sectorsize)) { 525 ret = -EUCLEAN; 526 goto out; 527 } 528 529 ret = clone_copy_inline_extent(inode, path, &new_key, 530 drop_start, datal, size, 531 comp, buf, &trans); 532 if (ret) 533 goto out; 534 } 535 536 btrfs_release_path(path); 537 538 /* 539 * Whenever we share an extent we update the last_reflink_trans 540 * of each inode to the current transaction. This is needed to 541 * make sure fsync does not log multiple checksum items with 542 * overlapping ranges (because some extent items might refer 543 * only to sections of the original extent). For the destination 544 * inode we do this regardless of the generation of the extents 545 * or even if they are inline extents or explicit holes, to make 546 * sure a full fsync does not skip them. For the source inode, 547 * we only need to update last_reflink_trans in case it's a new 548 * extent that is not a hole or an inline extent, to deal with 549 * the checksums problem on fsync. 550 */ 551 if (extent_gen == trans->transid && disko > 0) 552 BTRFS_I(src)->last_reflink_trans = trans->transid; 553 554 BTRFS_I(inode)->last_reflink_trans = trans->transid; 555 556 last_dest_end = ALIGN(new_key.offset + datal, 557 fs_info->sectorsize); 558 ret = clone_finish_inode_update(trans, inode, last_dest_end, 559 destoff, olen, no_time_update); 560 if (ret) 561 goto out; 562 if (new_key.offset + datal >= destoff + len) 563 break; 564 565 btrfs_release_path(path); 566 key.offset = prev_extent_end; 567 568 if (fatal_signal_pending(current)) { 569 ret = -EINTR; 570 goto out; 571 } 572 573 cond_resched(); 574 } 575 ret = 0; 576 577 if (last_dest_end < destoff + len) { 578 /* 579 * We have an implicit hole that fully or partially overlaps our 580 * cloning range at its end. This means that we either have the 581 * NO_HOLES feature enabled or the implicit hole happened due to 582 * mixing buffered and direct IO writes against this file. 583 */ 584 btrfs_release_path(path); 585 586 /* 587 * When using NO_HOLES and we are cloning a range that covers 588 * only a hole (no extents) into a range beyond the current 589 * i_size, punching a hole in the target range will not create 590 * an extent map defining a hole, because the range starts at or 591 * beyond current i_size. If the file previously had an i_size 592 * greater than the new i_size set by this clone operation, we 593 * need to make sure the next fsync is a full fsync, so that it 594 * detects and logs a hole covering a range from the current 595 * i_size to the new i_size. If the clone range covers extents, 596 * besides a hole, then we know the full sync flag was already 597 * set by previous calls to btrfs_replace_file_extents() that 598 * replaced file extent items. 599 */ 600 if (last_dest_end >= i_size_read(inode)) 601 btrfs_set_inode_full_sync(BTRFS_I(inode)); 602 603 ret = btrfs_replace_file_extents(BTRFS_I(inode), path, 604 last_dest_end, destoff + len - 1, NULL, &trans); 605 if (ret) 606 goto out; 607 608 ret = clone_finish_inode_update(trans, inode, destoff + len, 609 destoff, olen, no_time_update); 610 } 611 612 out: 613 btrfs_free_path(path); 614 kvfree(buf); 615 clear_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &BTRFS_I(inode)->runtime_flags); 616 617 return ret; 618 } 619 620 static void btrfs_double_extent_unlock(struct inode *inode1, u64 loff1, 621 struct inode *inode2, u64 loff2, u64 len) 622 { 623 unlock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1, NULL); 624 unlock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1, NULL); 625 } 626 627 static void btrfs_double_extent_lock(struct inode *inode1, u64 loff1, 628 struct inode *inode2, u64 loff2, u64 len) 629 { 630 u64 range1_end = loff1 + len - 1; 631 u64 range2_end = loff2 + len - 1; 632 633 if (inode1 < inode2) { 634 swap(inode1, inode2); 635 swap(loff1, loff2); 636 swap(range1_end, range2_end); 637 } else if (inode1 == inode2 && loff2 < loff1) { 638 swap(loff1, loff2); 639 swap(range1_end, range2_end); 640 } 641 642 lock_extent(&BTRFS_I(inode1)->io_tree, loff1, range1_end, NULL); 643 lock_extent(&BTRFS_I(inode2)->io_tree, loff2, range2_end, NULL); 644 645 btrfs_assert_inode_range_clean(BTRFS_I(inode1), loff1, range1_end); 646 btrfs_assert_inode_range_clean(BTRFS_I(inode2), loff2, range2_end); 647 } 648 649 static void btrfs_double_mmap_lock(struct inode *inode1, struct inode *inode2) 650 { 651 if (inode1 < inode2) 652 swap(inode1, inode2); 653 down_write(&BTRFS_I(inode1)->i_mmap_lock); 654 down_write_nested(&BTRFS_I(inode2)->i_mmap_lock, SINGLE_DEPTH_NESTING); 655 } 656 657 static void btrfs_double_mmap_unlock(struct inode *inode1, struct inode *inode2) 658 { 659 up_write(&BTRFS_I(inode1)->i_mmap_lock); 660 up_write(&BTRFS_I(inode2)->i_mmap_lock); 661 } 662 663 static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 len, 664 struct inode *dst, u64 dst_loff) 665 { 666 struct btrfs_fs_info *fs_info = BTRFS_I(src)->root->fs_info; 667 const u64 bs = fs_info->sectorsize; 668 int ret; 669 670 /* 671 * Lock destination range to serialize with concurrent readahead() and 672 * source range to serialize with relocation. 673 */ 674 btrfs_double_extent_lock(src, loff, dst, dst_loff, len); 675 ret = btrfs_clone(src, dst, loff, len, ALIGN(len, bs), dst_loff, 1); 676 btrfs_double_extent_unlock(src, loff, dst, dst_loff, len); 677 678 btrfs_btree_balance_dirty(fs_info); 679 680 return ret; 681 } 682 683 static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen, 684 struct inode *dst, u64 dst_loff) 685 { 686 int ret = 0; 687 u64 i, tail_len, chunk_count; 688 struct btrfs_root *root_dst = BTRFS_I(dst)->root; 689 690 spin_lock(&root_dst->root_item_lock); 691 if (root_dst->send_in_progress) { 692 btrfs_warn_rl(root_dst->fs_info, 693 "cannot deduplicate to root %llu while send operations are using it (%d in progress)", 694 root_dst->root_key.objectid, 695 root_dst->send_in_progress); 696 spin_unlock(&root_dst->root_item_lock); 697 return -EAGAIN; 698 } 699 root_dst->dedupe_in_progress++; 700 spin_unlock(&root_dst->root_item_lock); 701 702 tail_len = olen % BTRFS_MAX_DEDUPE_LEN; 703 chunk_count = div_u64(olen, BTRFS_MAX_DEDUPE_LEN); 704 705 for (i = 0; i < chunk_count; i++) { 706 ret = btrfs_extent_same_range(src, loff, BTRFS_MAX_DEDUPE_LEN, 707 dst, dst_loff); 708 if (ret) 709 goto out; 710 711 loff += BTRFS_MAX_DEDUPE_LEN; 712 dst_loff += BTRFS_MAX_DEDUPE_LEN; 713 } 714 715 if (tail_len > 0) 716 ret = btrfs_extent_same_range(src, loff, tail_len, dst, dst_loff); 717 out: 718 spin_lock(&root_dst->root_item_lock); 719 root_dst->dedupe_in_progress--; 720 spin_unlock(&root_dst->root_item_lock); 721 722 return ret; 723 } 724 725 static noinline int btrfs_clone_files(struct file *file, struct file *file_src, 726 u64 off, u64 olen, u64 destoff) 727 { 728 struct inode *inode = file_inode(file); 729 struct inode *src = file_inode(file_src); 730 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); 731 int ret; 732 int wb_ret; 733 u64 len = olen; 734 u64 bs = fs_info->sectorsize; 735 736 /* 737 * VFS's generic_remap_file_range_prep() protects us from cloning the 738 * eof block into the middle of a file, which would result in corruption 739 * if the file size is not blocksize aligned. So we don't need to check 740 * for that case here. 741 */ 742 if (off + len == src->i_size) 743 len = ALIGN(src->i_size, bs) - off; 744 745 if (destoff > inode->i_size) { 746 const u64 wb_start = ALIGN_DOWN(inode->i_size, bs); 747 748 ret = btrfs_cont_expand(BTRFS_I(inode), inode->i_size, destoff); 749 if (ret) 750 return ret; 751 /* 752 * We may have truncated the last block if the inode's size is 753 * not sector size aligned, so we need to wait for writeback to 754 * complete before proceeding further, otherwise we can race 755 * with cloning and attempt to increment a reference to an 756 * extent that no longer exists (writeback completed right after 757 * we found the previous extent covering eof and before we 758 * attempted to increment its reference count). 759 */ 760 ret = btrfs_wait_ordered_range(inode, wb_start, 761 destoff - wb_start); 762 if (ret) 763 return ret; 764 } 765 766 /* 767 * Lock destination range to serialize with concurrent readahead() and 768 * source range to serialize with relocation. 769 */ 770 btrfs_double_extent_lock(src, off, inode, destoff, len); 771 ret = btrfs_clone(src, inode, off, olen, len, destoff, 0); 772 btrfs_double_extent_unlock(src, off, inode, destoff, len); 773 774 /* 775 * We may have copied an inline extent into a page of the destination 776 * range, so wait for writeback to complete before truncating pages 777 * from the page cache. This is a rare case. 778 */ 779 wb_ret = btrfs_wait_ordered_range(inode, destoff, len); 780 ret = ret ? ret : wb_ret; 781 /* 782 * Truncate page cache pages so that future reads will see the cloned 783 * data immediately and not the previous data. 784 */ 785 truncate_inode_pages_range(&inode->i_data, 786 round_down(destoff, PAGE_SIZE), 787 round_up(destoff + len, PAGE_SIZE) - 1); 788 789 btrfs_btree_balance_dirty(fs_info); 790 791 return ret; 792 } 793 794 static int btrfs_remap_file_range_prep(struct file *file_in, loff_t pos_in, 795 struct file *file_out, loff_t pos_out, 796 loff_t *len, unsigned int remap_flags) 797 { 798 struct inode *inode_in = file_inode(file_in); 799 struct inode *inode_out = file_inode(file_out); 800 u64 bs = BTRFS_I(inode_out)->root->fs_info->sectorsize; 801 u64 wb_len; 802 int ret; 803 804 if (!(remap_flags & REMAP_FILE_DEDUP)) { 805 struct btrfs_root *root_out = BTRFS_I(inode_out)->root; 806 807 if (btrfs_root_readonly(root_out)) 808 return -EROFS; 809 810 ASSERT(inode_in->i_sb == inode_out->i_sb); 811 } 812 813 /* Don't make the dst file partly checksummed */ 814 if ((BTRFS_I(inode_in)->flags & BTRFS_INODE_NODATASUM) != 815 (BTRFS_I(inode_out)->flags & BTRFS_INODE_NODATASUM)) { 816 return -EINVAL; 817 } 818 819 /* 820 * Now that the inodes are locked, we need to start writeback ourselves 821 * and can not rely on the writeback from the VFS's generic helper 822 * generic_remap_file_range_prep() because: 823 * 824 * 1) For compression we must call filemap_fdatawrite_range() range 825 * twice (btrfs_fdatawrite_range() does it for us), and the generic 826 * helper only calls it once; 827 * 828 * 2) filemap_fdatawrite_range(), called by the generic helper only 829 * waits for the writeback to complete, i.e. for IO to be done, and 830 * not for the ordered extents to complete. We need to wait for them 831 * to complete so that new file extent items are in the fs tree. 832 */ 833 if (*len == 0 && !(remap_flags & REMAP_FILE_DEDUP)) 834 wb_len = ALIGN(inode_in->i_size, bs) - ALIGN_DOWN(pos_in, bs); 835 else 836 wb_len = ALIGN(*len, bs); 837 838 /* 839 * Workaround to make sure NOCOW buffered write reach disk as NOCOW. 840 * 841 * Btrfs' back references do not have a block level granularity, they 842 * work at the whole extent level. 843 * NOCOW buffered write without data space reserved may not be able 844 * to fall back to CoW due to lack of data space, thus could cause 845 * data loss. 846 * 847 * Here we take a shortcut by flushing the whole inode, so that all 848 * nocow write should reach disk as nocow before we increase the 849 * reference of the extent. We could do better by only flushing NOCOW 850 * data, but that needs extra accounting. 851 * 852 * Also we don't need to check ASYNC_EXTENT, as async extent will be 853 * CoWed anyway, not affecting nocow part. 854 */ 855 ret = filemap_flush(inode_in->i_mapping); 856 if (ret < 0) 857 return ret; 858 859 ret = btrfs_wait_ordered_range(inode_in, ALIGN_DOWN(pos_in, bs), 860 wb_len); 861 if (ret < 0) 862 return ret; 863 ret = btrfs_wait_ordered_range(inode_out, ALIGN_DOWN(pos_out, bs), 864 wb_len); 865 if (ret < 0) 866 return ret; 867 868 return generic_remap_file_range_prep(file_in, pos_in, file_out, pos_out, 869 len, remap_flags); 870 } 871 872 static bool file_sync_write(const struct file *file) 873 { 874 if (file->f_flags & (__O_SYNC | O_DSYNC)) 875 return true; 876 if (IS_SYNC(file_inode(file))) 877 return true; 878 879 return false; 880 } 881 882 loff_t btrfs_remap_file_range(struct file *src_file, loff_t off, 883 struct file *dst_file, loff_t destoff, loff_t len, 884 unsigned int remap_flags) 885 { 886 struct inode *src_inode = file_inode(src_file); 887 struct inode *dst_inode = file_inode(dst_file); 888 bool same_inode = dst_inode == src_inode; 889 int ret; 890 891 if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY)) 892 return -EINVAL; 893 894 if (same_inode) { 895 btrfs_inode_lock(BTRFS_I(src_inode), BTRFS_ILOCK_MMAP); 896 } else { 897 lock_two_nondirectories(src_inode, dst_inode); 898 btrfs_double_mmap_lock(src_inode, dst_inode); 899 } 900 901 ret = btrfs_remap_file_range_prep(src_file, off, dst_file, destoff, 902 &len, remap_flags); 903 if (ret < 0 || len == 0) 904 goto out_unlock; 905 906 if (remap_flags & REMAP_FILE_DEDUP) 907 ret = btrfs_extent_same(src_inode, off, len, dst_inode, destoff); 908 else 909 ret = btrfs_clone_files(dst_file, src_file, off, len, destoff); 910 911 out_unlock: 912 if (same_inode) { 913 btrfs_inode_unlock(BTRFS_I(src_inode), BTRFS_ILOCK_MMAP); 914 } else { 915 btrfs_double_mmap_unlock(src_inode, dst_inode); 916 unlock_two_nondirectories(src_inode, dst_inode); 917 } 918 919 /* 920 * If either the source or the destination file was opened with O_SYNC, 921 * O_DSYNC or has the S_SYNC attribute, fsync both the destination and 922 * source files/ranges, so that after a successful return (0) followed 923 * by a power failure results in the reflinked data to be readable from 924 * both files/ranges. 925 */ 926 if (ret == 0 && len > 0 && 927 (file_sync_write(src_file) || file_sync_write(dst_file))) { 928 ret = btrfs_sync_file(src_file, off, off + len - 1, 0); 929 if (ret == 0) 930 ret = btrfs_sync_file(dst_file, destoff, 931 destoff + len - 1, 0); 932 } 933 934 return ret < 0 ? ret : len; 935 } 936