// SPDX-License-Identifier: GPL-2.0

#include <linux/blkdev.h>
#include <linux/iversion.h>
#include "ctree.h"
#include "fs.h"
#include "messages.h"
#include "compression.h"
#include "delalloc-space.h"
#include "disk-io.h"
#include "reflink.h"
#include "transaction.h"
#include "subpage.h"

#define BTRFS_MAX_DEDUPE_LEN	SZ_16M

static int clone_finish_inode_update(struct btrfs_trans_handle *trans,
				     struct inode *inode,
				     u64 endoff,
				     const u64 destoff,
				     const u64 olen,
				     int no_time_update)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret;

	inode_inc_iversion(inode);
	if (!no_time_update) {
		inode->i_mtime = current_time(inode);
		inode->i_ctime = inode->i_mtime;
	}
	/*
	 * We round up to the block size at eof when determining which
	 * extents to clone above, but shouldn't round up the file size.
	 */
	if (endoff > destoff + olen)
		endoff = destoff + olen;
	if (endoff > inode->i_size) {
		i_size_write(inode, endoff);
		btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
	}

	ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
		goto out;
	}
	ret = btrfs_end_transaction(trans);
out:
	return ret;
}
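
/*
 * Illustrative example for the endoff clamp above (values are made up, not
 * from a real trace): cloning olen = 500 bytes to destoff = 0 on a 4K sector
 * filesystem rounds the cloned extent range up to 4K, so callers pass
 * endoff = 4096 here, but the clamp makes sure the destination i_size is set
 * to destoff + olen = 500 rather than the rounded-up value.
 */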

static int copy_inline_to_page(struct btrfs_inode *inode,
			       const u64 file_offset,
			       char *inline_data,
			       const u64 size,
			       const u64 datal,
			       const u8 comp_type)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	const u32 block_size = fs_info->sectorsize;
	const u64 range_end = file_offset + block_size - 1;
	const size_t inline_size = size - btrfs_file_extent_calc_inline_size(0);
	char *data_start = inline_data + btrfs_file_extent_calc_inline_size(0);
	struct extent_changeset *data_reserved = NULL;
	struct page *page = NULL;
	struct address_space *mapping = inode->vfs_inode.i_mapping;
	int ret;

	ASSERT(IS_ALIGNED(file_offset, block_size));

	/*
	 * We have flushed and locked the ranges of the source and destination
	 * inodes, we also have locked the inodes, so we are safe to do a
	 * reservation here. Also we must not do the reservation while holding
	 * a transaction open, otherwise we would deadlock.
	 */
	ret = btrfs_delalloc_reserve_space(inode, &data_reserved, file_offset,
					   block_size);
	if (ret)
		goto out;

	page = find_or_create_page(mapping, file_offset >> PAGE_SHIFT,
				   btrfs_alloc_write_mask(mapping));
	if (!page) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	ret = set_page_extent_mapped(page);
	if (ret < 0)
		goto out_unlock;

	clear_extent_bit(&inode->io_tree, file_offset, range_end,
			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
			 NULL);
	ret = btrfs_set_extent_delalloc(inode, file_offset, range_end, 0, NULL);
	if (ret)
		goto out_unlock;

	/*
	 * After dirtying the page our caller will need to start a transaction,
	 * and if we are low on metadata free space, that can cause flushing of
	 * delalloc for all inodes in order to get metadata space released.
	 * However we are holding the range locked for the whole duration of
	 * the clone/dedupe operation, so we may deadlock if that happens and no
	 * other task releases enough space. So mark this inode as not being
	 * possible to flush to avoid such deadlock. We will clear that flag
	 * when we finish cloning all extents, since a transaction is started
	 * after finding each extent to clone.
	 */
	set_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &inode->runtime_flags);

	if (comp_type == BTRFS_COMPRESS_NONE) {
		memcpy_to_page(page, offset_in_page(file_offset), data_start,
			       datal);
	} else {
		ret = btrfs_decompress(comp_type, data_start, page,
				       offset_in_page(file_offset),
				       inline_size, datal);
		if (ret)
			goto out_unlock;
		flush_dcache_page(page);
	}

	/*
	 * If our inline data is smaller than the block/page size, then the
	 * remainder of the block/page is equivalent to zeroes. We had something
	 * like the following done:
	 *
	 * $ xfs_io -f -c "pwrite -S 0xab 0 500" file
	 * $ sync  # (or fsync)
	 * $ xfs_io -c "falloc 0 4K" file
	 * $ xfs_io -c "pwrite -S 0xcd 4K 4K" file
	 *
	 * So what's in the range [500, 4095] corresponds to zeroes.
	 */
	if (datal < block_size)
		memzero_page(page, datal, block_size - datal);

	btrfs_page_set_uptodate(fs_info, page, file_offset, block_size);
	btrfs_page_clear_checked(fs_info, page, file_offset, block_size);
	btrfs_page_set_dirty(fs_info, page, file_offset, block_size);
out_unlock:
	if (page) {
		unlock_page(page);
		put_page(page);
	}
	if (ret)
		btrfs_delalloc_release_space(inode, data_reserved, file_offset,
					     block_size, true);
	btrfs_delalloc_release_extents(inode, block_size);
out:
	extent_changeset_free(data_reserved);

	return ret;
}

/*
 * Deal with cloning of inline extents. We try to copy the inline extent from
 * the source inode to the destination inode when possible. When not possible
 * we copy the inline extent's data into the respective page of the inode.
 */
static int clone_copy_inline_extent(struct inode *dst,
				    struct btrfs_path *path,
				    struct btrfs_key *new_key,
				    const u64 drop_start,
				    const u64 datal,
				    const u64 size,
				    const u8 comp_type,
				    char *inline_data,
				    struct btrfs_trans_handle **trans_out)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(dst->i_sb);
	struct btrfs_root *root = BTRFS_I(dst)->root;
	const u64 aligned_end = ALIGN(new_key->offset + datal,
				      fs_info->sectorsize);
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_drop_extents_args drop_args = { 0 };
	int ret;
	struct btrfs_key key;

	if (new_key->offset > 0) {
		ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
					  inline_data, size, datal, comp_type);
		goto out;
	}

	key.objectid = btrfs_ino(BTRFS_I(dst));
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0) {
		return ret;
	} else if (ret > 0) {
		if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				return ret;
			else if (ret > 0)
				goto copy_inline_extent;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid == btrfs_ino(BTRFS_I(dst)) &&
		    key.type == BTRFS_EXTENT_DATA_KEY) {
			/*
			 * There's an implicit hole at file offset 0, copy the
			 * inline extent's data to the page.
			 */
			ASSERT(key.offset > 0);
			goto copy_to_page;
		}
	} else if (i_size_read(dst) <= datal) {
		struct btrfs_file_extent_item *ei;

		ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_file_extent_item);
		/*
		 * If it's an inline extent replace it with the source inline
		 * extent, otherwise copy the source inline extent data into
		 * the respective page at the destination inode.
		 */
		if (btrfs_file_extent_type(path->nodes[0], ei) ==
		    BTRFS_FILE_EXTENT_INLINE)
			goto copy_inline_extent;

		goto copy_to_page;
	}

copy_inline_extent:
	/*
	 * We have no extent items, or we have an extent at offset 0 which may
	 * or may not be inlined. All these cases are dealt with in the same
	 * way.
	 */
	if (i_size_read(dst) > datal) {
		/*
		 * At the destination offset 0 we have either a hole, a regular
		 * extent or an inline extent larger than the one we want to
		 * clone. Deal with all these cases by copying the inline extent
		 * data into the respective page at the destination inode.
		 */
		goto copy_to_page;
	}

	/*
	 * Release path before starting a new transaction so we don't hold locks
	 * that would confuse lockdep.
	 */
	btrfs_release_path(path);
	/*
	 * If we end up here it means we are copying the inline extent into a
	 * leaf of the destination inode. We know we will drop or adjust at
	 * most one extent item in the destination root.
	 *
	 * 1 unit - adjusting old extent (we may have to split it)
	 * 1 unit - add new extent
	 * 1 unit - inode update
	 */
	trans = btrfs_start_transaction(root, 3);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		goto out;
	}
	drop_args.path = path;
	drop_args.start = drop_start;
	drop_args.end = aligned_end;
	drop_args.drop_cache = true;
	ret = btrfs_drop_extents(trans, root, BTRFS_I(dst), &drop_args);
	if (ret)
		goto out;
	ret = btrfs_insert_empty_item(trans, root, path, new_key, size);
	if (ret)
		goto out;

	write_extent_buffer(path->nodes[0], inline_data,
			    btrfs_item_ptr_offset(path->nodes[0],
						  path->slots[0]),
			    size);
	btrfs_update_inode_bytes(BTRFS_I(dst), datal, drop_args.bytes_found);
	btrfs_set_inode_full_sync(BTRFS_I(dst));
	ret = btrfs_inode_set_file_extent_range(BTRFS_I(dst), 0, aligned_end);
out:
	if (!ret && !trans) {
		/*
		 * No transaction here means we copied the inline extent into a
		 * page of the destination inode.
		 *
		 * 1 unit to update inode item
		 */
		trans = btrfs_start_transaction(root, 1);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			trans = NULL;
		}
	}
	if (ret && trans) {
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
	}
	if (!ret)
		*trans_out = trans;

	return ret;

copy_to_page:
	/*
	 * Release our path because we don't need it anymore and also because
	 * copy_inline_to_page() needs to reserve data and metadata, which may
	 * need to flush delalloc when we are low on available space and
	 * therefore cause a deadlock if writeback of an inline extent needs to
	 * write to the same leaf or an ordered extent completion needs to write
	 * to the same leaf.
	 */
	btrfs_release_path(path);

	ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
				  inline_data, size, datal, comp_type);
	goto out;
}
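
/*
 * Summary of the cases above (informational): a non-zero destination offset,
 * an implicit hole at offset 0, a non-inline extent at offset 0, or a
 * destination i_size larger than the source inline data all fall back to
 * copy_inline_to_page(); only when the destination's inline data (or lack of
 * it) can be fully replaced is the extent item copied into the leaf.
 */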

/**
 * btrfs_clone() - clone a range from one file to another
 *
 * @src: Inode to clone from
 * @inode: Inode to clone to
 * @off: Offset within source to start clone from
 * @olen: Original length, passed by user, of range to clone
 * @olen_aligned: Block-aligned value of olen
 * @destoff: Offset within @inode to start clone
 * @no_time_update: Whether to update mtime/ctime on the target inode
 */
static int btrfs_clone(struct inode *src, struct inode *inode,
		       const u64 off, const u64 olen, const u64 olen_aligned,
		       const u64 destoff, int no_time_update)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_path *path = NULL;
	struct extent_buffer *leaf;
	struct btrfs_trans_handle *trans;
	char *buf = NULL;
	struct btrfs_key key;
	u32 nritems;
	int slot;
	int ret;
	const u64 len = olen_aligned;
	u64 last_dest_end = destoff;
	u64 prev_extent_end = off;

	ret = -ENOMEM;
	buf = kvmalloc(fs_info->nodesize, GFP_KERNEL);
	if (!buf)
		return ret;

	path = btrfs_alloc_path();
	if (!path) {
		kvfree(buf);
		return ret;
	}

	path->reada = READA_FORWARD;
	/* Clone data */
	key.objectid = btrfs_ino(BTRFS_I(src));
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = off;

	while (1) {
		struct btrfs_file_extent_item *extent;
		u64 extent_gen;
		int type;
		u32 size;
		struct btrfs_key new_key;
		u64 disko = 0, diskl = 0;
		u64 datao = 0, datal = 0;
		u8 comp;
		u64 drop_start;

		/* Note the key will change type as we walk through the tree */
		ret = btrfs_search_slot(NULL, BTRFS_I(src)->root, &key, path,
					0, 0);
		if (ret < 0)
			goto out;
		/*
		 * First search, if no extent item that starts at offset off was
		 * found but the previous item is an extent item, it's possible
		 * it might overlap our target range, therefore process it.
		 */
		if (key.offset == off && ret > 0 && path->slots[0] > 0) {
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0] - 1);
			if (key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}

		nritems = btrfs_header_nritems(path->nodes[0]);
process_slot:
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(BTRFS_I(src)->root, path);
			if (ret < 0)
				goto out;
			if (ret > 0)
				break;
			nritems = btrfs_header_nritems(path->nodes[0]);
		}
		leaf = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.type > BTRFS_EXTENT_DATA_KEY ||
		    key.objectid != btrfs_ino(BTRFS_I(src)))
			break;

		ASSERT(key.type == BTRFS_EXTENT_DATA_KEY);

		extent = btrfs_item_ptr(leaf, slot,
					struct btrfs_file_extent_item);
		extent_gen = btrfs_file_extent_generation(leaf, extent);
		comp = btrfs_file_extent_compression(leaf, extent);
		type = btrfs_file_extent_type(leaf, extent);
		if (type == BTRFS_FILE_EXTENT_REG ||
		    type == BTRFS_FILE_EXTENT_PREALLOC) {
			disko = btrfs_file_extent_disk_bytenr(leaf, extent);
			diskl = btrfs_file_extent_disk_num_bytes(leaf, extent);
			datao = btrfs_file_extent_offset(leaf, extent);
			datal = btrfs_file_extent_num_bytes(leaf, extent);
		} else if (type == BTRFS_FILE_EXTENT_INLINE) {
			/* Take upper bound, may be compressed */
			datal = btrfs_file_extent_ram_bytes(leaf, extent);
		}

		/*
		 * The first search might have left us at an extent item that
		 * ends before our target range's start. This can happen if we
		 * have holes and the NO_HOLES feature is enabled.
		 *
		 * Subsequent searches may leave us on a file range we have
		 * processed before - this happens due to a race with ordered
		 * extent completion for a file range that is outside our source
		 * range, but that range was part of a file extent item that
		 * also covered a leading part of our source range.
		 */
		if (key.offset + datal <= prev_extent_end) {
			path->slots[0]++;
			goto process_slot;
		} else if (key.offset >= off + len) {
			break;
		}

		prev_extent_end = key.offset + datal;
		size = btrfs_item_size(leaf, slot);
		read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf, slot),
				   size);

		btrfs_release_path(path);

		memcpy(&new_key, &key, sizeof(new_key));
		new_key.objectid = btrfs_ino(BTRFS_I(inode));
		if (off <= key.offset)
			new_key.offset = key.offset + destoff - off;
		else
			new_key.offset = destoff;

		/*
		 * Deal with a hole that doesn't have an extent item that
		 * represents it (NO_HOLES feature enabled).
		 * This hole is either in the middle of the cloning range or at
		 * the beginning (fully overlaps it or partially overlaps it).
		 */
		if (new_key.offset != last_dest_end)
			drop_start = last_dest_end;
		else
			drop_start = new_key.offset;

		if (type == BTRFS_FILE_EXTENT_REG ||
		    type == BTRFS_FILE_EXTENT_PREALLOC) {
			struct btrfs_replace_extent_info clone_info;

			/*
			 * a | --- range to clone ---| b
			 * | ------------- extent ------------- |
			 */

			/* Subtract range b */
			if (key.offset + datal > off + len)
				datal = off + len - key.offset;

			/* Subtract range a */
			if (off > key.offset) {
				datao += off - key.offset;
				datal -= off - key.offset;
			}
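
			/*
			 * Worked example of the two trims above, with made-up
			 * values: for off = 8K, len = 16K and an extent item
			 * at key.offset = 4K with datal = 24K, range b trims
			 * datal to 20K, then range a advances datao by 4K and
			 * trims datal to 16K, leaving exactly the overlap
			 * [8K, 24K) of the extent with the clone source range.
			 */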

			clone_info.disk_offset = disko;
			clone_info.disk_len = diskl;
			clone_info.data_offset = datao;
			clone_info.data_len = datal;
			clone_info.file_offset = new_key.offset;
			clone_info.extent_buf = buf;
			clone_info.is_new_extent = false;
			clone_info.update_times = !no_time_update;
			ret = btrfs_replace_file_extents(BTRFS_I(inode), path,
					drop_start, new_key.offset + datal - 1,
					&clone_info, &trans);
			if (ret)
				goto out;
		} else {
			ASSERT(type == BTRFS_FILE_EXTENT_INLINE);
			/*
			 * Inline extents always have to start at file offset 0
			 * and can never be bigger than the sector size. We can
			 * never clone only parts of an inline extent, since all
			 * reflink operations must start at a sector size aligned
			 * offset, and the length must be aligned too or end at
			 * the i_size (which implies the whole inlined data).
			 */
			ASSERT(key.offset == 0);
			ASSERT(datal <= fs_info->sectorsize);
			if (WARN_ON(type != BTRFS_FILE_EXTENT_INLINE) ||
			    WARN_ON(key.offset != 0) ||
			    WARN_ON(datal > fs_info->sectorsize)) {
				ret = -EUCLEAN;
				goto out;
			}

			ret = clone_copy_inline_extent(inode, path, &new_key,
						       drop_start, datal, size,
						       comp, buf, &trans);
			if (ret)
				goto out;
		}

		btrfs_release_path(path);

		/*
		 * Whenever we share an extent we update the last_reflink_trans
		 * of each inode to the current transaction. This is needed to
		 * make sure fsync does not log multiple checksum items with
		 * overlapping ranges (because some extent items might refer
		 * only to sections of the original extent). For the destination
		 * inode we do this regardless of the generation of the extents
		 * or even if they are inline extents or explicit holes, to make
		 * sure a full fsync does not skip them. For the source inode,
		 * we only need to update last_reflink_trans in case it's a new
		 * extent that is not a hole or an inline extent, to deal with
		 * the checksums problem on fsync.
		 */
		if (extent_gen == trans->transid && disko > 0)
			BTRFS_I(src)->last_reflink_trans = trans->transid;

		BTRFS_I(inode)->last_reflink_trans = trans->transid;

		last_dest_end = ALIGN(new_key.offset + datal,
				      fs_info->sectorsize);
		ret = clone_finish_inode_update(trans, inode, last_dest_end,
						destoff, olen, no_time_update);
		if (ret)
			goto out;
		if (new_key.offset + datal >= destoff + len)
			break;

		btrfs_release_path(path);
		key.offset = prev_extent_end;

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}

		cond_resched();
	}
	ret = 0;

	if (last_dest_end < destoff + len) {
		/*
		 * We have an implicit hole that fully or partially overlaps our
		 * cloning range at its end. This means that we either have the
		 * NO_HOLES feature enabled or the implicit hole happened due to
		 * mixing buffered and direct IO writes against this file.
		 */
		btrfs_release_path(path);

		/*
		 * When using NO_HOLES and we are cloning a range that covers
		 * only a hole (no extents) into a range beyond the current
		 * i_size, punching a hole in the target range will not create
		 * an extent map defining a hole, because the range starts at or
		 * beyond current i_size. If the file previously had an i_size
		 * greater than the new i_size set by this clone operation, we
		 * need to make sure the next fsync is a full fsync, so that it
		 * detects and logs a hole covering a range from the current
		 * i_size to the new i_size. If the clone range covers extents,
		 * besides a hole, then we know the full sync flag was already
		 * set by previous calls to btrfs_replace_file_extents() that
		 * replaced file extent items.
		 */
		if (last_dest_end >= i_size_read(inode))
			btrfs_set_inode_full_sync(BTRFS_I(inode));

		ret = btrfs_replace_file_extents(BTRFS_I(inode), path,
				last_dest_end, destoff + len - 1, NULL, &trans);
		if (ret)
			goto out;

		ret = clone_finish_inode_update(trans, inode, destoff + len,
						destoff, olen, no_time_update);
	}

out:
	btrfs_free_path(path);
	kvfree(buf);
	clear_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &BTRFS_I(inode)->runtime_flags);

	return ret;
}

static void btrfs_double_extent_unlock(struct inode *inode1, u64 loff1,
				       struct inode *inode2, u64 loff2, u64 len)
{
	unlock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1, NULL);
	unlock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1, NULL);
}

static void btrfs_double_extent_lock(struct inode *inode1, u64 loff1,
				     struct inode *inode2, u64 loff2, u64 len)
{
	u64 range1_end = loff1 + len - 1;
	u64 range2_end = loff2 + len - 1;

	if (inode1 < inode2) {
		swap(inode1, inode2);
		swap(loff1, loff2);
		swap(range1_end, range2_end);
	} else if (inode1 == inode2 && loff2 < loff1) {
		swap(loff1, loff2);
		swap(range1_end, range2_end);
	}

	lock_extent(&BTRFS_I(inode1)->io_tree, loff1, range1_end, NULL);
	lock_extent(&BTRFS_I(inode2)->io_tree, loff2, range2_end, NULL);

	btrfs_assert_inode_range_clean(BTRFS_I(inode1), loff1, range1_end);
	btrfs_assert_inode_range_clean(BTRFS_I(inode2), loff2, range2_end);
}

static void btrfs_double_mmap_lock(struct inode *inode1, struct inode *inode2)
{
	if (inode1 < inode2)
		swap(inode1, inode2);
	down_write(&BTRFS_I(inode1)->i_mmap_lock);
	down_write_nested(&BTRFS_I(inode2)->i_mmap_lock, SINGLE_DEPTH_NESTING);
}

static void btrfs_double_mmap_unlock(struct inode *inode1, struct inode *inode2)
{
	up_write(&BTRFS_I(inode1)->i_mmap_lock);
	up_write(&BTRFS_I(inode2)->i_mmap_lock);
}
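
/*
 * Both lock helpers above order the two inodes by their memory address before
 * taking the locks, so two concurrent tasks locking the same pair in opposite
 * argument order still acquire the locks in the same global order, avoiding an
 * ABBA deadlock. Illustrative scenario (made-up values): task A calls with
 * (X, Y) and task B with (Y, X); after the swap both lock the higher-addressed
 * inode first.
 */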

static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 len,
				   struct inode *dst, u64 dst_loff)
{
	struct btrfs_fs_info *fs_info = BTRFS_I(src)->root->fs_info;
	const u64 bs = fs_info->sb->s_blocksize;
	int ret;

	/*
	 * Lock destination range to serialize with concurrent readahead() and
	 * source range to serialize with relocation.
	 */
	btrfs_double_extent_lock(src, loff, dst, dst_loff, len);
	ret = btrfs_clone(src, dst, loff, len, ALIGN(len, bs), dst_loff, 1);
	btrfs_double_extent_unlock(src, loff, dst, dst_loff, len);

	btrfs_btree_balance_dirty(fs_info);

	return ret;
}

static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
			     struct inode *dst, u64 dst_loff)
{
	int ret = 0;
	u64 i, tail_len, chunk_count;
	struct btrfs_root *root_dst = BTRFS_I(dst)->root;

	spin_lock(&root_dst->root_item_lock);
	if (root_dst->send_in_progress) {
		btrfs_warn_rl(root_dst->fs_info,
"cannot deduplicate to root %llu while send operations are using it (%d in progress)",
			      root_dst->root_key.objectid,
			      root_dst->send_in_progress);
		spin_unlock(&root_dst->root_item_lock);
		return -EAGAIN;
	}
	root_dst->dedupe_in_progress++;
	spin_unlock(&root_dst->root_item_lock);

	tail_len = olen % BTRFS_MAX_DEDUPE_LEN;
	chunk_count = div_u64(olen, BTRFS_MAX_DEDUPE_LEN);

	for (i = 0; i < chunk_count; i++) {
		ret = btrfs_extent_same_range(src, loff, BTRFS_MAX_DEDUPE_LEN,
					      dst, dst_loff);
		if (ret)
			goto out;

		loff += BTRFS_MAX_DEDUPE_LEN;
		dst_loff += BTRFS_MAX_DEDUPE_LEN;
	}

	if (tail_len > 0)
		ret = btrfs_extent_same_range(src, loff, tail_len, dst, dst_loff);
out:
	spin_lock(&root_dst->root_item_lock);
	root_dst->dedupe_in_progress--;
	spin_unlock(&root_dst->root_item_lock);

	return ret;
}
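
/*
 * Worked example of the chunking in btrfs_extent_same(), with made-up numbers:
 * for olen = 40M and BTRFS_MAX_DEDUPE_LEN = 16M, chunk_count is 2 and tail_len
 * is 8M, so the loop deduplicates two full 16M ranges and the final
 * btrfs_extent_same_range() call handles the remaining 8M.
 */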

static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
				      u64 off, u64 olen, u64 destoff)
{
	struct inode *inode = file_inode(file);
	struct inode *src = file_inode(file_src);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	int ret;
	int wb_ret;
	u64 len = olen;
	u64 bs = fs_info->sb->s_blocksize;

	/*
	 * VFS's generic_remap_file_range_prep() protects us from cloning the
	 * eof block into the middle of a file, which would result in corruption
	 * if the file size is not blocksize aligned. So we don't need to check
	 * for that case here.
	 */
	if (off + len == src->i_size)
		len = ALIGN(src->i_size, bs) - off;

	if (destoff > inode->i_size) {
		const u64 wb_start = ALIGN_DOWN(inode->i_size, bs);

		ret = btrfs_cont_expand(BTRFS_I(inode), inode->i_size, destoff);
		if (ret)
			return ret;
		/*
		 * We may have truncated the last block if the inode's size is
		 * not sector size aligned, so we need to wait for writeback to
		 * complete before proceeding further, otherwise we can race
		 * with cloning and attempt to increment a reference to an
		 * extent that no longer exists (writeback completed right after
		 * we found the previous extent covering eof and before we
		 * attempted to increment its reference count).
		 */
		ret = btrfs_wait_ordered_range(inode, wb_start,
					       destoff - wb_start);
		if (ret)
			return ret;
	}

	/*
	 * Lock destination range to serialize with concurrent readahead() and
	 * source range to serialize with relocation.
	 */
	btrfs_double_extent_lock(src, off, inode, destoff, len);
	ret = btrfs_clone(src, inode, off, olen, len, destoff, 0);
	btrfs_double_extent_unlock(src, off, inode, destoff, len);

	/*
	 * We may have copied an inline extent into a page of the destination
	 * range, so wait for writeback to complete before truncating pages
	 * from the page cache. This is a rare case.
	 */
	wb_ret = btrfs_wait_ordered_range(inode, destoff, len);
	ret = ret ? ret : wb_ret;
	/*
	 * Truncate page cache pages so that future reads will see the cloned
	 * data immediately and not the previous data.
	 */
	truncate_inode_pages_range(&inode->i_data,
				   round_down(destoff, PAGE_SIZE),
				   round_up(destoff + len, PAGE_SIZE) - 1);

	btrfs_btree_balance_dirty(fs_info);

	return ret;
}

static int btrfs_remap_file_range_prep(struct file *file_in, loff_t pos_in,
				       struct file *file_out, loff_t pos_out,
				       loff_t *len, unsigned int remap_flags)
{
	struct inode *inode_in = file_inode(file_in);
	struct inode *inode_out = file_inode(file_out);
	u64 bs = BTRFS_I(inode_out)->root->fs_info->sb->s_blocksize;
	u64 wb_len;
	int ret;

	if (!(remap_flags & REMAP_FILE_DEDUP)) {
		struct btrfs_root *root_out = BTRFS_I(inode_out)->root;

		if (btrfs_root_readonly(root_out))
			return -EROFS;

		ASSERT(inode_in->i_sb == inode_out->i_sb);
	}

	/* Don't make the dst file partly checksummed */
	if ((BTRFS_I(inode_in)->flags & BTRFS_INODE_NODATASUM) !=
	    (BTRFS_I(inode_out)->flags & BTRFS_INODE_NODATASUM)) {
		return -EINVAL;
	}

	/*
	 * Now that the inodes are locked, we need to start writeback ourselves
	 * and cannot rely on the writeback from the VFS's generic helper
	 * generic_remap_file_range_prep() because:
	 *
	 * 1) For compression we must call filemap_fdatawrite_range() twice
	 *    (btrfs_fdatawrite_range() does it for us), and the generic helper
	 *    only calls it once;
	 *
	 * 2) filemap_fdatawrite_range(), called by the generic helper, only
	 *    waits for the writeback to complete, i.e. for IO to be done, and
	 *    not for the ordered extents to complete. We need to wait for them
	 *    to complete so that new file extent items are in the fs tree.
	 */
	if (*len == 0 && !(remap_flags & REMAP_FILE_DEDUP))
		wb_len = ALIGN(inode_in->i_size, bs) - ALIGN_DOWN(pos_in, bs);
	else
		wb_len = ALIGN(*len, bs);

	/*
	 * Workaround to make sure NOCOW buffered writes reach disk as NOCOW.
	 *
	 * Btrfs' back references do not have a block level granularity, they
	 * work at the whole extent level.
	 * NOCOW buffered writes without data space reserved may not be able
	 * to fall back to CoW due to lack of data space, thus could cause
	 * data loss.
	 *
	 * Here we take a shortcut by flushing the whole inode, so that all
	 * NOCOW writes should reach disk as NOCOW before we increase the
	 * reference of the extent. We could do better by only flushing NOCOW
	 * data, but that needs extra accounting.
	 *
	 * Also we don't need to check ASYNC_EXTENT, as async extents will be
	 * CoWed anyway, not affecting the NOCOW part.
	 */
	ret = filemap_flush(inode_in->i_mapping);
	if (ret < 0)
		return ret;

	ret = btrfs_wait_ordered_range(inode_in, ALIGN_DOWN(pos_in, bs),
				       wb_len);
	if (ret < 0)
		return ret;
	ret = btrfs_wait_ordered_range(inode_out, ALIGN_DOWN(pos_out, bs),
				       wb_len);
	if (ret < 0)
		return ret;

	return generic_remap_file_range_prep(file_in, pos_in, file_out, pos_out,
					     len, remap_flags);
}

static bool file_sync_write(const struct file *file)
{
	if (file->f_flags & (__O_SYNC | O_DSYNC))
		return true;
	if (IS_SYNC(file_inode(file)))
		return true;

	return false;
}

loff_t btrfs_remap_file_range(struct file *src_file, loff_t off,
			      struct file *dst_file, loff_t destoff,
			      loff_t len, unsigned int remap_flags)
{
	struct inode *src_inode = file_inode(src_file);
	struct inode *dst_inode = file_inode(dst_file);
	bool same_inode = dst_inode == src_inode;
	int ret;

	if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
		return -EINVAL;

	if (same_inode) {
		btrfs_inode_lock(src_inode, BTRFS_ILOCK_MMAP);
	} else {
		lock_two_nondirectories(src_inode, dst_inode);
		btrfs_double_mmap_lock(src_inode, dst_inode);
	}

	ret = btrfs_remap_file_range_prep(src_file, off, dst_file, destoff,
					  &len, remap_flags);
	if (ret < 0 || len == 0)
		goto out_unlock;

	if (remap_flags & REMAP_FILE_DEDUP)
		ret = btrfs_extent_same(src_inode, off, len, dst_inode, destoff);
	else
		ret = btrfs_clone_files(dst_file, src_file, off, len, destoff);

out_unlock:
	if (same_inode) {
		btrfs_inode_unlock(src_inode, BTRFS_ILOCK_MMAP);
	} else {
		btrfs_double_mmap_unlock(src_inode, dst_inode);
		unlock_two_nondirectories(src_inode, dst_inode);
	}

	/*
	 * If either the source or the destination file was opened with O_SYNC,
	 * O_DSYNC or has the S_SYNC attribute, fsync both the destination and
	 * source files/ranges, so that a successful return (0) followed by a
	 * power failure still leaves the reflinked data readable from both
	 * files/ranges.
	 */
	if (ret == 0 && len > 0 &&
	    (file_sync_write(src_file) || file_sync_write(dst_file))) {
		ret = btrfs_sync_file(src_file, off, off + len - 1, 0);
		if (ret == 0)
			ret = btrfs_sync_file(dst_file, destoff,
					      destoff + len - 1, 0);
	}

	return ret < 0 ? ret : len;
}
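
/*
 * For reference, userspace reaches btrfs_remap_file_range() through the
 * generic remap entry points (the FICLONE/FICLONERANGE/FIDEDUPERANGE ioctls,
 * and copy_file_range() when the VFS can satisfy the copy with a clone). A
 * minimal userspace sketch for a range clone, with made-up file descriptors
 * and error handling omitted:
 *
 *	struct file_clone_range fcr = {
 *		.src_fd = src_fd,
 *		.src_offset = 0,
 *		.src_length = 0,	// 0 means clone up to source EOF
 *		.dest_offset = 0,
 *	};
 *	ioctl(dst_fd, FICLONERANGE, &fcr);
 */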