// SPDX-License-Identifier: GPL-2.0

#include <linux/blkdev.h>
#include <linux/iversion.h>
#include "compression.h"
#include "ctree.h"
#include "delalloc-space.h"
#include "reflink.h"
#include "transaction.h"
#include "subpage.h"

#define BTRFS_MAX_DEDUPE_LEN	SZ_16M

static int clone_finish_inode_update(struct btrfs_trans_handle *trans,
				     struct inode *inode,
				     u64 endoff,
				     const u64 destoff,
				     const u64 olen,
				     int no_time_update)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret;

	inode_inc_iversion(inode);
	if (!no_time_update)
		inode->i_mtime = inode->i_ctime = current_time(inode);
	/*
	 * We round up to the block size at eof when determining which
	 * extents to clone above, but shouldn't round up the file size.
	 */
	if (endoff > destoff + olen)
		endoff = destoff + olen;
	if (endoff > inode->i_size) {
		i_size_write(inode, endoff);
		btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
	}

	ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
		goto out;
	}
	ret = btrfs_end_transaction(trans);
out:
	return ret;
}
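
/*
 * Worked example for the endoff clamp above (illustrative): clone
 * olen == 500 bytes to destoff == 0 at the end of the source file.
 * With a 4K block size the clone loop operates on a block aligned
 * length, so endoff may arrive here as 4K, but the destination's
 * i_size must become destoff + olen == 500, not 4K.
 */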

static int copy_inline_to_page(struct btrfs_inode *inode,
			       const u64 file_offset,
			       char *inline_data,
			       const u64 size,
			       const u64 datal,
			       const u8 comp_type)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	const u32 block_size = fs_info->sectorsize;
	const u64 range_end = file_offset + block_size - 1;
	const size_t inline_size = size - btrfs_file_extent_calc_inline_size(0);
	char *data_start = inline_data + btrfs_file_extent_calc_inline_size(0);
	struct extent_changeset *data_reserved = NULL;
	struct page *page = NULL;
	struct address_space *mapping = inode->vfs_inode.i_mapping;
	int ret;

	ASSERT(IS_ALIGNED(file_offset, block_size));

	/*
	 * We have flushed and locked the ranges of the source and destination
	 * inodes, we also have locked the inodes, so we are safe to do a
	 * reservation here. Also we must not do the reservation while holding
	 * a transaction open, otherwise we would deadlock.
	 */
	ret = btrfs_delalloc_reserve_space(inode, &data_reserved, file_offset,
					   block_size);
	if (ret)
		goto out;

	page = find_or_create_page(mapping, file_offset >> PAGE_SHIFT,
				   btrfs_alloc_write_mask(mapping));
	if (!page) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	ret = set_page_extent_mapped(page);
	if (ret < 0)
		goto out_unlock;

	clear_extent_bit(&inode->io_tree, file_offset, range_end,
			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
			 0, 0, NULL);
	ret = btrfs_set_extent_delalloc(inode, file_offset, range_end, 0, NULL);
	if (ret)
		goto out_unlock;

	/*
	 * After dirtying the page our caller will need to start a transaction,
	 * and if we are low on metadata free space, that can cause flushing of
	 * delalloc for all inodes in order to get metadata space released.
	 * However we are holding the range locked for the whole duration of
	 * the clone/dedupe operation, so we may deadlock if that happens and no
	 * other task releases enough space. So mark this inode as not being
	 * possible to flush to avoid such deadlock. We will clear that flag
	 * when we finish cloning all extents, since a transaction is started
	 * after finding each extent to clone.
	 */
	set_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &inode->runtime_flags);

	if (comp_type == BTRFS_COMPRESS_NONE) {
		memcpy_to_page(page, offset_in_page(file_offset), data_start,
			       datal);
	} else {
		ret = btrfs_decompress(comp_type, data_start, page,
				       offset_in_page(file_offset),
				       inline_size, datal);
		if (ret)
			goto out_unlock;
		flush_dcache_page(page);
	}

	/*
	 * If our inline data is smaller than the block/page size, then the
	 * remainder of the block/page is equivalent to zeroes. We had something
	 * like the following done:
	 *
	 * $ xfs_io -f -c "pwrite -S 0xab 0 500" file
	 * $ sync  # (or fsync)
	 * $ xfs_io -c "falloc 0 4K" file
	 * $ xfs_io -c "pwrite -S 0xcd 4K 4K" file
	 *
	 * So what's in the range [500, 4095] corresponds to zeroes.
	 */
	if (datal < block_size)
		memzero_page(page, datal, block_size - datal);

	btrfs_page_set_uptodate(fs_info, page, file_offset, block_size);
	btrfs_page_clear_checked(fs_info, page, file_offset, block_size);
	btrfs_page_set_dirty(fs_info, page, file_offset, block_size);
out_unlock:
	if (page) {
		unlock_page(page);
		put_page(page);
	}
	if (ret)
		btrfs_delalloc_release_space(inode, data_reserved, file_offset,
					     block_size, true);
	btrfs_delalloc_release_extents(inode, block_size);
out:
	extent_changeset_free(data_reserved);

	return ret;
}

/*
 * Deal with cloning of inline extents. We try to copy the inline extent from
 * the source inode to the destination inode when possible. When not possible
 * we copy the inline extent's data into the respective page of the
 * destination inode.
 */
static int clone_copy_inline_extent(struct inode *dst,
				    struct btrfs_path *path,
				    struct btrfs_key *new_key,
				    const u64 drop_start,
				    const u64 datal,
				    const u64 size,
				    const u8 comp_type,
				    char *inline_data,
				    struct btrfs_trans_handle **trans_out)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(dst->i_sb);
	struct btrfs_root *root = BTRFS_I(dst)->root;
	const u64 aligned_end = ALIGN(new_key->offset + datal,
				      fs_info->sectorsize);
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_drop_extents_args drop_args = { 0 };
	int ret;
	struct btrfs_key key;

	if (new_key->offset > 0) {
		ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
					  inline_data, size, datal, comp_type);
		goto out;
	}

	key.objectid = btrfs_ino(BTRFS_I(dst));
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0) {
		return ret;
	} else if (ret > 0) {
		if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				return ret;
			else if (ret > 0)
				goto copy_inline_extent;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid == btrfs_ino(BTRFS_I(dst)) &&
		    key.type == BTRFS_EXTENT_DATA_KEY) {
			/*
			 * There's an implicit hole at file offset 0, copy the
			 * inline extent's data to the page.
			 */
			ASSERT(key.offset > 0);
			goto copy_to_page;
		}
	} else if (i_size_read(dst) <= datal) {
		struct btrfs_file_extent_item *ei;

		ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_file_extent_item);
		/*
		 * If it's an inline extent replace it with the source inline
		 * extent, otherwise copy the source inline extent data into
		 * the respective page at the destination inode.
		 */
		if (btrfs_file_extent_type(path->nodes[0], ei) ==
		    BTRFS_FILE_EXTENT_INLINE)
			goto copy_inline_extent;

		goto copy_to_page;
	}

copy_inline_extent:
	/*
	 * We have no extent items, or we have an extent at offset 0 which may
	 * or may not be inlined. All these cases are dealt with in the same
	 * way.
	 */
	if (i_size_read(dst) > datal) {
		/*
		 * At the destination offset 0 we have either a hole, a regular
		 * extent or an inline extent larger than the one we want to
		 * clone. Deal with all these cases by copying the inline extent
		 * data into the respective page at the destination inode.
		 */
		goto copy_to_page;
	}

	/*
	 * Release path before starting a new transaction so we don't hold locks
	 * that would confuse lockdep.
	 */
	btrfs_release_path(path);
	/*
	 * If we end up here it means we are copying the inline extent into a
	 * leaf of the destination inode. We know we will drop or adjust at
	 * most one extent item in the destination root.
	 *
	 * 1 unit - adjusting old extent (we may have to split it)
	 * 1 unit - add new extent
	 * 1 unit - inode update
	 */
	trans = btrfs_start_transaction(root, 3);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		goto out;
	}
	drop_args.path = path;
	drop_args.start = drop_start;
	drop_args.end = aligned_end;
	drop_args.drop_cache = true;
	ret = btrfs_drop_extents(trans, root, BTRFS_I(dst), &drop_args);
	if (ret)
		goto out;
	ret = btrfs_insert_empty_item(trans, root, path, new_key, size);
	if (ret)
		goto out;

	write_extent_buffer(path->nodes[0], inline_data,
			    btrfs_item_ptr_offset(path->nodes[0],
						  path->slots[0]),
			    size);
	btrfs_update_inode_bytes(BTRFS_I(dst), datal, drop_args.bytes_found);
	btrfs_set_inode_full_sync(BTRFS_I(dst));
	ret = btrfs_inode_set_file_extent_range(BTRFS_I(dst), 0, aligned_end);
out:
	if (!ret && !trans) {
		/*
		 * No transaction here means we copied the inline extent into a
		 * page of the destination inode.
		 *
		 * 1 unit to update the inode item.
		 */
		trans = btrfs_start_transaction(root, 1);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			trans = NULL;
		}
	}
	if (ret && trans) {
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
	}
	if (!ret)
		*trans_out = trans;

	return ret;

copy_to_page:
	/*
	 * Release our path because we don't need it anymore and also because
	 * copy_inline_to_page() needs to reserve data and metadata, which may
	 * need to flush delalloc when we are low on available space and
	 * therefore cause a deadlock if writeback of an inline extent needs to
	 * write to the same leaf or an ordered extent completion needs to write
	 * to the same leaf.
	 */
	btrfs_release_path(path);

	ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
				  inline_data, size, datal, comp_type);
	goto out;
}
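
/*
 * Illustrative scenario for the cases above: with a 4K sector size,
 * cloning a 500 byte inline extent to file offset 0 of an empty
 * destination file takes the copy_inline_extent path and inserts the
 * source's inline extent item directly into the destination root. If
 * the destination's i_size were 8K instead, i_size_read(dst) > datal
 * and the inline data would be copied into the destination's first
 * block via copy_inline_to_page().
 */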

/**
 * btrfs_clone() - clone a range from one inode's file to another
 *
 * @src: Inode to clone from
 * @inode: Inode to clone to
 * @off: Offset within source to start clone from
 * @olen: Original length, passed by user, of range to clone
 * @olen_aligned: Block-aligned value of olen
 * @destoff: Offset within @inode to start clone
 * @no_time_update: Whether to update mtime/ctime on the target inode
 */
static int btrfs_clone(struct inode *src, struct inode *inode,
		       const u64 off, const u64 olen, const u64 olen_aligned,
		       const u64 destoff, int no_time_update)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_path *path = NULL;
	struct extent_buffer *leaf;
	struct btrfs_trans_handle *trans;
	char *buf = NULL;
	struct btrfs_key key;
	u32 nritems;
	int slot;
	int ret;
	const u64 len = olen_aligned;
	u64 last_dest_end = destoff;
	u64 prev_extent_end = off;

	ret = -ENOMEM;
	buf = kvmalloc(fs_info->nodesize, GFP_KERNEL);
	if (!buf)
		return ret;

	path = btrfs_alloc_path();
	if (!path) {
		kvfree(buf);
		return ret;
	}

	path->reada = READA_FORWARD;
	/* Clone data */
	key.objectid = btrfs_ino(BTRFS_I(src));
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = off;

	while (1) {
		struct btrfs_file_extent_item *extent;
		u64 extent_gen;
		int type;
		u32 size;
		struct btrfs_key new_key;
		u64 disko = 0, diskl = 0;
		u64 datao = 0, datal = 0;
		u8 comp;
		u64 drop_start;

		/* Note the key will change type as we walk through the tree */
		ret = btrfs_search_slot(NULL, BTRFS_I(src)->root, &key, path,
					0, 0);
		if (ret < 0)
			goto out;
		/*
		 * On the first search, if no extent item that starts at offset
		 * off was found but the previous item is an extent item, it's
		 * possible it might overlap our target range, therefore
		 * process it.
		 */
		if (key.offset == off && ret > 0 && path->slots[0] > 0) {
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0] - 1);
			if (key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		nritems = btrfs_header_nritems(path->nodes[0]);
process_slot:
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(BTRFS_I(src)->root, path);
			if (ret < 0)
				goto out;
			if (ret > 0)
				break;
			nritems = btrfs_header_nritems(path->nodes[0]);
		}
		leaf = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.type > BTRFS_EXTENT_DATA_KEY ||
		    key.objectid != btrfs_ino(BTRFS_I(src)))
			break;

		ASSERT(key.type == BTRFS_EXTENT_DATA_KEY);

		extent = btrfs_item_ptr(leaf, slot,
					struct btrfs_file_extent_item);
		extent_gen = btrfs_file_extent_generation(leaf, extent);
		comp = btrfs_file_extent_compression(leaf, extent);
		type = btrfs_file_extent_type(leaf, extent);
		if (type == BTRFS_FILE_EXTENT_REG ||
		    type == BTRFS_FILE_EXTENT_PREALLOC) {
			disko = btrfs_file_extent_disk_bytenr(leaf, extent);
			diskl = btrfs_file_extent_disk_num_bytes(leaf, extent);
			datao = btrfs_file_extent_offset(leaf, extent);
			datal = btrfs_file_extent_num_bytes(leaf, extent);
		} else if (type == BTRFS_FILE_EXTENT_INLINE) {
			/* Take upper bound, may be compressed */
			datal = btrfs_file_extent_ram_bytes(leaf, extent);
		}

		/*
		 * The first search might have left us at an extent item that
		 * ends before our target range's start, which can happen if we
		 * have holes and the NO_HOLES feature enabled.
		 *
		 * Subsequent searches may leave us on a file range we have
		 * processed before - this happens due to a race with ordered
		 * extent completion for a file range that is outside our source
		 * range, but that range was part of a file extent item that
		 * also covered a leading part of our source range.
		 */
		if (key.offset + datal <= prev_extent_end) {
			path->slots[0]++;
			goto process_slot;
		} else if (key.offset >= off + len) {
			break;
		}

		prev_extent_end = key.offset + datal;
		size = btrfs_item_size(leaf, slot);
		read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf, slot),
				   size);

		btrfs_release_path(path);

		memcpy(&new_key, &key, sizeof(new_key));
		new_key.objectid = btrfs_ino(BTRFS_I(inode));
		if (off <= key.offset)
			new_key.offset = key.offset + destoff - off;
		else
			new_key.offset = destoff;

		/*
		 * Deal with a hole that doesn't have an extent item that
		 * represents it (NO_HOLES feature enabled).
		 * This hole is either in the middle of the cloning range or at
		 * the beginning (fully overlaps it or partially overlaps it).
		 */
		if (new_key.offset != last_dest_end)
			drop_start = last_dest_end;
		else
			drop_start = new_key.offset;

		if (type == BTRFS_FILE_EXTENT_REG ||
		    type == BTRFS_FILE_EXTENT_PREALLOC) {
			struct btrfs_replace_extent_info clone_info;

			/*
			 *    a  | --- range to clone ---|  b
			 * | ------------- extent ------------- |
			 */

			/* Subtract range b */
			if (key.offset + datal > off + len)
				datal = off + len - key.offset;

			/* Subtract range a */
			if (off > key.offset) {
				datao += off - key.offset;
				datal -= off - key.offset;
			}

			clone_info.disk_offset = disko;
			clone_info.disk_len = diskl;
			clone_info.data_offset = datao;
			clone_info.data_len = datal;
			clone_info.file_offset = new_key.offset;
			clone_info.extent_buf = buf;
			clone_info.is_new_extent = false;
			clone_info.update_times = !no_time_update;
			ret = btrfs_replace_file_extents(BTRFS_I(inode), path,
					drop_start, new_key.offset + datal - 1,
					&clone_info, &trans);
			if (ret)
				goto out;
		} else {
			ASSERT(type == BTRFS_FILE_EXTENT_INLINE);
			/*
			 * Inline extents always have to start at file offset 0
			 * and can never be bigger than the sector size. We can
			 * never clone only parts of an inline extent, since all
			 * reflink operations must start at a sector size aligned
			 * offset, and the length must be aligned too or end at
			 * the i_size (which implies the whole inlined data).
			 */
			ASSERT(key.offset == 0);
			ASSERT(datal <= fs_info->sectorsize);
			if (WARN_ON(type != BTRFS_FILE_EXTENT_INLINE) ||
			    WARN_ON(key.offset != 0) ||
			    WARN_ON(datal > fs_info->sectorsize)) {
				ret = -EUCLEAN;
				goto out;
			}

			ret = clone_copy_inline_extent(inode, path, &new_key,
						       drop_start, datal, size,
						       comp, buf, &trans);
			if (ret)
				goto out;
		}

		btrfs_release_path(path);
		/*
		 * Whenever we share an extent we update the last_reflink_trans
		 * of each inode to the current transaction. This is needed to
		 * make sure fsync does not log multiple checksum items with
		 * overlapping ranges (because some extent items might refer
		 * only to sections of the original extent). For the destination
		 * inode we do this regardless of the generation of the extents
		 * or even if they are inline extents or explicit holes, to make
		 * sure a full fsync does not skip them. For the source inode,
		 * we only need to update last_reflink_trans in case it's a new
		 * extent that is not a hole or an inline extent, to deal with
		 * the checksums problem on fsync.
		 */
		if (extent_gen == trans->transid && disko > 0)
			BTRFS_I(src)->last_reflink_trans = trans->transid;

		BTRFS_I(inode)->last_reflink_trans = trans->transid;

		last_dest_end = ALIGN(new_key.offset + datal,
				      fs_info->sectorsize);
		ret = clone_finish_inode_update(trans, inode, last_dest_end,
						destoff, olen, no_time_update);
		if (ret)
			goto out;
		if (new_key.offset + datal >= destoff + len)
			break;

		btrfs_release_path(path);
		key.offset = prev_extent_end;

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}

		cond_resched();
	}
	ret = 0;

	if (last_dest_end < destoff + len) {
		/*
		 * We have an implicit hole that fully or partially overlaps our
		 * cloning range at its end. This means that we either have the
		 * NO_HOLES feature enabled or the implicit hole happened due to
		 * mixing buffered and direct IO writes against this file.
		 */
		btrfs_release_path(path);

		/*
		 * When using NO_HOLES and we are cloning a range that covers
		 * only a hole (no extents) into a range beyond the current
		 * i_size, punching a hole in the target range will not create
		 * an extent map defining a hole, because the range starts at or
		 * beyond current i_size. If the file previously had an i_size
		 * greater than the new i_size set by this clone operation, we
		 * need to make sure the next fsync is a full fsync, so that it
		 * detects and logs a hole covering a range from the current
		 * i_size to the new i_size. If the clone range covers extents,
		 * besides a hole, then we know the full sync flag was already
		 * set by previous calls to btrfs_replace_file_extents() that
		 * replaced file extent items.
		 */
		if (last_dest_end >= i_size_read(inode))
			btrfs_set_inode_full_sync(BTRFS_I(inode));

		ret = btrfs_replace_file_extents(BTRFS_I(inode), path,
				last_dest_end, destoff + len - 1, NULL, &trans);
		if (ret)
			goto out;

		ret = clone_finish_inode_update(trans, inode, destoff + len,
						destoff, olen, no_time_update);
	}

out:
	btrfs_free_path(path);
	kvfree(buf);
	clear_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &BTRFS_I(inode)->runtime_flags);

	return ret;
}
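
/*
 * Worked example for the range trimming in the loop above (illustrative):
 * cloning the source range [8K, 24K) to destoff 0, where the source has
 * a file extent item at key.offset == 4K with datal == 16K (covering
 * [4K, 20K)). Range 'a' is [4K, 8K), so datao grows by 4K and datal
 * shrinks to 12K, and the extent is registered at new_key.offset ==
 * destoff == 0, covering the destination range [0, 12K).
 */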

static void btrfs_double_extent_unlock(struct inode *inode1, u64 loff1,
				       struct inode *inode2, u64 loff2, u64 len)
{
	unlock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1);
	unlock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1);
}

static void btrfs_double_extent_lock(struct inode *inode1, u64 loff1,
				     struct inode *inode2, u64 loff2, u64 len)
{
	u64 range1_end = loff1 + len - 1;
	u64 range2_end = loff2 + len - 1;

	if (inode1 < inode2) {
		swap(inode1, inode2);
		swap(loff1, loff2);
		swap(range1_end, range2_end);
	} else if (inode1 == inode2 && loff2 < loff1) {
		swap(loff1, loff2);
		swap(range1_end, range2_end);
	}

	lock_extent(&BTRFS_I(inode1)->io_tree, loff1, range1_end);
	lock_extent(&BTRFS_I(inode2)->io_tree, loff2, range2_end);

	btrfs_assert_inode_range_clean(BTRFS_I(inode1), loff1, range1_end);
	btrfs_assert_inode_range_clean(BTRFS_I(inode2), loff2, range2_end);
}

static void btrfs_double_mmap_lock(struct inode *inode1, struct inode *inode2)
{
	if (inode1 < inode2)
		swap(inode1, inode2);
	down_write(&BTRFS_I(inode1)->i_mmap_lock);
	down_write_nested(&BTRFS_I(inode2)->i_mmap_lock, SINGLE_DEPTH_NESTING);
}

static void btrfs_double_mmap_unlock(struct inode *inode1, struct inode *inode2)
{
	up_write(&BTRFS_I(inode1)->i_mmap_lock);
	up_write(&BTRFS_I(inode2)->i_mmap_lock);
}
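
/*
 * Note on the helpers above (illustrative): both double-lock helpers
 * impose a single global order to avoid ABBA deadlocks when two tasks
 * reflink the same pair of inodes in opposite directions: distinct
 * inodes are always locked in a fixed order based on their addresses,
 * and for a clone within one inode the lower offset range is locked
 * first. For example, with inode1 == inode2, loff1 == 64K, loff2 == 0
 * and len == 4K, the swap makes us lock [0, 4K - 1] before
 * [64K, 68K - 1].
 */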

static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 len,
				   struct inode *dst, u64 dst_loff)
{
	const u64 bs = BTRFS_I(src)->root->fs_info->sb->s_blocksize;
	int ret;

	/*
	 * Lock destination range to serialize with concurrent readahead() and
	 * source range to serialize with relocation.
	 */
	btrfs_double_extent_lock(src, loff, dst, dst_loff, len);
	ret = btrfs_clone(src, dst, loff, len, ALIGN(len, bs), dst_loff, 1);
	btrfs_double_extent_unlock(src, loff, dst, dst_loff, len);

	return ret;
}

static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
			     struct inode *dst, u64 dst_loff)
{
	int ret = 0;
	u64 i, tail_len, chunk_count;
	struct btrfs_root *root_dst = BTRFS_I(dst)->root;

	spin_lock(&root_dst->root_item_lock);
	if (root_dst->send_in_progress) {
		btrfs_warn_rl(root_dst->fs_info,
"cannot deduplicate to root %llu while send operations are using it (%d in progress)",
			      root_dst->root_key.objectid,
			      root_dst->send_in_progress);
		spin_unlock(&root_dst->root_item_lock);
		return -EAGAIN;
	}
	root_dst->dedupe_in_progress++;
	spin_unlock(&root_dst->root_item_lock);

	tail_len = olen % BTRFS_MAX_DEDUPE_LEN;
	chunk_count = div_u64(olen, BTRFS_MAX_DEDUPE_LEN);

	for (i = 0; i < chunk_count; i++) {
		ret = btrfs_extent_same_range(src, loff, BTRFS_MAX_DEDUPE_LEN,
					      dst, dst_loff);
		if (ret)
			goto out;

		loff += BTRFS_MAX_DEDUPE_LEN;
		dst_loff += BTRFS_MAX_DEDUPE_LEN;
	}

	if (tail_len > 0)
		ret = btrfs_extent_same_range(src, loff, tail_len, dst, dst_loff);
out:
	spin_lock(&root_dst->root_item_lock);
	root_dst->dedupe_in_progress--;
	spin_unlock(&root_dst->root_item_lock);

	return ret;
}
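
/*
 * Worked example for the chunking above (illustrative): for olen == 40M
 * and BTRFS_MAX_DEDUPE_LEN == 16M we get chunk_count == 2 and
 * tail_len == 8M. The loop deduplicates two 16M ranges starting at
 * (loff, dst_loff) and (loff + 16M, dst_loff + 16M), and the tail call
 * covers the final 8M at (loff + 32M, dst_loff + 32M).
 */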

static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
				      u64 off, u64 olen, u64 destoff)
{
	struct inode *inode = file_inode(file);
	struct inode *src = file_inode(file_src);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	int ret;
	int wb_ret;
	u64 len = olen;
	u64 bs = fs_info->sb->s_blocksize;

	/*
	 * VFS's generic_remap_file_range_prep() protects us from cloning the
	 * eof block into the middle of a file, which would result in corruption
	 * if the file size is not blocksize aligned. So we don't need to check
	 * for that case here.
	 */
	if (off + len == src->i_size)
		len = ALIGN(src->i_size, bs) - off;

	if (destoff > inode->i_size) {
		const u64 wb_start = ALIGN_DOWN(inode->i_size, bs);

		ret = btrfs_cont_expand(BTRFS_I(inode), inode->i_size, destoff);
		if (ret)
			return ret;
		/*
		 * We may have truncated the last block if the inode's size is
		 * not sector size aligned, so we need to wait for writeback to
		 * complete before proceeding further, otherwise we can race
		 * with cloning and attempt to increment a reference to an
		 * extent that no longer exists (writeback completed right after
		 * we found the previous extent covering eof and before we
		 * attempted to increment its reference count).
		 */
		ret = btrfs_wait_ordered_range(inode, wb_start,
					       destoff - wb_start);
		if (ret)
			return ret;
	}

	/*
	 * Lock destination range to serialize with concurrent readahead() and
	 * source range to serialize with relocation.
	 */
	btrfs_double_extent_lock(src, off, inode, destoff, len);
	ret = btrfs_clone(src, inode, off, olen, len, destoff, 0);
	btrfs_double_extent_unlock(src, off, inode, destoff, len);

	/*
	 * We may have copied an inline extent into a page of the destination
	 * range, so wait for writeback to complete before truncating pages
	 * from the page cache. This is a rare case.
	 */
	wb_ret = btrfs_wait_ordered_range(inode, destoff, len);
	ret = ret ? ret : wb_ret;
	/*
	 * Truncate page cache pages so that future reads will see the cloned
	 * data immediately and not the previous data.
	 */
	truncate_inode_pages_range(&inode->i_data,
				   round_down(destoff, PAGE_SIZE),
				   round_up(destoff + len, PAGE_SIZE) - 1);

	return ret;
}
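
/*
 * Worked example for the eof alignment at the top of btrfs_clone_files()
 * (illustrative): with bs == 4K, src->i_size == 6K, off == 4K and
 * olen == 2K, we have off + len == i_size, so len becomes
 * ALIGN(6K, 4K) - 4K == 4K, allowing the final partial block of the
 * source file to be cloned.
 */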

static int btrfs_remap_file_range_prep(struct file *file_in, loff_t pos_in,
				       struct file *file_out, loff_t pos_out,
				       loff_t *len, unsigned int remap_flags)
{
	struct inode *inode_in = file_inode(file_in);
	struct inode *inode_out = file_inode(file_out);
	u64 bs = BTRFS_I(inode_out)->root->fs_info->sb->s_blocksize;
	u64 wb_len;
	int ret;

	if (!(remap_flags & REMAP_FILE_DEDUP)) {
		struct btrfs_root *root_out = BTRFS_I(inode_out)->root;

		if (btrfs_root_readonly(root_out))
			return -EROFS;

		ASSERT(inode_in->i_sb == inode_out->i_sb);
	}

	/* Don't make the dst file partly checksummed */
	if ((BTRFS_I(inode_in)->flags & BTRFS_INODE_NODATASUM) !=
	    (BTRFS_I(inode_out)->flags & BTRFS_INODE_NODATASUM)) {
		return -EINVAL;
	}

	/*
	 * Now that the inodes are locked, we need to start writeback ourselves
	 * and can not rely on the writeback from the VFS's generic helper
	 * generic_remap_file_range_prep() because:
	 *
	 * 1) For compression we must call filemap_fdatawrite_range() twice
	 *    (btrfs_fdatawrite_range() does it for us), and the generic helper
	 *    only calls it once;
	 *
	 * 2) filemap_fdatawrite_range(), called by the generic helper, only
	 *    waits for the writeback to complete, i.e. for IO to be done, and
	 *    not for the ordered extents to complete. We need to wait for them
	 *    to complete so that new file extent items are in the fs tree.
	 */
	if (*len == 0 && !(remap_flags & REMAP_FILE_DEDUP))
		wb_len = ALIGN(inode_in->i_size, bs) - ALIGN_DOWN(pos_in, bs);
	else
		wb_len = ALIGN(*len, bs);

	/*
	 * Workaround to make sure NOCOW buffered writes reach disk as NOCOW.
	 *
	 * Btrfs' back references do not have a block level granularity, they
	 * work at the whole extent level.
	 * NOCOW buffered writes without data space reserved may not be able
	 * to fall back to CoW due to lack of data space, thus could cause
	 * data loss.
	 *
	 * Here we take a shortcut by flushing the whole inode, so that all
	 * NOCOW writes reach disk as NOCOW before we increase the reference
	 * count of the extent. We could do better by only flushing NOCOW
	 * data, but that needs extra accounting.
	 *
	 * Also we don't need to check ASYNC_EXTENT, as async extents will be
	 * CoWed anyway, not affecting the NOCOW part.
	 */
	ret = filemap_flush(inode_in->i_mapping);
	if (ret < 0)
		return ret;

	ret = btrfs_wait_ordered_range(inode_in, ALIGN_DOWN(pos_in, bs),
				       wb_len);
	if (ret < 0)
		return ret;
	ret = btrfs_wait_ordered_range(inode_out, ALIGN_DOWN(pos_out, bs),
				       wb_len);
	if (ret < 0)
		return ret;

	return generic_remap_file_range_prep(file_in, pos_in, file_out, pos_out,
					     len, remap_flags);
}

static bool file_sync_write(const struct file *file)
{
	if (file->f_flags & (__O_SYNC | O_DSYNC))
		return true;
	if (IS_SYNC(file_inode(file)))
		return true;

	return false;
}

loff_t btrfs_remap_file_range(struct file *src_file, loff_t off,
			      struct file *dst_file, loff_t destoff, loff_t len,
			      unsigned int remap_flags)
{
	struct inode *src_inode = file_inode(src_file);
	struct inode *dst_inode = file_inode(dst_file);
	bool same_inode = dst_inode == src_inode;
	int ret;

	if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
		return -EINVAL;

	if (same_inode) {
		btrfs_inode_lock(src_inode, BTRFS_ILOCK_MMAP);
	} else {
		lock_two_nondirectories(src_inode, dst_inode);
		btrfs_double_mmap_lock(src_inode, dst_inode);
	}

	ret = btrfs_remap_file_range_prep(src_file, off, dst_file, destoff,
					  &len, remap_flags);
	if (ret < 0 || len == 0)
		goto out_unlock;

	if (remap_flags & REMAP_FILE_DEDUP)
		ret = btrfs_extent_same(src_inode, off, len, dst_inode, destoff);
	else
		ret = btrfs_clone_files(dst_file, src_file, off, len, destoff);

out_unlock:
	if (same_inode) {
		btrfs_inode_unlock(src_inode, BTRFS_ILOCK_MMAP);
	} else {
		btrfs_double_mmap_unlock(src_inode, dst_inode);
		unlock_two_nondirectories(src_inode, dst_inode);
	}

	/*
	 * If either the source or the destination file was opened with O_SYNC,
	 * O_DSYNC or has the S_SYNC attribute, fsync both the destination and
	 * the source files/ranges, so that a successful return (0) followed by
	 * a power failure results in the reflinked data being readable from
	 * both files/ranges.
	 */
	if (ret == 0 && len > 0 &&
	    (file_sync_write(src_file) || file_sync_write(dst_file))) {
		ret = btrfs_sync_file(src_file, off, off + len - 1, 0);
		if (ret == 0)
			ret = btrfs_sync_file(dst_file, destoff,
					      destoff + len - 1, 0);
	}

	return ret < 0 ? ret : len;
}
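
/*
 * Example userspace usage (illustrative sketch, not part of this file):
 * btrfs_remap_file_range() is wired up as the inode's ->remap_file_range
 * operation, which backs the FICLONE/FICLONERANGE and FIDEDUPERANGE
 * ioctls. A minimal clone of 1M from fd_src at offset 0 into fd_dst at
 * offset 0 could look like:
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/fs.h>
 *
 *	struct file_clone_range fcr = {
 *		.src_fd = fd_src,
 *		.src_offset = 0,
 *		.src_length = 1024 * 1024,
 *		.dest_offset = 0,
 *	};
 *	int ret = ioctl(fd_dst, FICLONERANGE, &fcr);
 */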