// SPDX-License-Identifier: GPL-2.0

#include <linux/blkdev.h>
#include <linux/iversion.h>
#include "compression.h"
#include "ctree.h"
#include "delalloc-space.h"
#include "reflink.h"
#include "transaction.h"

#define BTRFS_MAX_DEDUPE_LEN	SZ_16M

static int clone_finish_inode_update(struct btrfs_trans_handle *trans,
				     struct inode *inode,
				     u64 endoff,
				     const u64 destoff,
				     const u64 olen,
				     int no_time_update)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret;

	inode_inc_iversion(inode);
	if (!no_time_update)
		inode->i_mtime = inode->i_ctime = current_time(inode);
	/*
	 * We round up to the block size at eof when determining which
	 * extents to clone above, but shouldn't round up the file size.
	 */
	if (endoff > destoff + olen)
		endoff = destoff + olen;
	if (endoff > inode->i_size) {
		i_size_write(inode, endoff);
		btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
	}

	ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
		goto out;
	}
	ret = btrfs_end_transaction(trans);
out:
	return ret;
}
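
/*
 * Copy the data of an inline extent into a page of the destination inode,
 * used when we can not clone the inline extent itself.
 *
 * @inode:       The destination inode.
 * @file_offset: Destination file offset, must be block size aligned.
 * @inline_data: Pointer to a copy of the source file extent item.
 * @size:        Size of the file extent item, in bytes.
 * @datal:       Length of the extent's data once uncompressed.
 * @comp_type:   Compression type of the inline extent's data.
 */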
static int copy_inline_to_page(struct btrfs_inode *inode,
			       const u64 file_offset,
			       char *inline_data,
			       const u64 size,
			       const u64 datal,
			       const u8 comp_type)
{
	const u64 block_size = btrfs_inode_sectorsize(inode);
	const u64 range_end = file_offset + block_size - 1;
	const size_t inline_size = size - btrfs_file_extent_calc_inline_size(0);
	char *data_start = inline_data + btrfs_file_extent_calc_inline_size(0);
	struct extent_changeset *data_reserved = NULL;
	struct page *page = NULL;
	struct address_space *mapping = inode->vfs_inode.i_mapping;
	int ret;

	ASSERT(IS_ALIGNED(file_offset, block_size));

	/*
	 * We have flushed and locked the ranges of the source and destination
	 * inodes, we also have locked the inodes, so we are safe to do a
	 * reservation here. Also we must not do the reservation while holding
	 * a transaction open, otherwise we would deadlock.
	 */
	ret = btrfs_delalloc_reserve_space(inode, &data_reserved, file_offset,
					   block_size);
	if (ret)
		goto out;

	page = find_or_create_page(mapping, file_offset >> PAGE_SHIFT,
				   btrfs_alloc_write_mask(mapping));
	if (!page) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	ret = set_page_extent_mapped(page);
	if (ret < 0)
		goto out_unlock;

	clear_extent_bit(&inode->io_tree, file_offset, range_end,
			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
			 0, 0, NULL);
	ret = btrfs_set_extent_delalloc(inode, file_offset, range_end, 0, NULL);
	if (ret)
		goto out_unlock;

	/*
	 * After dirtying the page our caller will need to start a transaction,
	 * and if we are low on metadata free space, that can cause flushing of
	 * delalloc for all inodes in order to get metadata space released.
	 * However we are holding the range locked for the whole duration of
	 * the clone/dedupe operation, so we may deadlock if that happens and no
	 * other task releases enough space. So mark this inode as not being
	 * possible to flush to avoid such deadlock. We will clear that flag
	 * when we finish cloning all extents, since a transaction is started
	 * after finding each extent to clone.
	 */
	set_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &inode->runtime_flags);

	if (comp_type == BTRFS_COMPRESS_NONE) {
		memcpy_to_page(page, 0, data_start, datal);
		flush_dcache_page(page);
	} else {
		ret = btrfs_decompress(comp_type, data_start, page, 0,
				       inline_size, datal);
		if (ret)
			goto out_unlock;
		flush_dcache_page(page);
	}

	/*
	 * If our inline data is smaller than the block/page size, then the
	 * remainder of the block/page is equivalent to zeroes. We had something
	 * like the following done:
	 *
	 * $ xfs_io -f -c "pwrite -S 0xab 0 500" file
	 * $ sync  # (or fsync)
	 * $ xfs_io -c "falloc 0 4K" file
	 * $ xfs_io -c "pwrite -S 0xcd 4K 4K" file
	 *
	 * So what's in the range [500, 4095] corresponds to zeroes.
	 */
	if (datal < block_size) {
		char *map;

		map = kmap(page);
		memset(map + datal, 0, block_size - datal);
		flush_dcache_page(page);
		kunmap(page);
	}

	SetPageUptodate(page);
	ClearPageChecked(page);
	set_page_dirty(page);
out_unlock:
	if (page) {
		unlock_page(page);
		put_page(page);
	}
	if (ret)
		btrfs_delalloc_release_space(inode, data_reserved, file_offset,
					     block_size, true);
	btrfs_delalloc_release_extents(inode, block_size);
out:
	extent_changeset_free(data_reserved);

	return ret;
}

/*
 * Deal with cloning of inline extents. We try to copy the inline extent from
 * the source inode to the destination inode when possible. When not possible
 * we copy the inline extent's data into the respective page of the destination
 * inode.
 */
static int clone_copy_inline_extent(struct inode *dst,
				    struct btrfs_path *path,
				    struct btrfs_key *new_key,
				    const u64 drop_start,
				    const u64 datal,
				    const u64 size,
				    const u8 comp_type,
				    char *inline_data,
				    struct btrfs_trans_handle **trans_out)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(dst->i_sb);
	struct btrfs_root *root = BTRFS_I(dst)->root;
	const u64 aligned_end = ALIGN(new_key->offset + datal,
				      fs_info->sectorsize);
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_drop_extents_args drop_args = { 0 };
	int ret;
	struct btrfs_key key;

	if (new_key->offset > 0) {
		ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
					  inline_data, size, datal, comp_type);
		goto out;
	}

	key.objectid = btrfs_ino(BTRFS_I(dst));
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0) {
		return ret;
	} else if (ret > 0) {
		if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				return ret;
			else if (ret > 0)
				goto copy_inline_extent;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid == btrfs_ino(BTRFS_I(dst)) &&
		    key.type == BTRFS_EXTENT_DATA_KEY) {
			/*
			 * There's an implicit hole at file offset 0, copy the
			 * inline extent's data to the page.
			 */
			ASSERT(key.offset > 0);
			ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
						  inline_data, size, datal,
						  comp_type);
			goto out;
		}
	} else if (i_size_read(dst) <= datal) {
		struct btrfs_file_extent_item *ei;

		ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_file_extent_item);
		/*
		 * If it's an inline extent replace it with the source inline
		 * extent, otherwise copy the source inline extent data into
		 * the respective page at the destination inode.
		 */
		if (btrfs_file_extent_type(path->nodes[0], ei) ==
		    BTRFS_FILE_EXTENT_INLINE)
			goto copy_inline_extent;

		ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
					  inline_data, size, datal, comp_type);
		goto out;
	}

copy_inline_extent:
	ret = 0;
	/*
	 * We have no extent items, or we have an extent at offset 0 which may
	 * or may not be inlined. All these cases are dealt with the same way.
	 */
	if (i_size_read(dst) > datal) {
		/*
		 * At the destination offset 0 we have either a hole, a regular
		 * extent or an inline extent larger than the one we want to
		 * clone. Deal with all these cases by copying the inline extent
		 * data into the respective page at the destination inode.
		 */
		ret = copy_inline_to_page(BTRFS_I(dst), new_key->offset,
					  inline_data, size, datal, comp_type);
		goto out;
	}

	btrfs_release_path(path);
	/*
	 * If we end up here it means we're copying the inline extent into a
	 * leaf of the destination inode. We know we will drop or adjust at
	 * most one extent item in the destination root.
	 *
	 * 1 unit - adjusting old extent (we may have to split it)
	 * 1 unit - add new extent
	 * 1 unit - inode update
	 */
	trans = btrfs_start_transaction(root, 3);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		goto out;
	}
	drop_args.path = path;
	drop_args.start = drop_start;
	drop_args.end = aligned_end;
	drop_args.drop_cache = true;
	ret = btrfs_drop_extents(trans, root, BTRFS_I(dst), &drop_args);
	if (ret)
		goto out;
	ret = btrfs_insert_empty_item(trans, root, path, new_key, size);
	if (ret)
		goto out;

	write_extent_buffer(path->nodes[0], inline_data,
			    btrfs_item_ptr_offset(path->nodes[0],
						  path->slots[0]),
			    size);
	btrfs_update_inode_bytes(BTRFS_I(dst), datal, drop_args.bytes_found);
	set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(dst)->runtime_flags);
	ret = btrfs_inode_set_file_extent_range(BTRFS_I(dst), 0, aligned_end);
out:
	if (!ret && !trans) {
		/*
		 * No transaction here means we copied the inline extent into a
		 * page of the destination inode.
		 *
		 * 1 unit to update inode item
		 */
		trans = btrfs_start_transaction(root, 1);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			trans = NULL;
		}
	}
	if (ret && trans) {
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
	}
	if (!ret)
		*trans_out = trans;

	return ret;
}

/**
 * btrfs_clone() - clone a range from one inode's file to another
 *
 * @src: Inode to clone from
 * @inode: Inode to clone to
 * @off: Offset within source to start clone from
 * @olen: Original length, passed by user, of range to clone
 * @olen_aligned: Block-aligned value of olen
 * @destoff: Offset within @inode to start clone
 * @no_time_update: Whether to update mtime/ctime on the target inode
 */
static int btrfs_clone(struct inode *src, struct inode *inode,
		       const u64 off, const u64 olen, const u64 olen_aligned,
		       const u64 destoff, int no_time_update)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_path *path = NULL;
	struct extent_buffer *leaf;
	struct btrfs_trans_handle *trans;
	char *buf = NULL;
	struct btrfs_key key;
	u32 nritems;
	int slot;
	int ret;
	const u64 len = olen_aligned;
	u64 last_dest_end = destoff;

	ret = -ENOMEM;
	buf = kvmalloc(fs_info->nodesize, GFP_KERNEL);
	if (!buf)
		return ret;

	path = btrfs_alloc_path();
	if (!path) {
		kvfree(buf);
		return ret;
	}

	path->reada = READA_FORWARD;
	/* Clone data */
	key.objectid = btrfs_ino(BTRFS_I(src));
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = off;

	while (1) {
		u64 next_key_min_offset = key.offset + 1;
		struct btrfs_file_extent_item *extent;
		u64 extent_gen;
		int type;
		u32 size;
		struct btrfs_key new_key;
		u64 disko = 0, diskl = 0;
		u64 datao = 0, datal = 0;
		u8 comp;
		u64 drop_start;

		/* Note the key will change type as we walk through the tree */
		ret = btrfs_search_slot(NULL, BTRFS_I(src)->root, &key, path,
					0, 0);
		if (ret < 0)
			goto out;
		/*
		 * First search, if no extent item that starts at offset off was
		 * found but the previous item is an extent item, it's possible
		 * it might overlap our target range, therefore process it.
		 */
		if (key.offset == off && ret > 0 && path->slots[0] > 0) {
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0] - 1);
			if (key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}

		nritems = btrfs_header_nritems(path->nodes[0]);
process_slot:
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(BTRFS_I(src)->root, path);
			if (ret < 0)
				goto out;
			if (ret > 0)
				break;
			nritems = btrfs_header_nritems(path->nodes[0]);
		}
		leaf = path->nodes[0];
		slot = path->slots[0];

		btrfs_item_key_to_cpu(leaf, &key, slot);
		if (key.type > BTRFS_EXTENT_DATA_KEY ||
		    key.objectid != btrfs_ino(BTRFS_I(src)))
			break;

		ASSERT(key.type == BTRFS_EXTENT_DATA_KEY);

		extent = btrfs_item_ptr(leaf, slot,
					struct btrfs_file_extent_item);
		extent_gen = btrfs_file_extent_generation(leaf, extent);
		comp = btrfs_file_extent_compression(leaf, extent);
		type = btrfs_file_extent_type(leaf, extent);
		if (type == BTRFS_FILE_EXTENT_REG ||
		    type == BTRFS_FILE_EXTENT_PREALLOC) {
			disko = btrfs_file_extent_disk_bytenr(leaf, extent);
			diskl = btrfs_file_extent_disk_num_bytes(leaf, extent);
			datao = btrfs_file_extent_offset(leaf, extent);
			datal = btrfs_file_extent_num_bytes(leaf, extent);
		} else if (type == BTRFS_FILE_EXTENT_INLINE) {
			/* Take upper bound, may be compressed */
			datal = btrfs_file_extent_ram_bytes(leaf, extent);
		}

		/*
		 * The first search might have left us at an extent item that
		 * ends before our target range's start, which can happen if we
		 * have holes and the NO_HOLES feature enabled.
		 */
		if (key.offset + datal <= off) {
			path->slots[0]++;
			goto process_slot;
		} else if (key.offset >= off + len) {
			break;
		}
		next_key_min_offset = key.offset + datal;
		size = btrfs_item_size_nr(leaf, slot);
		read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf, slot),
				   size);

		btrfs_release_path(path);

		memcpy(&new_key, &key, sizeof(new_key));
		new_key.objectid = btrfs_ino(BTRFS_I(inode));
		if (off <= key.offset)
			new_key.offset = key.offset + destoff - off;
		else
			new_key.offset = destoff;

		/*
		 * Deal with a hole that doesn't have an extent item that
		 * represents it (NO_HOLES feature enabled).
		 * This hole is either in the middle of the cloning range or at
		 * the beginning (fully overlaps it or partially overlaps it).
		 */
		if (new_key.offset != last_dest_end)
			drop_start = last_dest_end;
		else
			drop_start = new_key.offset;

		if (type == BTRFS_FILE_EXTENT_REG ||
		    type == BTRFS_FILE_EXTENT_PREALLOC) {
			struct btrfs_replace_extent_info clone_info;

			/*
			 *    a  | --- range to clone ---|  b
			 * | ------------- extent ------------- |
			 */

			/* Subtract range b */
			if (key.offset + datal > off + len)
				datal = off + len - key.offset;

			/* Subtract range a */
			if (off > key.offset) {
				datao += off - key.offset;
				datal -= off - key.offset;
			}

			clone_info.disk_offset = disko;
			clone_info.disk_len = diskl;
			clone_info.data_offset = datao;
			clone_info.data_len = datal;
			clone_info.file_offset = new_key.offset;
			clone_info.extent_buf = buf;
			clone_info.is_new_extent = false;
			ret = btrfs_replace_file_extents(inode, path, drop_start,
					new_key.offset + datal - 1, &clone_info,
					&trans);
			if (ret)
				goto out;
		} else if (type == BTRFS_FILE_EXTENT_INLINE) {
			/*
			 * Inline extents always have to start at file offset 0
			 * and can never be bigger than the sector size. We can
			 * never clone only parts of an inline extent, since all
			 * reflink operations must start at a sector size aligned
			 * offset, and the length must be aligned too or end at
			 * the i_size (which implies the whole inlined data).
			 */
			ASSERT(key.offset == 0);
			ASSERT(datal <= fs_info->sectorsize);
			if (key.offset != 0 || datal > fs_info->sectorsize)
				return -EUCLEAN;

			ret = clone_copy_inline_extent(inode, path, &new_key,
						       drop_start, datal, size,
						       comp, buf, &trans);
			if (ret)
				goto out;
		}

		btrfs_release_path(path);

		/*
		 * If this is a new extent update the last_reflink_trans of both
		 * inodes. This is used by fsync to make sure it does not log
		 * multiple checksum items with overlapping ranges. For older
		 * extents we don't need to do it since inode logging skips the
		 * checksums for older extents. Also ignore holes and inline
		 * extents because they don't have checksums in the csum tree.
		 */
		if (extent_gen == trans->transid && disko > 0) {
			BTRFS_I(src)->last_reflink_trans = trans->transid;
			BTRFS_I(inode)->last_reflink_trans = trans->transid;
		}

		last_dest_end = ALIGN(new_key.offset + datal,
				      fs_info->sectorsize);
		ret = clone_finish_inode_update(trans, inode, last_dest_end,
						destoff, olen, no_time_update);
		if (ret)
			goto out;
		if (new_key.offset + datal >= destoff + len)
			break;

		btrfs_release_path(path);
		key.offset = next_key_min_offset;

		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			goto out;
		}

		cond_resched();
	}
	ret = 0;

	if (last_dest_end < destoff + len) {
		/*
		 * We have an implicit hole that fully or partially overlaps our
		 * cloning range at its end. This means that we either have the
		 * NO_HOLES feature enabled or the implicit hole happened due to
		 * mixing buffered and direct IO writes against this file.
		 */
		btrfs_release_path(path);

		/*
		 * When using NO_HOLES and we are cloning a range that covers
		 * only a hole (no extents) into a range beyond the current
		 * i_size, punching a hole in the target range will not create
		 * an extent map defining a hole, because the range starts at or
		 * beyond current i_size. If the file previously had an i_size
		 * greater than the new i_size set by this clone operation, we
		 * need to make sure the next fsync is a full fsync, so that it
		 * detects and logs a hole covering a range from the current
		 * i_size to the new i_size. If the clone range covers extents,
		 * besides a hole, then we know the full sync flag was already
		 * set by previous calls to btrfs_replace_file_extents() that
		 * replaced file extent items.
		 */
		if (last_dest_end >= i_size_read(inode))
			set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
				&BTRFS_I(inode)->runtime_flags);

		ret = btrfs_replace_file_extents(inode, path, last_dest_end,
				destoff + len - 1, NULL, &trans);
		if (ret)
			goto out;

		ret = clone_finish_inode_update(trans, inode, destoff + len,
						destoff, olen, no_time_update);
	}

out:
	btrfs_free_path(path);
	kvfree(buf);
	clear_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &BTRFS_I(inode)->runtime_flags);

	return ret;
}

static void btrfs_double_extent_unlock(struct inode *inode1, u64 loff1,
				       struct inode *inode2, u64 loff2, u64 len)
{
	unlock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1);
	unlock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1);
}

static void btrfs_double_extent_lock(struct inode *inode1, u64 loff1,
				     struct inode *inode2, u64 loff2, u64 len)
{
	if (inode1 < inode2) {
		swap(inode1, inode2);
		swap(loff1, loff2);
	} else if (inode1 == inode2 && loff2 < loff1) {
		swap(loff1, loff2);
	}
	lock_extent(&BTRFS_I(inode1)->io_tree, loff1, loff1 + len - 1);
	lock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1);
}
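
/*
 * Deduplicate a single chunk. By the time we get here the VFS, through
 * generic_remap_file_range_prep(), has already verified that the source and
 * destination ranges have identical contents, so all that is left to do is
 * clone the source range into the destination, without updating mtime/ctime
 * (no_time_update passed as 1 to btrfs_clone()).
 */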
static int btrfs_extent_same_range(struct inode *src, u64 loff, u64 len,
				   struct inode *dst, u64 dst_loff)
{
	const u64 bs = BTRFS_I(src)->root->fs_info->sb->s_blocksize;
	int ret;

	/*
	 * Lock destination range to serialize with concurrent readpages() and
	 * source range to serialize with relocation.
	 */
	btrfs_double_extent_lock(src, loff, dst, dst_loff, len);
	ret = btrfs_clone(src, dst, loff, len, ALIGN(len, bs), dst_loff, 1);
	btrfs_double_extent_unlock(src, loff, dst, dst_loff, len);

	return ret;
}

static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
			     struct inode *dst, u64 dst_loff)
{
	int ret;
	u64 i, tail_len, chunk_count;
	struct btrfs_root *root_dst = BTRFS_I(dst)->root;

	spin_lock(&root_dst->root_item_lock);
	if (root_dst->send_in_progress) {
		btrfs_warn_rl(root_dst->fs_info,
			      "cannot deduplicate to root %llu while send operations are using it (%d in progress)",
			      root_dst->root_key.objectid,
			      root_dst->send_in_progress);
		spin_unlock(&root_dst->root_item_lock);
		return -EAGAIN;
	}
	root_dst->dedupe_in_progress++;
	spin_unlock(&root_dst->root_item_lock);

	tail_len = olen % BTRFS_MAX_DEDUPE_LEN;
	chunk_count = div_u64(olen, BTRFS_MAX_DEDUPE_LEN);

	for (i = 0; i < chunk_count; i++) {
		ret = btrfs_extent_same_range(src, loff, BTRFS_MAX_DEDUPE_LEN,
					      dst, dst_loff);
		if (ret)
			goto out;

		loff += BTRFS_MAX_DEDUPE_LEN;
		dst_loff += BTRFS_MAX_DEDUPE_LEN;
	}

	if (tail_len > 0)
		ret = btrfs_extent_same_range(src, loff, tail_len, dst, dst_loff);
out:
	spin_lock(&root_dst->root_item_lock);
	root_dst->dedupe_in_progress--;
	spin_unlock(&root_dst->root_item_lock);

	return ret;
}
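
/*
 * Clone a range from file_src into file. If the destination offset lies beyond
 * the current i_size, the destination file is first expanded with
 * btrfs_cont_expand(). Both extent ranges are then locked, the range is
 * cloned, and finally we wait for ordered extents and drop the affected page
 * cache range so that subsequent reads see the cloned data.
 */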
static noinline int btrfs_clone_files(struct file *file, struct file *file_src,
				      u64 off, u64 olen, u64 destoff)
{
	struct inode *inode = file_inode(file);
	struct inode *src = file_inode(file_src);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	int ret;
	int wb_ret;
	u64 len = olen;
	u64 bs = fs_info->sb->s_blocksize;

	/*
	 * VFS's generic_remap_file_range_prep() protects us from cloning the
	 * eof block into the middle of a file, which would result in corruption
	 * if the file size is not blocksize aligned. So we don't need to check
	 * for that case here.
	 */
	if (off + len == src->i_size)
		len = ALIGN(src->i_size, bs) - off;

	if (destoff > inode->i_size) {
		const u64 wb_start = ALIGN_DOWN(inode->i_size, bs);

		ret = btrfs_cont_expand(BTRFS_I(inode), inode->i_size, destoff);
		if (ret)
			return ret;
		/*
		 * We may have truncated the last block if the inode's size is
		 * not sector size aligned, so we need to wait for writeback to
		 * complete before proceeding further, otherwise we can race
		 * with cloning and attempt to increment a reference to an
		 * extent that no longer exists (writeback completed right after
		 * we found the previous extent covering eof and before we
		 * attempted to increment its reference count).
		 */
		ret = btrfs_wait_ordered_range(inode, wb_start,
					       destoff - wb_start);
		if (ret)
			return ret;
	}

	/*
	 * Lock destination range to serialize with concurrent readpages() and
	 * source range to serialize with relocation.
	 */
	btrfs_double_extent_lock(src, off, inode, destoff, len);
	ret = btrfs_clone(src, inode, off, olen, len, destoff, 0);
	btrfs_double_extent_unlock(src, off, inode, destoff, len);

	/*
	 * We may have copied an inline extent into a page of the destination
	 * range, so wait for writeback to complete before truncating pages
	 * from the page cache. This is a rare case.
	 */
	wb_ret = btrfs_wait_ordered_range(inode, destoff, len);
	ret = ret ? ret : wb_ret;
	/*
	 * Truncate page cache pages so that future reads will see the cloned
	 * data immediately and not the previous data.
	 */
	truncate_inode_pages_range(&inode->i_data,
				   round_down(destoff, PAGE_SIZE),
				   round_up(destoff + len, PAGE_SIZE) - 1);

	return ret;
}

static int btrfs_remap_file_range_prep(struct file *file_in, loff_t pos_in,
				       struct file *file_out, loff_t pos_out,
				       loff_t *len, unsigned int remap_flags)
{
	struct inode *inode_in = file_inode(file_in);
	struct inode *inode_out = file_inode(file_out);
	u64 bs = BTRFS_I(inode_out)->root->fs_info->sb->s_blocksize;
	bool same_inode = inode_out == inode_in;
	u64 wb_len;
	int ret;

	if (!(remap_flags & REMAP_FILE_DEDUP)) {
		struct btrfs_root *root_out = BTRFS_I(inode_out)->root;

		if (btrfs_root_readonly(root_out))
			return -EROFS;

		if (file_in->f_path.mnt != file_out->f_path.mnt ||
		    inode_in->i_sb != inode_out->i_sb)
			return -EXDEV;
	}

	/* Don't make the dst file partly checksummed */
	if ((BTRFS_I(inode_in)->flags & BTRFS_INODE_NODATASUM) !=
	    (BTRFS_I(inode_out)->flags & BTRFS_INODE_NODATASUM)) {
		return -EINVAL;
	}

	/*
	 * Now that the inodes are locked, we need to start writeback ourselves
	 * and cannot rely on the writeback from the VFS's generic helper
	 * generic_remap_file_range_prep() because:
	 *
	 * 1) For compression we must call filemap_fdatawrite_range() twice
	 *    (btrfs_fdatawrite_range() does it for us), and the generic helper
	 *    only calls it once;
	 *
	 * 2) filemap_fdatawrite_range(), called by the generic helper, only
	 *    waits for the writeback to complete, i.e. for IO to be done, and
	 *    not for the ordered extents to complete. We need to wait for them
	 *    to complete so that new file extent items are in the fs tree.
	 */
	if (*len == 0 && !(remap_flags & REMAP_FILE_DEDUP))
		wb_len = ALIGN(inode_in->i_size, bs) - ALIGN_DOWN(pos_in, bs);
	else
		wb_len = ALIGN(*len, bs);

	/*
	 * Since we don't lock ranges, wait for ongoing lockless dio writes (as
	 * any in progress could create its ordered extents after we wait for
	 * existing ordered extents below).
	 */
	inode_dio_wait(inode_in);
	if (!same_inode)
		inode_dio_wait(inode_out);

	/*
	 * Workaround to make sure NOCOW buffered writes reach disk as NOCOW.
	 *
	 * Btrfs' back references do not have a block level granularity, they
	 * work at the whole extent level.
	 * NOCOW buffered writes without data space reserved may not be able
	 * to fall back to CoW due to lack of data space, thus could cause
	 * data loss.
	 *
	 * Here we take a shortcut by flushing the whole inode, so that all
	 * NOCOW writes reach disk as NOCOW before we increase the reference
	 * count of the extent. We could do better by only flushing NOCOW
	 * data, but that needs extra accounting.
	 *
	 * Also we don't need to check ASYNC_EXTENT, as async extents will be
	 * CoWed anyway, not affecting the NOCOW part.
	 */
	ret = filemap_flush(inode_in->i_mapping);
	if (ret < 0)
		return ret;

	ret = btrfs_wait_ordered_range(inode_in, ALIGN_DOWN(pos_in, bs),
				       wb_len);
	if (ret < 0)
		return ret;
	ret = btrfs_wait_ordered_range(inode_out, ALIGN_DOWN(pos_out, bs),
				       wb_len);
	if (ret < 0)
		return ret;

	return generic_remap_file_range_prep(file_in, pos_in, file_out, pos_out,
					     len, remap_flags);
}
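
/*
 * Btrfs' implementation of the ->remap_file_range file operation, which backs
 * the FICLONE, FICLONERANGE and FIDEDUPERANGE ioctls. Lock the inode(s), run
 * the preparation and validation steps, then dispatch to either the dedupe or
 * the clone implementation. Returns the remapped length on success.
 */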
loff_t btrfs_remap_file_range(struct file *src_file, loff_t off,
			      struct file *dst_file, loff_t destoff, loff_t len,
			      unsigned int remap_flags)
{
	struct inode *src_inode = file_inode(src_file);
	struct inode *dst_inode = file_inode(dst_file);
	bool same_inode = dst_inode == src_inode;
	int ret;

	if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
		return -EINVAL;

	if (same_inode)
		inode_lock(src_inode);
	else
		lock_two_nondirectories(src_inode, dst_inode);

	ret = btrfs_remap_file_range_prep(src_file, off, dst_file, destoff,
					  &len, remap_flags);
	if (ret < 0 || len == 0)
		goto out_unlock;

	if (remap_flags & REMAP_FILE_DEDUP)
		ret = btrfs_extent_same(src_inode, off, len, dst_inode, destoff);
	else
		ret = btrfs_clone_files(dst_file, src_file, off, len, destoff);

out_unlock:
	if (same_inode)
		inode_unlock(src_inode);
	else
		unlock_two_nondirectories(src_inode, dst_inode);

	return ret < 0 ? ret : len;
}