// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 Alexander Block.  All rights reserved.
 */

#include <linux/bsearch.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/sort.h>
#include <linux/mount.h>
#include <linux/xattr.h>
#include <linux/posix_acl_xattr.h>
#include <linux/radix-tree.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include <linux/compat.h>
#include <linux/crc32c.h>

#include "send.h"
#include "backref.h"
#include "locking.h"
#include "disk-io.h"
#include "btrfs_inode.h"
#include "transaction.h"
#include "compression.h"

/*
 * A fs_path is a helper to dynamically build path names with unknown size.
 * It reallocates the internal buffer on demand.
 * It allows fast adding of path elements on the right side (normal path) and
 * fast adding to the left side (reversed path). A reversed path can also be
 * unreversed if needed.
 */
struct fs_path {
	union {
		struct {
			char *start;
			char *end;

			char *buf;
			unsigned short buf_len:15;
			unsigned short reversed:1;
			char inline_buf[];
		};
		/*
		 * Average path length does not exceed 200 bytes, we'll have
		 * better packing in the slab and a higher chance to satisfy
		 * an allocation later during send.
		 */
		char pad[256];
	};
};
#define FS_PATH_INLINE_SIZE \
	(sizeof(struct fs_path) - offsetof(struct fs_path, inline_buf))
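
/*
 * For illustration only: with the 256 byte union above, the inline buffer
 * is whatever remains after the three pointers and the two bitfields.
 * A rough sketch, assuming a typical 64 bit layout:
 *
 *	FS_PATH_INLINE_SIZE = 256 - offsetof(struct fs_path, inline_buf)
 *	                    = 256 - (3 * 8 + 2) = 230 bytes
 *
 * The exact value depends on the architecture; the offsetof() form above
 * is what counts.
 */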

/* reused for each extent */
struct clone_root {
	struct btrfs_root *root;
	u64 ino;
	u64 offset;

	u64 found_refs;
};

#define SEND_CTX_MAX_NAME_CACHE_SIZE 128
#define SEND_CTX_NAME_CACHE_CLEAN_SIZE (SEND_CTX_MAX_NAME_CACHE_SIZE * 2)

struct send_ctx {
	struct file *send_filp;
	loff_t send_off;
	char *send_buf;
	u32 send_size;
	u32 send_max_size;
	u64 total_send_size;
	u64 cmd_send_size[BTRFS_SEND_C_MAX + 1];
	u64 flags;	/* 'flags' member of btrfs_ioctl_send_args is u64 */

	struct btrfs_root *send_root;
	struct btrfs_root *parent_root;
	struct clone_root *clone_roots;
	int clone_roots_cnt;

	/* current state of the compare_tree call */
	struct btrfs_path *left_path;
	struct btrfs_path *right_path;
	struct btrfs_key *cmp_key;

	/*
	 * Info about the currently processed inode. In case of deleted
	 * inodes, these are the values from the deleted inode.
	 */
	u64 cur_ino;
	u64 cur_inode_gen;
	int cur_inode_new;
	int cur_inode_new_gen;
	int cur_inode_deleted;
	u64 cur_inode_size;
	u64 cur_inode_mode;
	u64 cur_inode_rdev;
	u64 cur_inode_last_extent;
	u64 cur_inode_next_write_offset;
	bool ignore_cur_inode;

	u64 send_progress;

	struct list_head new_refs;
	struct list_head deleted_refs;

	struct radix_tree_root name_cache;
	struct list_head name_cache_list;
	int name_cache_size;

	struct file_ra_state ra;

	char *read_buf;

	/*
	 * We process inodes in increasing order by inode number, so if before
	 * an incremental send we reverse the parent/child relationship of
	 * directories such that a directory with a lower inode number was
	 * the parent of a directory with a higher inode number, and the one
	 * becoming the new parent got renamed too, we can't rename/move the
	 * directory with the lower inode number when we finish processing
	 * it - we must process the directory with the higher inode number
	 * first, then rename/move it and then rename/move the directory with
	 * the lower inode number. Example follows.
	 *
	 * Tree state when the first send was performed:
	 *
	 * .
	 * |-- a                        (ino 257)
	 *     |-- b                    (ino 258)
	 *         |
	 *         |
	 *         |-- c                (ino 259)
	 *         |   |-- d            (ino 260)
	 *         |
	 *         |-- c2               (ino 261)
	 *
	 * Tree state when the second (incremental) send is performed:
	 *
	 * .
	 * |-- a                        (ino 257)
	 *     |-- b                    (ino 258)
	 *         |-- c2               (ino 261)
	 *             |-- d2           (ino 260)
	 *                 |-- cc       (ino 259)
	 *
	 * The sequence of steps that led to the second state was:
	 *
	 * mv /a/b/c/d /a/b/c2/d2
	 * mv /a/b/c /a/b/c2/d2/cc
	 *
	 * "c" has the lower inode number, but we can't move it (2nd mv
	 * operation) before we move "d", which has the higher inode number.
	 *
	 * So we just memorize which move/rename operations must be performed
	 * later when their respective parent is processed and moved/renamed.
	 */

	/* Indexed by parent directory inode number. */
	struct rb_root pending_dir_moves;

	/*
	 * Reverse index, indexed by the inode number of a directory that
	 * is waiting for the move/rename of its immediate parent before its
	 * own move/rename can be performed.
	 */
	struct rb_root waiting_dir_moves;

	/*
	 * A directory that is going to be rm'ed might have a child directory
	 * which is in the pending directory moves index above. In this case,
	 * the directory can only be removed after the move/rename of its child
	 * is performed. Example:
	 *
	 * Parent snapshot:
	 *
	 * .                        (ino 256)
	 * |-- a/                   (ino 257)
	 *     |-- b/               (ino 258)
	 *         |-- c/           (ino 259)
	 *         |   |-- x/       (ino 260)
	 *         |
	 *         |-- y/           (ino 261)
	 *
	 * Send snapshot:
	 *
	 * .                        (ino 256)
	 * |-- a/                   (ino 257)
	 *     |-- b/               (ino 258)
	 *         |-- YY/          (ino 261)
	 *              |-- x/      (ino 260)
	 *
	 * Sequence of steps that led to the send snapshot:
	 * rm -f /a/b/c/foo.txt
	 * mv /a/b/y /a/b/YY
	 * mv /a/b/c/x /a/b/YY
	 * rmdir /a/b/c
	 *
	 * When the child is processed, its move/rename is delayed until its
	 * parent is processed (as explained above), but all other operations
	 * like update utimes, chown, chgrp, etc, are performed and the paths
	 * that it uses for those operations must use the orphanized name of
	 * its parent (the directory we're going to rm later), so we need to
	 * memorize that name.
	 *
	 * Indexed by the inode number of the directory to be deleted.
	 */
	struct rb_root orphan_dirs;
};

struct pending_dir_move {
	struct rb_node node;
	struct list_head list;
	u64 parent_ino;
	u64 ino;
	u64 gen;
	struct list_head update_refs;
};

struct waiting_dir_move {
	struct rb_node node;
	u64 ino;
	/*
	 * There might be some directory that could not be removed because it
	 * was waiting for this directory inode to be moved first. Therefore
	 * after this directory is moved, we can try to rmdir the ino rmdir_ino.
	 */
	u64 rmdir_ino;
	bool orphanized;
};

struct orphan_dir_info {
	struct rb_node node;
	u64 ino;
	u64 gen;
	u64 last_dir_index_offset;
};

struct name_cache_entry {
	struct list_head list;
	/*
	 * radix_tree has only 32bit entries but we need to handle 64bit inums.
	 * We use the lower 32bit of the 64bit inum to store it in the tree. If
	 * more than one inum would fall into the same entry, we use radix_list
	 * to store the additional entries. radix_list is also used to store
	 * entries where two entries have the same inum but different
	 * generations.
	 */
	struct list_head radix_list;
	u64 ino;
	u64 gen;
	u64 parent_ino;
	u64 parent_gen;
	int ret;
	int need_later_update;
	int name_len;
	char name[];
};

__cold
static void inconsistent_snapshot_error(struct send_ctx *sctx,
					enum btrfs_compare_tree_result result,
					const char *what)
{
	const char *result_string;

	switch (result) {
	case BTRFS_COMPARE_TREE_NEW:
		result_string = "new";
		break;
	case BTRFS_COMPARE_TREE_DELETED:
		result_string = "deleted";
		break;
	case BTRFS_COMPARE_TREE_CHANGED:
		result_string = "updated";
		break;
	case BTRFS_COMPARE_TREE_SAME:
		ASSERT(0);
		result_string = "unchanged";
		break;
	default:
		ASSERT(0);
		result_string = "unexpected";
	}

	btrfs_err(sctx->send_root->fs_info,
		  "Send: inconsistent snapshot, found %s %s for inode %llu without updated inode item, send root is %llu, parent root is %llu",
		  result_string, what, sctx->cmp_key->objectid,
		  sctx->send_root->root_key.objectid,
		  (sctx->parent_root ?
		   sctx->parent_root->root_key.objectid : 0));
}

static int is_waiting_for_move(struct send_ctx *sctx, u64 ino);

static struct waiting_dir_move *
get_waiting_dir_move(struct send_ctx *sctx, u64 ino);

static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino);

static int need_send_hole(struct send_ctx *sctx)
{
	return (sctx->parent_root && !sctx->cur_inode_new &&
		!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted &&
		S_ISREG(sctx->cur_inode_mode));
}

static void fs_path_reset(struct fs_path *p)
{
	if (p->reversed) {
		p->start = p->buf + p->buf_len - 1;
		p->end = p->start;
		*p->start = 0;
	} else {
		p->start = p->buf;
		p->end = p->start;
		*p->start = 0;
	}
}

static struct fs_path *fs_path_alloc(void)
{
	struct fs_path *p;

	p = kmalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return NULL;
	p->reversed = 0;
	p->buf = p->inline_buf;
	p->buf_len = FS_PATH_INLINE_SIZE;
	fs_path_reset(p);
	return p;
}

static struct fs_path *fs_path_alloc_reversed(void)
{
	struct fs_path *p;

	p = fs_path_alloc();
	if (!p)
		return NULL;
	p->reversed = 1;
	fs_path_reset(p);
	return p;
}

static void fs_path_free(struct fs_path *p)
{
	if (!p)
		return;
	if (p->buf != p->inline_buf)
		kfree(p->buf);
	kfree(p);
}

static int fs_path_len(struct fs_path *p)
{
	return p->end - p->start;
}

static int fs_path_ensure_buf(struct fs_path *p, int len)
{
	char *tmp_buf;
	int path_len;
	int old_buf_len;

	len++;

	if (p->buf_len >= len)
		return 0;

	if (len > PATH_MAX) {
		WARN_ON(1);
		return -ENOMEM;
	}

	path_len = p->end - p->start;
	old_buf_len = p->buf_len;

	/*
	 * First time the inline_buf does not suffice
	 */
	if (p->buf == p->inline_buf) {
		tmp_buf = kmalloc(len, GFP_KERNEL);
		if (tmp_buf)
			memcpy(tmp_buf, p->buf, old_buf_len);
	} else {
		tmp_buf = krealloc(p->buf, len, GFP_KERNEL);
	}
	if (!tmp_buf)
		return -ENOMEM;
	p->buf = tmp_buf;
	/*
	 * The real size of the buffer is bigger, this will let the fast path
	 * happen most of the time
	 */
	p->buf_len = ksize(p->buf);

	if (p->reversed) {
		tmp_buf = p->buf + old_buf_len - path_len - 1;
		p->end = p->buf + p->buf_len - 1;
		p->start = p->end - path_len;
		memmove(p->start, tmp_buf, path_len + 1);
	} else {
		p->start = p->buf;
		p->end = p->start + path_len;
	}
	return 0;
}

static int fs_path_prepare_for_add(struct fs_path *p, int name_len,
				   char **prepared)
{
	int ret;
	int new_len;

	new_len = p->end - p->start + name_len;
	if (p->start != p->end)
		new_len++;
	ret = fs_path_ensure_buf(p, new_len);
	if (ret < 0)
		goto out;

	if (p->reversed) {
		if (p->start != p->end)
			*--p->start = '/';
		p->start -= name_len;
		*prepared = p->start;
	} else {
		if (p->start != p->end)
			*p->end++ = '/';
		*prepared = p->end;
		p->end += name_len;
		*p->end = 0;
	}

out:
	return ret;
}

static int fs_path_add(struct fs_path *p, const char *name, int name_len)
{
	int ret;
	char *prepared;

	ret = fs_path_prepare_for_add(p, name_len, &prepared);
	if (ret < 0)
		goto out;
	memcpy(prepared, name, name_len);

out:
	return ret;
}

static int fs_path_add_path(struct fs_path *p, struct fs_path *p2)
{
	int ret;
	char *prepared;

	ret = fs_path_prepare_for_add(p, p2->end - p2->start, &prepared);
	if (ret < 0)
		goto out;
	memcpy(prepared, p2->start, p2->end - p2->start);

out:
	return ret;
}

static int fs_path_add_from_extent_buffer(struct fs_path *p,
					  struct extent_buffer *eb,
					  unsigned long off, int len)
{
	int ret;
	char *prepared;

	ret = fs_path_prepare_for_add(p, len, &prepared);
	if (ret < 0)
		goto out;

	read_extent_buffer(eb, prepared, off, len);

out:
	return ret;
}

static int fs_path_copy(struct fs_path *p, struct fs_path *from)
{
	int ret;

	p->reversed = from->reversed;
	fs_path_reset(p);

	ret = fs_path_add_path(p, from);

	return ret;
}


static void fs_path_unreverse(struct fs_path *p)
{
	char *tmp;
	int len;

	if (!p->reversed)
		return;

	tmp = p->start;
	len = p->end - p->start;
	p->start = p->buf;
	p->end = p->start + len;
	memmove(p->start, tmp, len + 1);
	p->reversed = 0;
}
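
/*
 * Illustrative sketch (not compiled as part of send): building the path
 * "a/b/c" leaf-first with a reversed fs_path, the way get_cur_path()
 * below does while walking from an inode up to the subvolume root:
 *
 *	struct fs_path *p = fs_path_alloc_reversed();
 *
 *	fs_path_add(p, "c", 1);		// p->start -> "c"
 *	fs_path_add(p, "b", 1);		// p->start -> "b/c"
 *	fs_path_add(p, "a", 1);		// p->start -> "a/b/c"
 *	fs_path_unreverse(p);		// move the string to p->buf
 *	fs_path_free(p);
 *
 * Components are prepended in front of p->start, so no per-element
 * memmove is needed; only fs_path_unreverse() moves the final string once.
 */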

static struct btrfs_path *alloc_path_for_send(void)
{
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return NULL;
	path->search_commit_root = 1;
	path->skip_locking = 1;
	path->need_commit_sem = 1;
	return path;
}

static int write_buf(struct file *filp, const void *buf, u32 len, loff_t *off)
{
	int ret;
	u32 pos = 0;

	while (pos < len) {
		ret = kernel_write(filp, buf + pos, len - pos, off);
		/* TODO handle that correctly */
		/*if (ret == -ERESTARTSYS) {
			continue;
		}*/
		if (ret < 0)
			return ret;
		if (ret == 0) {
			return -EIO;
		}
		pos += ret;
	}

	return 0;
}

static int tlv_put(struct send_ctx *sctx, u16 attr, const void *data, int len)
{
	struct btrfs_tlv_header *hdr;
	int total_len = sizeof(*hdr) + len;
	int left = sctx->send_max_size - sctx->send_size;

	if (unlikely(left < total_len))
		return -EOVERFLOW;

	hdr = (struct btrfs_tlv_header *) (sctx->send_buf + sctx->send_size);
	hdr->tlv_type = cpu_to_le16(attr);
	hdr->tlv_len = cpu_to_le16(len);
	memcpy(hdr + 1, data, len);
	sctx->send_size += total_len;

	return 0;
}

#define TLV_PUT_DEFINE_INT(bits) \
	static int tlv_put_u##bits(struct send_ctx *sctx,		\
				   u##bits attr, u##bits value)		\
	{								\
		__le##bits __tmp = cpu_to_le##bits(value);		\
		return tlv_put(sctx, attr, &__tmp, sizeof(__tmp));	\
	}

TLV_PUT_DEFINE_INT(64)

static int tlv_put_string(struct send_ctx *sctx, u16 attr,
			  const char *str, int len)
{
	if (len == -1)
		len = strlen(str);
	return tlv_put(sctx, attr, str, len);
}

static int tlv_put_uuid(struct send_ctx *sctx, u16 attr,
			const u8 *uuid)
{
	return tlv_put(sctx, attr, uuid, BTRFS_UUID_SIZE);
}

static int tlv_put_btrfs_timespec(struct send_ctx *sctx, u16 attr,
				  struct extent_buffer *eb,
				  struct btrfs_timespec *ts)
{
	struct btrfs_timespec bts;

	read_extent_buffer(eb, &bts, (unsigned long)ts, sizeof(bts));
	return tlv_put(sctx, attr, &bts, sizeof(bts));
}


#define TLV_PUT(sctx, attrtype, data, attrlen) \
	do { \
		ret = tlv_put(sctx, attrtype, data, attrlen); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)

#define TLV_PUT_INT(sctx, attrtype, bits, value) \
	do { \
		ret = tlv_put_u##bits(sctx, attrtype, value); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)

#define TLV_PUT_U8(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 8, data)
#define TLV_PUT_U16(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 16, data)
#define TLV_PUT_U32(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 32, data)
#define TLV_PUT_U64(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 64, data)
#define TLV_PUT_STRING(sctx, attrtype, str, len) \
	do { \
		ret = tlv_put_string(sctx, attrtype, str, len); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
#define TLV_PUT_PATH(sctx, attrtype, p) \
	do { \
		ret = tlv_put_string(sctx, attrtype, p->start, \
				     p->end - p->start); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
#define TLV_PUT_UUID(sctx, attrtype, uuid) \
	do { \
		ret = tlv_put_uuid(sctx, attrtype, uuid); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
#define TLV_PUT_BTRFS_TIMESPEC(sctx, attrtype, eb, ts) \
	do { \
		ret = tlv_put_btrfs_timespec(sctx, attrtype, eb, ts); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)

static int send_header(struct send_ctx *sctx)
{
	struct btrfs_stream_header hdr;

	strcpy(hdr.magic, BTRFS_SEND_STREAM_MAGIC);
	hdr.version = cpu_to_le32(BTRFS_SEND_STREAM_VERSION);

	return write_buf(sctx->send_filp, &hdr, sizeof(hdr),
			 &sctx->send_off);
}

/*
 * For each command/item we want to send to userspace, we call this function.
 */
static int begin_cmd(struct send_ctx *sctx, int cmd)
{
	struct btrfs_cmd_header *hdr;

	if (WARN_ON(!sctx->send_buf))
		return -EINVAL;

	BUG_ON(sctx->send_size);

	sctx->send_size += sizeof(*hdr);
	hdr = (struct btrfs_cmd_header *)sctx->send_buf;
	hdr->cmd = cpu_to_le16(cmd);

	return 0;
}

static int send_cmd(struct send_ctx *sctx)
{
	int ret;
	struct btrfs_cmd_header *hdr;
	u32 crc;

	hdr = (struct btrfs_cmd_header *)sctx->send_buf;
	hdr->len = cpu_to_le32(sctx->send_size - sizeof(*hdr));
	hdr->crc = 0;

	crc = crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size);
	hdr->crc = cpu_to_le32(crc);

	ret = write_buf(sctx->send_filp, sctx->send_buf, sctx->send_size,
			&sctx->send_off);

	sctx->total_send_size += sctx->send_size;
	sctx->cmd_send_size[le16_to_cpu(hdr->cmd)] += sctx->send_size;
	sctx->send_size = 0;

	return ret;
}
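
/*
 * For illustration (a sketch; see send.h for the authoritative
 * definitions): each command produced by begin_cmd()/send_cmd() is framed
 * on the wire as
 *
 *	struct btrfs_cmd_header {
 *		__le32 len;	// size of the TLV payload, header excluded
 *		__le16 cmd;	// BTRFS_SEND_C_*
 *		__le32 crc;	// crc32c of the whole command, with the crc
 *				// field taken as zero during the computation
 *	};
 *	// followed by 'len' bytes of TLVs, each one being:
 *	struct btrfs_tlv_header {
 *		__le16 tlv_type;	// BTRFS_SEND_A_*
 *		__le16 tlv_len;		// payload size of this attribute
 *	};
 *	// followed by 'tlv_len' bytes of attribute data
 *
 * A whole stream is the btrfs_stream_header written by send_header()
 * followed by a sequence of such commands.
 */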

/*
 * Sends a move instruction to user space
 */
static int send_rename(struct send_ctx *sctx,
		       struct fs_path *from, struct fs_path *to)
{
	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
	int ret;

	btrfs_debug(fs_info, "send_rename %s -> %s", from->start, to->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_RENAME);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, from);
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_TO, to);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}

/*
 * Sends a link instruction to user space
 */
static int send_link(struct send_ctx *sctx,
		     struct fs_path *path, struct fs_path *lnk)
{
	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
	int ret;

	btrfs_debug(fs_info, "send_link %s -> %s", path->start, lnk->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_LINK);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, lnk);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}

/*
 * Sends an unlink instruction to user space
 */
static int send_unlink(struct send_ctx *sctx, struct fs_path *path)
{
	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
	int ret;

	btrfs_debug(fs_info, "send_unlink %s", path->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_UNLINK);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}

/*
 * Sends a rmdir instruction to user space
 */
static int send_rmdir(struct send_ctx *sctx, struct fs_path *path)
{
	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
	int ret;

	btrfs_debug(fs_info, "send_rmdir %s", path->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_RMDIR);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}

/*
 * Helper function to retrieve some fields from an inode item.
 */
static int __get_inode_info(struct btrfs_root *root, struct btrfs_path *path,
			    u64 ino, u64 *size, u64 *gen, u64 *mode, u64 *uid,
			    u64 *gid, u64 *rdev)
{
	int ret;
	struct btrfs_inode_item *ii;
	struct btrfs_key key;

	key.objectid = ino;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		return ret;
	}

	ii = btrfs_item_ptr(path->nodes[0], path->slots[0],
			struct btrfs_inode_item);
	if (size)
		*size = btrfs_inode_size(path->nodes[0], ii);
	if (gen)
		*gen = btrfs_inode_generation(path->nodes[0], ii);
	if (mode)
		*mode = btrfs_inode_mode(path->nodes[0], ii);
	if (uid)
		*uid = btrfs_inode_uid(path->nodes[0], ii);
	if (gid)
		*gid = btrfs_inode_gid(path->nodes[0], ii);
	if (rdev)
		*rdev = btrfs_inode_rdev(path->nodes[0], ii);

	return ret;
}

static int get_inode_info(struct btrfs_root *root,
			  u64 ino, u64 *size, u64 *gen,
			  u64 *mode, u64 *uid, u64 *gid,
			  u64 *rdev)
{
	struct btrfs_path *path;
	int ret;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;
	ret = __get_inode_info(root, path, ino, size, gen, mode, uid, gid,
			       rdev);
	btrfs_free_path(path);
	return ret;
}

typedef int (*iterate_inode_ref_t)(int num, u64 dir, int index,
				   struct fs_path *p,
				   void *ctx);

/*
 * Helper function to iterate the entries in ONE btrfs_inode_ref or
 * btrfs_inode_extref.
 * The iterate callback may return a non-zero value to stop iteration. This
 * can be a negative value for error codes or 1 to simply stop it.
 *
 * path must point to the INODE_REF or INODE_EXTREF when called.
 */
static int iterate_inode_ref(struct btrfs_root *root, struct btrfs_path *path,
			     struct btrfs_key *found_key, int resolve,
			     iterate_inode_ref_t iterate, void *ctx)
{
	struct extent_buffer *eb = path->nodes[0];
	struct btrfs_item *item;
	struct btrfs_inode_ref *iref;
	struct btrfs_inode_extref *extref;
	struct btrfs_path *tmp_path;
	struct fs_path *p;
	u32 cur = 0;
	u32 total;
	int slot = path->slots[0];
	u32 name_len;
	char *start;
	int ret = 0;
	int num = 0;
	int index;
	u64 dir;
	unsigned long name_off;
	unsigned long elem_size;
	unsigned long ptr;

	p = fs_path_alloc_reversed();
	if (!p)
		return -ENOMEM;

	tmp_path = alloc_path_for_send();
	if (!tmp_path) {
		fs_path_free(p);
		return -ENOMEM;
	}


	if (found_key->type == BTRFS_INODE_REF_KEY) {
		ptr = (unsigned long)btrfs_item_ptr(eb, slot,
						    struct btrfs_inode_ref);
		item = btrfs_item_nr(slot);
		total = btrfs_item_size(eb, item);
		elem_size = sizeof(*iref);
	} else {
		ptr = btrfs_item_ptr_offset(eb, slot);
		total = btrfs_item_size_nr(eb, slot);
		elem_size = sizeof(*extref);
	}

	while (cur < total) {
		fs_path_reset(p);

		if (found_key->type == BTRFS_INODE_REF_KEY) {
			iref = (struct btrfs_inode_ref *)(ptr + cur);
			name_len = btrfs_inode_ref_name_len(eb, iref);
			name_off = (unsigned long)(iref + 1);
			index = btrfs_inode_ref_index(eb, iref);
			dir = found_key->offset;
		} else {
			extref = (struct btrfs_inode_extref *)(ptr + cur);
			name_len = btrfs_inode_extref_name_len(eb, extref);
			name_off = (unsigned long)&extref->name;
			index = btrfs_inode_extref_index(eb, extref);
			dir = btrfs_inode_extref_parent(eb, extref);
		}

		if (resolve) {
			start = btrfs_ref_to_path(root, tmp_path, name_len,
						  name_off, eb, dir,
						  p->buf, p->buf_len);
			if (IS_ERR(start)) {
				ret = PTR_ERR(start);
				goto out;
			}
			if (start < p->buf) {
				/* overflow, try again with larger buffer */
				ret = fs_path_ensure_buf(p,
						p->buf_len + p->buf - start);
				if (ret < 0)
					goto out;
				start = btrfs_ref_to_path(root, tmp_path,
							  name_len, name_off,
							  eb, dir,
							  p->buf, p->buf_len);
				if (IS_ERR(start)) {
					ret = PTR_ERR(start);
					goto out;
				}
				BUG_ON(start < p->buf);
			}
			p->start = start;
		} else {
			ret = fs_path_add_from_extent_buffer(p, eb, name_off,
							     name_len);
			if (ret < 0)
				goto out;
		}

		cur += elem_size + name_len;
		ret = iterate(num, dir, index, p, ctx);
		if (ret)
			goto out;
		num++;
	}

out:
	btrfs_free_path(tmp_path);
	fs_path_free(p);
	return ret;
}
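
/*
 * A sketch of an iterate_inode_ref_t callback, for illustration only (the
 * real callbacks, e.g. __copy_first_ref() below, follow the same shape):
 * 'num' is the index of the ref within the item, 'dir' the parent
 * directory inode and 'p' the resolved name/path.
 *
 *	static int count_refs(int num, u64 dir, int index,
 *			      struct fs_path *p, void *ctx)
 *	{
 *		u64 *refs = ctx;
 *
 *		(*refs)++;
 *		return 0;	// 0 continues, 1 stops, <0 aborts with error
 *	}
 */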

typedef int (*iterate_dir_item_t)(int num, struct btrfs_key *di_key,
				  const char *name, int name_len,
				  const char *data, int data_len,
				  u8 type, void *ctx);

/*
 * Helper function to iterate the entries in ONE btrfs_dir_item.
 * The iterate callback may return a non-zero value to stop iteration. This
 * can be a negative value for error codes or 1 to simply stop it.
 *
 * path must point to the dir item when called.
 */
static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path,
			    iterate_dir_item_t iterate, void *ctx)
{
	int ret = 0;
	struct extent_buffer *eb;
	struct btrfs_item *item;
	struct btrfs_dir_item *di;
	struct btrfs_key di_key;
	char *buf = NULL;
	int buf_len;
	u32 name_len;
	u32 data_len;
	u32 cur;
	u32 len;
	u32 total;
	int slot;
	int num;
	u8 type;

	/*
	 * Start with a small buffer (1 page). If later we end up needing more
	 * space, which can happen for xattrs on a fs with a leaf size greater
	 * than the page size, attempt to increase the buffer. Typically xattr
	 * values are small.
	 */
	buf_len = PATH_MAX;
	buf = kmalloc(buf_len, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out;
	}

	eb = path->nodes[0];
	slot = path->slots[0];
	item = btrfs_item_nr(slot);
	di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
	cur = 0;
	len = 0;
	total = btrfs_item_size(eb, item);

	num = 0;
	while (cur < total) {
		name_len = btrfs_dir_name_len(eb, di);
		data_len = btrfs_dir_data_len(eb, di);
		type = btrfs_dir_type(eb, di);
		btrfs_dir_item_key_to_cpu(eb, di, &di_key);

		if (type == BTRFS_FT_XATTR) {
			if (name_len > XATTR_NAME_MAX) {
				ret = -ENAMETOOLONG;
				goto out;
			}
			if (name_len + data_len >
					BTRFS_MAX_XATTR_SIZE(root->fs_info)) {
				ret = -E2BIG;
				goto out;
			}
		} else {
			/*
			 * Path too long
			 */
			if (name_len + data_len > PATH_MAX) {
				ret = -ENAMETOOLONG;
				goto out;
			}
		}

		if (name_len + data_len > buf_len) {
			buf_len = name_len + data_len;
			if (is_vmalloc_addr(buf)) {
				vfree(buf);
				buf = NULL;
			} else {
				char *tmp = krealloc(buf, buf_len,
						GFP_KERNEL | __GFP_NOWARN);

				if (!tmp)
					kfree(buf);
				buf = tmp;
			}
			if (!buf) {
				buf = kvmalloc(buf_len, GFP_KERNEL);
				if (!buf) {
					ret = -ENOMEM;
					goto out;
				}
			}
		}

		read_extent_buffer(eb, buf, (unsigned long)(di + 1),
				name_len + data_len);

		len = sizeof(*di) + name_len + data_len;
		di = (struct btrfs_dir_item *)((char *)di + len);
		cur += len;

		ret = iterate(num, &di_key, buf, name_len, buf + name_len,
			      data_len, type, ctx);
		if (ret < 0)
			goto out;
		if (ret) {
			ret = 0;
			goto out;
		}

		num++;
	}

out:
	kvfree(buf);
	return ret;
}

static int __copy_first_ref(int num, u64 dir, int index,
			    struct fs_path *p, void *ctx)
{
	int ret;
	struct fs_path *pt = ctx;

	ret = fs_path_copy(pt, p);
	if (ret < 0)
		return ret;

	/* we want the first only */
	return 1;
}

/*
 * Retrieve the first path of an inode. If an inode has more than one
 * ref/hardlink, this is ignored.
 */
static int get_inode_path(struct btrfs_root *root,
			  u64 ino, struct fs_path *path)
{
	int ret;
	struct btrfs_key key, found_key;
	struct btrfs_path *p;

	p = alloc_path_for_send();
	if (!p)
		return -ENOMEM;

	fs_path_reset(path);

	key.objectid = ino;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = 0;

	ret = btrfs_search_slot_for_read(root, &key, p, 1, 0);
	if (ret < 0)
		goto out;
	if (ret) {
		ret = 1;
		goto out;
	}
	btrfs_item_key_to_cpu(p->nodes[0], &found_key, p->slots[0]);
	if (found_key.objectid != ino ||
	    (found_key.type != BTRFS_INODE_REF_KEY &&
	     found_key.type != BTRFS_INODE_EXTREF_KEY)) {
		ret = -ENOENT;
		goto out;
	}

	ret = iterate_inode_ref(root, p, &found_key, 1,
				__copy_first_ref, path);
	if (ret < 0)
		goto out;
	ret = 0;

out:
	btrfs_free_path(p);
	return ret;
}

struct backref_ctx {
	struct send_ctx *sctx;

	struct btrfs_path *path;
	/* number of total found references */
	u64 found;

	/*
	 * used for clones found in send_root. clones found behind cur_objectid
	 * and cur_offset are not considered as allowed clones.
	 */
	u64 cur_objectid;
	u64 cur_offset;

	/* may be truncated in case it's the last extent in a file */
	u64 extent_len;

	/* data offset in the file extent item */
	u64 data_offset;

	/* Just to check for bugs in backref resolving */
	int found_itself;
};

static int __clone_root_cmp_bsearch(const void *key, const void *elt)
{
	u64 root = (u64)(uintptr_t)key;
	struct clone_root *cr = (struct clone_root *)elt;

	if (root < cr->root->objectid)
		return -1;
	if (root > cr->root->objectid)
		return 1;
	return 0;
}

static int __clone_root_cmp_sort(const void *e1, const void *e2)
{
	struct clone_root *cr1 = (struct clone_root *)e1;
	struct clone_root *cr2 = (struct clone_root *)e2;

	if (cr1->root->objectid < cr2->root->objectid)
		return -1;
	if (cr1->root->objectid > cr2->root->objectid)
		return 1;
	return 0;
}
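
/*
 * Note, for illustration: the clone_roots array is sorted once by root
 * objectid when the send context is set up (elsewhere in send.c), roughly:
 *
 *	sort(sctx->clone_roots, sctx->clone_roots_cnt,
 *	     sizeof(*sctx->clone_roots), __clone_root_cmp_sort, NULL);
 *
 * so that __iterate_backrefs() below can match every backref against the
 * accepted clone sources with a cheap bsearch() instead of a linear scan.
 */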

/*
 * Called for every backref that is found for the current extent.
 * Results are collected in sctx->clone_roots->ino/offset/found_refs
 */
static int __iterate_backrefs(u64 ino, u64 offset, u64 root, void *ctx_)
{
	struct backref_ctx *bctx = ctx_;
	struct clone_root *found;
	int ret;
	u64 i_size;

	/* First check if the root is in the list of accepted clone sources */
	found = bsearch((void *)(uintptr_t)root, bctx->sctx->clone_roots,
			bctx->sctx->clone_roots_cnt,
			sizeof(struct clone_root),
			__clone_root_cmp_bsearch);
	if (!found)
		return 0;

	if (found->root == bctx->sctx->send_root &&
	    ino == bctx->cur_objectid &&
	    offset == bctx->cur_offset) {
		bctx->found_itself = 1;
	}

	/*
	 * There are inodes that have extents that lie behind their i_size.
	 * Don't accept clones from these extents.
	 */
	ret = __get_inode_info(found->root, bctx->path, ino, &i_size, NULL, NULL,
			       NULL, NULL, NULL);
	btrfs_release_path(bctx->path);
	if (ret < 0)
		return ret;

	if (offset + bctx->data_offset + bctx->extent_len > i_size)
		return 0;

	/*
	 * Make sure we don't consider clones from send_root that are
	 * behind the current inode/offset.
	 */
	if (found->root == bctx->sctx->send_root) {
		/*
		 * TODO for the moment we don't accept clones from the inode
		 * that is currently being sent. We may change this when
		 * BTRFS_IOC_CLONE_RANGE supports cloning from and to the same
		 * file.
		 */
		if (ino >= bctx->cur_objectid)
			return 0;
	}

	bctx->found++;
	found->found_refs++;
	if (ino < found->ino) {
		found->ino = ino;
		found->offset = offset;
	} else if (found->ino == ino) {
		/*
		 * same extent found more than once in the same file.
		 */
		if (found->offset > offset + bctx->extent_len)
			found->offset = offset;
	}

	return 0;
}

/*
 * Given an inode, offset and extent item, it finds a good clone for a clone
 * instruction. Returns -ENOENT when none could be found. The function makes
 * sure that the returned clone is usable at the point where sending is at the
 * moment. This means that no clones are accepted which lie behind the current
 * inode+offset.
 *
 * path must point to the extent item when called.
 */
static int find_extent_clone(struct send_ctx *sctx,
			     struct btrfs_path *path,
			     u64 ino, u64 data_offset,
			     u64 ino_size,
			     struct clone_root **found)
{
	struct btrfs_fs_info *fs_info = sctx->send_root->fs_info;
	int ret;
	int extent_type;
	u64 logical;
	u64 disk_byte;
	u64 num_bytes;
	u64 extent_item_pos;
	u64 flags = 0;
	struct btrfs_file_extent_item *fi;
	struct extent_buffer *eb = path->nodes[0];
	struct backref_ctx *backref_ctx = NULL;
	struct clone_root *cur_clone_root;
	struct btrfs_key found_key;
	struct btrfs_path *tmp_path;
	int compressed;
	u32 i;

	tmp_path = alloc_path_for_send();
	if (!tmp_path)
		return -ENOMEM;

	/* We only use this path under the commit sem */
	tmp_path->need_commit_sem = 0;

	backref_ctx = kmalloc(sizeof(*backref_ctx), GFP_KERNEL);
	if (!backref_ctx) {
		ret = -ENOMEM;
		goto out;
	}

	backref_ctx->path = tmp_path;

	if (data_offset >= ino_size) {
		/*
		 * There may be extents that lie behind the file's size.
		 * I at least had this in combination with snapshotting while
		 * writing large files.
		 */
		ret = 0;
		goto out;
	}

	fi = btrfs_item_ptr(eb, path->slots[0],
			struct btrfs_file_extent_item);
	extent_type = btrfs_file_extent_type(eb, fi);
	if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
		ret = -ENOENT;
		goto out;
	}
	compressed = btrfs_file_extent_compression(eb, fi);

	num_bytes = btrfs_file_extent_num_bytes(eb, fi);
	disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
	if (disk_byte == 0) {
		ret = -ENOENT;
		goto out;
	}
	logical = disk_byte + btrfs_file_extent_offset(eb, fi);

	down_read(&fs_info->commit_root_sem);
	ret = extent_from_logical(fs_info, disk_byte, tmp_path,
				  &found_key, &flags);
	up_read(&fs_info->commit_root_sem);
	btrfs_release_path(tmp_path);

	if (ret < 0)
		goto out;
	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		ret = -EIO;
		goto out;
	}

	/*
	 * Setup the clone roots.
	 */
	for (i = 0; i < sctx->clone_roots_cnt; i++) {
		cur_clone_root = sctx->clone_roots + i;
		cur_clone_root->ino = (u64)-1;
		cur_clone_root->offset = 0;
		cur_clone_root->found_refs = 0;
	}

	backref_ctx->sctx = sctx;
	backref_ctx->found = 0;
	backref_ctx->cur_objectid = ino;
	backref_ctx->cur_offset = data_offset;
	backref_ctx->found_itself = 0;
	backref_ctx->extent_len = num_bytes;
	/*
	 * For non-compressed extents iterate_extent_inodes() gives us extent
	 * offsets that already take into account the data offset, but not for
	 * compressed extents, since the offset is logical and not relative to
	 * the physical extent locations. We must take this into account to
	 * avoid sending clone offsets that go beyond the source file's size,
	 * which would result in the clone ioctl failing with -EINVAL on the
	 * receiving end.
	 */
	if (compressed == BTRFS_COMPRESS_NONE)
		backref_ctx->data_offset = 0;
	else
		backref_ctx->data_offset = btrfs_file_extent_offset(eb, fi);

	/*
	 * The last extent of a file may be too large due to page alignment.
	 * We need to adjust extent_len in this case so that the checks in
	 * __iterate_backrefs work.
	 */
	if (data_offset + num_bytes >= ino_size)
		backref_ctx->extent_len = ino_size - data_offset;

	/*
	 * Now collect all backrefs.
	 */
	if (compressed == BTRFS_COMPRESS_NONE)
		extent_item_pos = logical - found_key.objectid;
	else
		extent_item_pos = 0;
	ret = iterate_extent_inodes(fs_info, found_key.objectid,
				    extent_item_pos, 1, __iterate_backrefs,
				    backref_ctx, false);

	if (ret < 0)
		goto out;

	if (!backref_ctx->found_itself) {
		/* found a bug in backref code? */
		ret = -EIO;
		btrfs_err(fs_info,
			  "did not find backref in send_root. inode=%llu, offset=%llu, disk_byte=%llu found extent=%llu",
			  ino, data_offset, disk_byte, found_key.objectid);
		goto out;
	}

	btrfs_debug(fs_info,
		    "find_extent_clone: data_offset=%llu, ino=%llu, num_bytes=%llu, logical=%llu",
		    data_offset, ino, num_bytes, logical);

	if (!backref_ctx->found)
		btrfs_debug(fs_info, "no clones found");

	cur_clone_root = NULL;
	for (i = 0; i < sctx->clone_roots_cnt; i++) {
		if (sctx->clone_roots[i].found_refs) {
			if (!cur_clone_root)
				cur_clone_root = sctx->clone_roots + i;
			else if (sctx->clone_roots[i].root == sctx->send_root)
				/* prefer clones from send_root over others */
				cur_clone_root = sctx->clone_roots + i;
		}

	}

	if (cur_clone_root) {
		*found = cur_clone_root;
		ret = 0;
	} else {
		ret = -ENOENT;
	}

out:
	btrfs_free_path(tmp_path);
	kfree(backref_ctx);
	return ret;
}

static int read_symlink(struct btrfs_root *root,
			u64 ino,
			struct fs_path *dest)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_file_extent_item *ei;
	u8 type;
	u8 compression;
	unsigned long off;
	int len;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret) {
		/*
		 * An empty symlink inode. Can happen in rare error paths when
		 * creating a symlink (transaction committed before the inode
		 * eviction handler removed the symlink inode items and a crash
		 * happened in between or the subvol was snapshotted in
		 * between). Print an informative message to dmesg/syslog so
		 * that the user can delete the symlink.
		 */
		btrfs_err(root->fs_info,
			  "Found empty symlink inode %llu at root %llu",
			  ino, root->root_key.objectid);
		ret = -EIO;
		goto out;
	}

	ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
			struct btrfs_file_extent_item);
	type = btrfs_file_extent_type(path->nodes[0], ei);
	compression = btrfs_file_extent_compression(path->nodes[0], ei);
	BUG_ON(type != BTRFS_FILE_EXTENT_INLINE);
	BUG_ON(compression);

	off = btrfs_file_extent_inline_start(ei);
	len = btrfs_file_extent_ram_bytes(path->nodes[0], ei);

	ret = fs_path_add_from_extent_buffer(dest, path->nodes[0], off, len);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Helper function to generate a file name that is unique in the root of
 * send_root and parent_root. This is used to generate names for orphan inodes.
 */
static int gen_unique_name(struct send_ctx *sctx,
			   u64 ino, u64 gen,
			   struct fs_path *dest)
{
	int ret = 0;
	struct btrfs_path *path;
	struct btrfs_dir_item *di;
	char tmp[64];
	int len;
	u64 idx = 0;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	while (1) {
		len = snprintf(tmp, sizeof(tmp), "o%llu-%llu-%llu",
			       ino, gen, idx);
		ASSERT(len < sizeof(tmp));

		di = btrfs_lookup_dir_item(NULL, sctx->send_root,
				path, BTRFS_FIRST_FREE_OBJECTID,
				tmp, strlen(tmp), 0);
		btrfs_release_path(path);
		if (IS_ERR(di)) {
			ret = PTR_ERR(di);
			goto out;
		}
		if (di) {
			/* not unique, try again */
			idx++;
			continue;
		}

		if (!sctx->parent_root) {
			/* unique */
			ret = 0;
			break;
		}

		di = btrfs_lookup_dir_item(NULL, sctx->parent_root,
				path, BTRFS_FIRST_FREE_OBJECTID,
				tmp, strlen(tmp), 0);
		btrfs_release_path(path);
		if (IS_ERR(di)) {
			ret = PTR_ERR(di);
			goto out;
		}
		if (di) {
			/* not unique, try again */
			idx++;
			continue;
		}
		/* unique */
		break;
	}

	ret = fs_path_add(dest, tmp, strlen(tmp));

out:
	btrfs_free_path(path);
	return ret;
}
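
/*
 * For illustration: the generated orphan names have the form
 * "o<ino>-<gen>-<idx>", e.g. "o261-7-0" for inode 261 with generation 7.
 * idx is only bumped past 0 in the unlikely case that an entry with that
 * exact name already exists in the top level of one of the two roots.
 */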

enum inode_state {
	inode_state_no_change,
	inode_state_will_create,
	inode_state_did_create,
	inode_state_will_delete,
	inode_state_did_delete,
};

static int get_cur_inode_state(struct send_ctx *sctx, u64 ino, u64 gen)
{
	int ret;
	int left_ret;
	int right_ret;
	u64 left_gen;
	u64 right_gen;

	ret = get_inode_info(sctx->send_root, ino, NULL, &left_gen, NULL, NULL,
			NULL, NULL);
	if (ret < 0 && ret != -ENOENT)
		goto out;
	left_ret = ret;

	if (!sctx->parent_root) {
		right_ret = -ENOENT;
	} else {
		ret = get_inode_info(sctx->parent_root, ino, NULL, &right_gen,
				NULL, NULL, NULL, NULL);
		if (ret < 0 && ret != -ENOENT)
			goto out;
		right_ret = ret;
	}

	if (!left_ret && !right_ret) {
		if (left_gen == gen && right_gen == gen) {
			ret = inode_state_no_change;
		} else if (left_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_create;
			else
				ret = inode_state_will_create;
		} else if (right_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_delete;
			else
				ret = inode_state_will_delete;
		} else {
			ret = -ENOENT;
		}
	} else if (!left_ret) {
		if (left_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_create;
			else
				ret = inode_state_will_create;
		} else {
			ret = -ENOENT;
		}
	} else if (!right_ret) {
		if (right_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_delete;
			else
				ret = inode_state_will_delete;
		} else {
			ret = -ENOENT;
		}
	} else {
		ret = -ENOENT;
	}

out:
	return ret;
}
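
/*
 * A summary of the decision above, for illustration. "left" is the lookup
 * in send_root, "right" the lookup in parent_root, and "processed" means
 * ino < sctx->send_progress:
 *
 *	only left gen matches, processed	-> inode_state_did_create
 *	only left gen matches, not processed	-> inode_state_will_create
 *	only right gen matches, processed	-> inode_state_did_delete
 *	only right gen matches, not processed	-> inode_state_will_delete
 *	both gens match				-> inode_state_no_change
 *	no gen matches				-> -ENOENT
 */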

static int is_inode_existent(struct send_ctx *sctx, u64 ino, u64 gen)
{
	int ret;

	if (ino == BTRFS_FIRST_FREE_OBJECTID)
		return 1;

	ret = get_cur_inode_state(sctx, ino, gen);
	if (ret < 0)
		goto out;

	if (ret == inode_state_no_change ||
	    ret == inode_state_did_create ||
	    ret == inode_state_will_delete)
		ret = 1;
	else
		ret = 0;

out:
	return ret;
}

/*
 * Helper function to lookup a dir item in a dir.
 */
static int lookup_dir_item_inode(struct btrfs_root *root,
				 u64 dir, const char *name, int name_len,
				 u64 *found_inode,
				 u8 *found_type)
{
	int ret = 0;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	di = btrfs_lookup_dir_item(NULL, root, path,
			dir, name, name_len, 0);
	if (!di) {
		ret = -ENOENT;
		goto out;
	}
	if (IS_ERR(di)) {
		ret = PTR_ERR(di);
		goto out;
	}
	btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
	if (key.type == BTRFS_ROOT_ITEM_KEY) {
		ret = -ENOENT;
		goto out;
	}
	*found_inode = key.objectid;
	*found_type = btrfs_dir_type(path->nodes[0], di);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Looks up the first btrfs_inode_ref of a given ino. It returns the parent
 * dir, the generation of the parent dir and the name of the dir entry.
 */
static int get_first_ref(struct btrfs_root *root, u64 ino,
			 u64 *dir, u64 *dir_gen, struct fs_path *name)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;
	int len;
	u64 parent_dir;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = ino;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = 0;

	ret = btrfs_search_slot_for_read(root, &key, path, 1, 0);
	if (ret < 0)
		goto out;
	if (!ret)
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				path->slots[0]);
	if (ret || found_key.objectid != ino ||
	    (found_key.type != BTRFS_INODE_REF_KEY &&
	     found_key.type != BTRFS_INODE_EXTREF_KEY)) {
		ret = -ENOENT;
		goto out;
	}

	if (found_key.type == BTRFS_INODE_REF_KEY) {
		struct btrfs_inode_ref *iref;
		iref = btrfs_item_ptr(path->nodes[0], path->slots[0],
				      struct btrfs_inode_ref);
		len = btrfs_inode_ref_name_len(path->nodes[0], iref);
		ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
						     (unsigned long)(iref + 1),
						     len);
		parent_dir = found_key.offset;
	} else {
		struct btrfs_inode_extref *extref;
		extref = btrfs_item_ptr(path->nodes[0], path->slots[0],
					struct btrfs_inode_extref);
		len = btrfs_inode_extref_name_len(path->nodes[0], extref);
		ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
					(unsigned long)&extref->name, len);
		parent_dir = btrfs_inode_extref_parent(path->nodes[0], extref);
	}
	if (ret < 0)
		goto out;
	btrfs_release_path(path);

	if (dir_gen) {
		ret = get_inode_info(root, parent_dir, NULL, dir_gen, NULL,
				     NULL, NULL, NULL);
		if (ret < 0)
			goto out;
	}

	*dir = parent_dir;

out:
	btrfs_free_path(path);
	return ret;
}

static int is_first_ref(struct btrfs_root *root,
			u64 ino, u64 dir,
			const char *name, int name_len)
{
	int ret;
	struct fs_path *tmp_name;
	u64 tmp_dir;

	tmp_name = fs_path_alloc();
	if (!tmp_name)
		return -ENOMEM;

	ret = get_first_ref(root, ino, &tmp_dir, NULL, tmp_name);
	if (ret < 0)
		goto out;

	if (dir != tmp_dir || name_len != fs_path_len(tmp_name)) {
		ret = 0;
		goto out;
	}

	ret = !memcmp(tmp_name->start, name, name_len);

out:
	fs_path_free(tmp_name);
	return ret;
}

/*
 * Used by process_recorded_refs to determine if a new ref would overwrite an
 * already existing ref. In case it detects an overwrite, it returns the
 * inode/gen in who_ino/who_gen.
 * When an overwrite is detected, process_recorded_refs does proper orphanizing
 * to make sure later references to the overwritten inode are possible.
 * Orphanizing is however only required for the first ref of an inode.
 * process_recorded_refs does an additional is_first_ref check to see if
 * orphanizing is really required.
 */
static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen,
			      const char *name, int name_len,
			      u64 *who_ino, u64 *who_gen, u64 *who_mode)
{
	int ret = 0;
	u64 gen;
	u64 other_inode = 0;
	u8 other_type = 0;

	if (!sctx->parent_root)
		goto out;

	ret = is_inode_existent(sctx, dir, dir_gen);
	if (ret <= 0)
		goto out;

	/*
	 * If we have a parent root we need to verify that the parent dir was
	 * not deleted and then re-created, if it was then we have no overwrite
	 * and we can just unlink this entry.
	 */
	if (sctx->parent_root && dir != BTRFS_FIRST_FREE_OBJECTID) {
		ret = get_inode_info(sctx->parent_root, dir, NULL, &gen, NULL,
				     NULL, NULL, NULL);
		if (ret < 0 && ret != -ENOENT)
			goto out;
		if (ret) {
			ret = 0;
			goto out;
		}
		if (gen != dir_gen)
			goto out;
	}

	ret = lookup_dir_item_inode(sctx->parent_root, dir, name, name_len,
			&other_inode, &other_type);
	if (ret < 0 && ret != -ENOENT)
		goto out;
	if (ret) {
		ret = 0;
		goto out;
	}

	/*
	 * Check if the overwritten ref was already processed. If yes, the ref
	 * was already unlinked/moved, so we can safely assume that we will not
	 * overwrite anything at this point in time.
	 */
	if (other_inode > sctx->send_progress ||
	    is_waiting_for_move(sctx, other_inode)) {
		ret = get_inode_info(sctx->parent_root, other_inode, NULL,
				who_gen, who_mode, NULL, NULL, NULL);
		if (ret < 0)
			goto out;

		ret = 1;
		*who_ino = other_inode;
	} else {
		ret = 0;
	}

out:
	return ret;
}

/*
 * Checks if the ref was overwritten by an already processed inode. This is
 * used by __get_cur_name_and_parent to find out if the ref was orphanized and
 * thus the orphan name needs to be used.
 * process_recorded_refs also uses it to avoid unlinking of refs that were
 * overwritten.
 */
static int did_overwrite_ref(struct send_ctx *sctx,
			     u64 dir, u64 dir_gen,
			     u64 ino, u64 ino_gen,
			     const char *name, int name_len)
{
	int ret = 0;
	u64 gen;
	u64 ow_inode;
	u8 other_type;

	if (!sctx->parent_root)
		goto out;

	ret = is_inode_existent(sctx, dir, dir_gen);
	if (ret <= 0)
		goto out;

	if (dir != BTRFS_FIRST_FREE_OBJECTID) {
		ret = get_inode_info(sctx->send_root, dir, NULL, &gen, NULL,
				     NULL, NULL, NULL);
		if (ret < 0 && ret != -ENOENT)
			goto out;
		if (ret) {
			ret = 0;
			goto out;
		}
		if (gen != dir_gen)
			goto out;
	}

	/* check if the ref was overwritten by another ref */
	ret = lookup_dir_item_inode(sctx->send_root, dir, name, name_len,
			&ow_inode, &other_type);
	if (ret < 0 && ret != -ENOENT)
		goto out;
	if (ret) {
		/* was never and will never be overwritten */
		ret = 0;
		goto out;
	}

	ret = get_inode_info(sctx->send_root, ow_inode, NULL, &gen, NULL, NULL,
			NULL, NULL);
	if (ret < 0)
		goto out;

	if (ow_inode == ino && gen == ino_gen) {
		ret = 0;
		goto out;
	}

	/*
	 * We know that it is or will be overwritten. Check this now.
	 * The current inode being processed might have been the one that caused
	 * inode 'ino' to be orphanized, therefore check if ow_inode matches
	 * the current inode being processed.
	 */
	if ((ow_inode < sctx->send_progress) ||
	    (ino != sctx->cur_ino && ow_inode == sctx->cur_ino &&
	     gen == sctx->cur_inode_gen))
		ret = 1;
	else
		ret = 0;

out:
	return ret;
}

/*
 * Same as did_overwrite_ref, but also checks if it is the first ref of an
 * inode that got overwritten. This is used by process_recorded_refs to
 * determine if it has to use the path as returned by get_cur_path or the
 * orphan name.
 */
static int did_overwrite_first_ref(struct send_ctx *sctx, u64 ino, u64 gen)
{
	int ret = 0;
	struct fs_path *name = NULL;
	u64 dir;
	u64 dir_gen;

	if (!sctx->parent_root)
		goto out;

	name = fs_path_alloc();
	if (!name)
		return -ENOMEM;

	ret = get_first_ref(sctx->parent_root, ino, &dir, &dir_gen, name);
	if (ret < 0)
		goto out;

	ret = did_overwrite_ref(sctx, dir, dir_gen, ino, gen,
			name->start, fs_path_len(name));

out:
	fs_path_free(name);
	return ret;
}

/*
 * Insert a name cache entry. On 32bit kernels the radix tree index is 32bit,
 * so we need to do some special handling in case we have clashes. This function
 * takes care of this with the help of name_cache_entry::radix_list.
 * In case of error, nce is kfreed.
 */
static int name_cache_insert(struct send_ctx *sctx,
			     struct name_cache_entry *nce)
{
	int ret = 0;
	struct list_head *nce_head;

	nce_head = radix_tree_lookup(&sctx->name_cache,
				     (unsigned long)nce->ino);
	if (!nce_head) {
		nce_head = kmalloc(sizeof(*nce_head), GFP_KERNEL);
		if (!nce_head) {
			kfree(nce);
			return -ENOMEM;
		}
		INIT_LIST_HEAD(nce_head);

		ret = radix_tree_insert(&sctx->name_cache, nce->ino, nce_head);
		if (ret < 0) {
			kfree(nce_head);
			kfree(nce);
			return ret;
		}
	}
	list_add_tail(&nce->radix_list, nce_head);
	list_add_tail(&nce->list, &sctx->name_cache_list);
	sctx->name_cache_size++;

	return ret;
}

static void name_cache_delete(struct send_ctx *sctx,
			      struct name_cache_entry *nce)
{
	struct list_head *nce_head;

	nce_head = radix_tree_lookup(&sctx->name_cache,
				     (unsigned long)nce->ino);
	if (!nce_head) {
		btrfs_err(sctx->send_root->fs_info,
	      "name_cache_delete lookup failed ino %llu cache size %d, leaking memory",
			  nce->ino, sctx->name_cache_size);
	}

	list_del(&nce->radix_list);
	list_del(&nce->list);
	sctx->name_cache_size--;

	/*
	 * We may not get to the final release of nce_head if the lookup fails
	 */
	if (nce_head && list_empty(nce_head)) {
		radix_tree_delete(&sctx->name_cache, (unsigned long)nce->ino);
		kfree(nce_head);
	}
}

static struct name_cache_entry *name_cache_search(struct send_ctx *sctx,
						  u64 ino, u64 gen)
{
	struct list_head *nce_head;
	struct name_cache_entry *cur;

	nce_head = radix_tree_lookup(&sctx->name_cache, (unsigned long)ino);
	if (!nce_head)
		return NULL;

	list_for_each_entry(cur, nce_head, radix_list) {
		if (cur->ino == ino && cur->gen == gen)
			return cur;
	}
	return NULL;
}
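
/*
 * For illustration of the clash handling described above name_cache_insert():
 * on a 32bit kernel the radix tree index is the truncated lower 32 bits of
 * the inum, so e.g. inodes 0x1 and 0x100000001 share one radix tree slot.
 * Both entries then live on the same nce_head list and name_cache_search()
 * disambiguates by comparing the full 64bit ino and the generation.
 */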

/*
 * Removes the entry from the list and adds it back to the end. This marks the
 * entry as recently used so that name_cache_clean_unused does not remove it.
 */
static void name_cache_used(struct send_ctx *sctx, struct name_cache_entry *nce)
{
	list_del(&nce->list);
	list_add_tail(&nce->list, &sctx->name_cache_list);
}

/*
 * Remove some entries from the beginning of name_cache_list.
 */
static void name_cache_clean_unused(struct send_ctx *sctx)
{
	struct name_cache_entry *nce;

	if (sctx->name_cache_size < SEND_CTX_NAME_CACHE_CLEAN_SIZE)
		return;

	while (sctx->name_cache_size > SEND_CTX_MAX_NAME_CACHE_SIZE) {
		nce = list_entry(sctx->name_cache_list.next,
				struct name_cache_entry, list);
		name_cache_delete(sctx, nce);
		kfree(nce);
	}
}

static void name_cache_free(struct send_ctx *sctx)
{
	struct name_cache_entry *nce;

	while (!list_empty(&sctx->name_cache_list)) {
		nce = list_entry(sctx->name_cache_list.next,
				struct name_cache_entry, list);
		name_cache_delete(sctx, nce);
		kfree(nce);
	}
}

/*
 * Used by get_cur_path for each ref up to the root.
 * Returns 0 if it succeeded.
 * Returns 1 if the inode is not existent or got overwritten. In that case, the
 * name is an orphan name. This instructs get_cur_path to stop iterating. If 1
 * is returned, parent_ino/parent_gen are not guaranteed to be valid.
 * Returns <0 in case of error.
 */
static int __get_cur_name_and_parent(struct send_ctx *sctx,
				     u64 ino, u64 gen,
				     u64 *parent_ino,
				     u64 *parent_gen,
				     struct fs_path *dest)
{
	int ret;
	int nce_ret;
	struct name_cache_entry *nce = NULL;

	/*
	 * First check if we already did a call to this function with the same
	 * ino/gen. If yes, check if the cache entry is still up-to-date. If yes
	 * return the cached result.
	 */
	nce = name_cache_search(sctx, ino, gen);
	if (nce) {
		if (ino < sctx->send_progress && nce->need_later_update) {
			name_cache_delete(sctx, nce);
			kfree(nce);
			nce = NULL;
		} else {
			name_cache_used(sctx, nce);
			*parent_ino = nce->parent_ino;
			*parent_gen = nce->parent_gen;
			ret = fs_path_add(dest, nce->name, nce->name_len);
			if (ret < 0)
				goto out;
			ret = nce->ret;
			goto out;
		}
	}

	/*
	 * If the inode is not existent yet, add the orphan name and return 1.
	 * This should only happen for the parent dir that we determine in
	 * __record_new_ref.
	 */
	ret = is_inode_existent(sctx, ino, gen);
	if (ret < 0)
		goto out;

	if (!ret) {
		ret = gen_unique_name(sctx, ino, gen, dest);
		if (ret < 0)
			goto out;
		ret = 1;
		goto out_cache;
	}

	/*
	 * Depending on whether the inode was already processed or not, use
	 * send_root or parent_root for ref lookup.
	 */
	if (ino < sctx->send_progress)
		ret = get_first_ref(sctx->send_root, ino,
				    parent_ino, parent_gen, dest);
	else
		ret = get_first_ref(sctx->parent_root, ino,
				    parent_ino, parent_gen, dest);
	if (ret < 0)
		goto out;

	/*
	 * Check if the ref was overwritten by an inode's ref that was processed
	 * earlier. If yes, treat as orphan and return 1.
2190 */ 2191 ret = did_overwrite_ref(sctx, *parent_ino, *parent_gen, ino, gen, 2192 dest->start, dest->end - dest->start); 2193 if (ret < 0) 2194 goto out; 2195 if (ret) { 2196 fs_path_reset(dest); 2197 ret = gen_unique_name(sctx, ino, gen, dest); 2198 if (ret < 0) 2199 goto out; 2200 ret = 1; 2201 } 2202 2203 out_cache: 2204 /* 2205 * Store the result of the lookup in the name cache. 2206 */ 2207 nce = kmalloc(sizeof(*nce) + fs_path_len(dest) + 1, GFP_KERNEL); 2208 if (!nce) { 2209 ret = -ENOMEM; 2210 goto out; 2211 } 2212 2213 nce->ino = ino; 2214 nce->gen = gen; 2215 nce->parent_ino = *parent_ino; 2216 nce->parent_gen = *parent_gen; 2217 nce->name_len = fs_path_len(dest); 2218 nce->ret = ret; 2219 strcpy(nce->name, dest->start); 2220 2221 if (ino < sctx->send_progress) 2222 nce->need_later_update = 0; 2223 else 2224 nce->need_later_update = 1; 2225 2226 nce_ret = name_cache_insert(sctx, nce); 2227 if (nce_ret < 0) 2228 ret = nce_ret; 2229 name_cache_clean_unused(sctx); 2230 2231 out: 2232 return ret; 2233 } 2234 2235 /* 2236 * Magic happens here. This function returns the first ref to an inode as it 2237 * would look like while receiving the stream at this point in time. 2238 * We walk the path up to the root. For every inode in between, we check if it 2239 * was already processed/sent. If yes, we continue with the parent as found 2240 * in send_root. If not, we continue with the parent as found in parent_root. 2241 * If we encounter an inode that was deleted at this point in time, we use the 2242 * inode's "orphan" name instead of the real name and stop. Same with new inodes 2243 * that were not created yet and overwritten inodes/refs. 2244 * 2245 * When do we have orphan inodes: 2246 * 1. When an inode is freshly created and thus no valid refs are available yet 2247 * 2. When a directory lost all its refs (deleted) but still has dir items 2248 * inside which were not processed yet (pending for move/delete). If anyone 2249 * tried to get the path to the dir items, it would get a path inside that 2250 * orphan directory. 2251 * 3. When an inode is moved around or gets new links, it may overwrite the ref 2252 * of an unprocessed inode. If in that case the first ref would be 2253 * overwritten, the overwritten inode gets "orphanized". Later when we 2254 * process this overwritten inode, it is restored at a new place by moving 2255 * the orphan inode. 2256 * 2257 * sctx->send_progress tells this function at which point in time receiving 2258 * would be.
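 *
 * A rough sketch of the walk performed below, for illustration only
 * (error handling, the name cache and the orphan cases are left out):
 *
 *	while (ino != BTRFS_FIRST_FREE_OBJECTID) {
 *		root = ino already processed ? send_root : parent_root;
 *		get_first_ref(root, ino, &parent, &parent_gen, name);
 *		fs_path_add_path(dest, name);	(dest is reversed, so this
 *						 effectively prepends)
 *		ino = parent; gen = parent_gen;
 *	}
 *	fs_path_unreverse(dest);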
2259 */ 2260 static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen, 2261 struct fs_path *dest) 2262 { 2263 int ret = 0; 2264 struct fs_path *name = NULL; 2265 u64 parent_inode = 0; 2266 u64 parent_gen = 0; 2267 int stop = 0; 2268 2269 name = fs_path_alloc(); 2270 if (!name) { 2271 ret = -ENOMEM; 2272 goto out; 2273 } 2274 2275 dest->reversed = 1; 2276 fs_path_reset(dest); 2277 2278 while (!stop && ino != BTRFS_FIRST_FREE_OBJECTID) { 2279 struct waiting_dir_move *wdm; 2280 2281 fs_path_reset(name); 2282 2283 if (is_waiting_for_rm(sctx, ino)) { 2284 ret = gen_unique_name(sctx, ino, gen, name); 2285 if (ret < 0) 2286 goto out; 2287 ret = fs_path_add_path(dest, name); 2288 break; 2289 } 2290 2291 wdm = get_waiting_dir_move(sctx, ino); 2292 if (wdm && wdm->orphanized) { 2293 ret = gen_unique_name(sctx, ino, gen, name); 2294 stop = 1; 2295 } else if (wdm) { 2296 ret = get_first_ref(sctx->parent_root, ino, 2297 &parent_inode, &parent_gen, name); 2298 } else { 2299 ret = __get_cur_name_and_parent(sctx, ino, gen, 2300 &parent_inode, 2301 &parent_gen, name); 2302 if (ret) 2303 stop = 1; 2304 } 2305 2306 if (ret < 0) 2307 goto out; 2308 2309 ret = fs_path_add_path(dest, name); 2310 if (ret < 0) 2311 goto out; 2312 2313 ino = parent_inode; 2314 gen = parent_gen; 2315 } 2316 2317 out: 2318 fs_path_free(name); 2319 if (!ret) 2320 fs_path_unreverse(dest); 2321 return ret; 2322 } 2323 2324 /* 2325 * Sends a BTRFS_SEND_C_SUBVOL command/item to userspace 2326 */ 2327 static int send_subvol_begin(struct send_ctx *sctx) 2328 { 2329 int ret; 2330 struct btrfs_root *send_root = sctx->send_root; 2331 struct btrfs_root *parent_root = sctx->parent_root; 2332 struct btrfs_path *path; 2333 struct btrfs_key key; 2334 struct btrfs_root_ref *ref; 2335 struct extent_buffer *leaf; 2336 char *name = NULL; 2337 int namelen; 2338 2339 path = btrfs_alloc_path(); 2340 if (!path) 2341 return -ENOMEM; 2342 2343 name = kmalloc(BTRFS_PATH_NAME_MAX, GFP_KERNEL); 2344 if (!name) { 2345 btrfs_free_path(path); 2346 return -ENOMEM; 2347 } 2348 2349 key.objectid = send_root->objectid; 2350 key.type = BTRFS_ROOT_BACKREF_KEY; 2351 key.offset = 0; 2352 2353 ret = btrfs_search_slot_for_read(send_root->fs_info->tree_root, 2354 &key, path, 1, 0); 2355 if (ret < 0) 2356 goto out; 2357 if (ret) { 2358 ret = -ENOENT; 2359 goto out; 2360 } 2361 2362 leaf = path->nodes[0]; 2363 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 2364 if (key.type != BTRFS_ROOT_BACKREF_KEY || 2365 key.objectid != send_root->objectid) { 2366 ret = -ENOENT; 2367 goto out; 2368 } 2369 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); 2370 namelen = btrfs_root_ref_name_len(leaf, ref); 2371 read_extent_buffer(leaf, name, (unsigned long)(ref + 1), namelen); 2372 btrfs_release_path(path); 2373 2374 if (parent_root) { 2375 ret = begin_cmd(sctx, BTRFS_SEND_C_SNAPSHOT); 2376 if (ret < 0) 2377 goto out; 2378 } else { 2379 ret = begin_cmd(sctx, BTRFS_SEND_C_SUBVOL); 2380 if (ret < 0) 2381 goto out; 2382 } 2383 2384 TLV_PUT_STRING(sctx, BTRFS_SEND_A_PATH, name, namelen); 2385 2386 if (!btrfs_is_empty_uuid(sctx->send_root->root_item.received_uuid)) 2387 TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID, 2388 sctx->send_root->root_item.received_uuid); 2389 else 2390 TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID, 2391 sctx->send_root->root_item.uuid); 2392 2393 TLV_PUT_U64(sctx, BTRFS_SEND_A_CTRANSID, 2394 le64_to_cpu(sctx->send_root->root_item.ctransid)); 2395 if (parent_root) { 2396 if (!btrfs_is_empty_uuid(parent_root->root_item.received_uuid)) 2397 TLV_PUT_UUID(sctx, 
BTRFS_SEND_A_CLONE_UUID, 2398 parent_root->root_item.received_uuid); 2399 else 2400 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID, 2401 parent_root->root_item.uuid); 2402 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID, 2403 le64_to_cpu(sctx->parent_root->root_item.ctransid)); 2404 } 2405 2406 ret = send_cmd(sctx); 2407 2408 tlv_put_failure: 2409 out: 2410 btrfs_free_path(path); 2411 kfree(name); 2412 return ret; 2413 } 2414 2415 static int send_truncate(struct send_ctx *sctx, u64 ino, u64 gen, u64 size) 2416 { 2417 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; 2418 int ret = 0; 2419 struct fs_path *p; 2420 2421 btrfs_debug(fs_info, "send_truncate %llu size=%llu", ino, size); 2422 2423 p = fs_path_alloc(); 2424 if (!p) 2425 return -ENOMEM; 2426 2427 ret = begin_cmd(sctx, BTRFS_SEND_C_TRUNCATE); 2428 if (ret < 0) 2429 goto out; 2430 2431 ret = get_cur_path(sctx, ino, gen, p); 2432 if (ret < 0) 2433 goto out; 2434 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); 2435 TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, size); 2436 2437 ret = send_cmd(sctx); 2438 2439 tlv_put_failure: 2440 out: 2441 fs_path_free(p); 2442 return ret; 2443 } 2444 2445 static int send_chmod(struct send_ctx *sctx, u64 ino, u64 gen, u64 mode) 2446 { 2447 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; 2448 int ret = 0; 2449 struct fs_path *p; 2450 2451 btrfs_debug(fs_info, "send_chmod %llu mode=%llu", ino, mode); 2452 2453 p = fs_path_alloc(); 2454 if (!p) 2455 return -ENOMEM; 2456 2457 ret = begin_cmd(sctx, BTRFS_SEND_C_CHMOD); 2458 if (ret < 0) 2459 goto out; 2460 2461 ret = get_cur_path(sctx, ino, gen, p); 2462 if (ret < 0) 2463 goto out; 2464 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); 2465 TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode & 07777); 2466 2467 ret = send_cmd(sctx); 2468 2469 tlv_put_failure: 2470 out: 2471 fs_path_free(p); 2472 return ret; 2473 } 2474 2475 static int send_chown(struct send_ctx *sctx, u64 ino, u64 gen, u64 uid, u64 gid) 2476 { 2477 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; 2478 int ret = 0; 2479 struct fs_path *p; 2480 2481 btrfs_debug(fs_info, "send_chown %llu uid=%llu, gid=%llu", 2482 ino, uid, gid); 2483 2484 p = fs_path_alloc(); 2485 if (!p) 2486 return -ENOMEM; 2487 2488 ret = begin_cmd(sctx, BTRFS_SEND_C_CHOWN); 2489 if (ret < 0) 2490 goto out; 2491 2492 ret = get_cur_path(sctx, ino, gen, p); 2493 if (ret < 0) 2494 goto out; 2495 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); 2496 TLV_PUT_U64(sctx, BTRFS_SEND_A_UID, uid); 2497 TLV_PUT_U64(sctx, BTRFS_SEND_A_GID, gid); 2498 2499 ret = send_cmd(sctx); 2500 2501 tlv_put_failure: 2502 out: 2503 fs_path_free(p); 2504 return ret; 2505 } 2506 2507 static int send_utimes(struct send_ctx *sctx, u64 ino, u64 gen) 2508 { 2509 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; 2510 int ret = 0; 2511 struct fs_path *p = NULL; 2512 struct btrfs_inode_item *ii; 2513 struct btrfs_path *path = NULL; 2514 struct extent_buffer *eb; 2515 struct btrfs_key key; 2516 int slot; 2517 2518 btrfs_debug(fs_info, "send_utimes %llu", ino); 2519 2520 p = fs_path_alloc(); 2521 if (!p) 2522 return -ENOMEM; 2523 2524 path = alloc_path_for_send(); 2525 if (!path) { 2526 ret = -ENOMEM; 2527 goto out; 2528 } 2529 2530 key.objectid = ino; 2531 key.type = BTRFS_INODE_ITEM_KEY; 2532 key.offset = 0; 2533 ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0); 2534 if (ret > 0) 2535 ret = -ENOENT; 2536 if (ret < 0) 2537 goto out; 2538 2539 eb = path->nodes[0]; 2540 slot = path->slots[0]; 2541 ii = btrfs_item_ptr(eb, slot, struct btrfs_inode_item); 2542 
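	/*
	 * 'ii' points into the leaf 'eb' and stays valid as long as 'path'
	 * holds its reference on that leaf; the TLV_PUT_BTRFS_TIMESPEC
	 * calls below read the timespecs directly out of the extent buffer.
	 */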
2543 ret = begin_cmd(sctx, BTRFS_SEND_C_UTIMES); 2544 if (ret < 0) 2545 goto out; 2546 2547 ret = get_cur_path(sctx, ino, gen, p); 2548 if (ret < 0) 2549 goto out; 2550 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); 2551 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_ATIME, eb, &ii->atime); 2552 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_MTIME, eb, &ii->mtime); 2553 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_CTIME, eb, &ii->ctime); 2554 /* TODO Add otime support when the otime patches get into upstream */ 2555 2556 ret = send_cmd(sctx); 2557 2558 tlv_put_failure: 2559 out: 2560 fs_path_free(p); 2561 btrfs_free_path(path); 2562 return ret; 2563 } 2564 2565 /* 2566 * Sends a BTRFS_SEND_C_MKXXX or SYMLINK command to user space. We don't have 2567 * a valid path yet because we did not process the refs yet. So, the inode 2568 * is created as orphan. 2569 */ 2570 static int send_create_inode(struct send_ctx *sctx, u64 ino) 2571 { 2572 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; 2573 int ret = 0; 2574 struct fs_path *p; 2575 int cmd; 2576 u64 gen; 2577 u64 mode; 2578 u64 rdev; 2579 2580 btrfs_debug(fs_info, "send_create_inode %llu", ino); 2581 2582 p = fs_path_alloc(); 2583 if (!p) 2584 return -ENOMEM; 2585 2586 if (ino != sctx->cur_ino) { 2587 ret = get_inode_info(sctx->send_root, ino, NULL, &gen, &mode, 2588 NULL, NULL, &rdev); 2589 if (ret < 0) 2590 goto out; 2591 } else { 2592 gen = sctx->cur_inode_gen; 2593 mode = sctx->cur_inode_mode; 2594 rdev = sctx->cur_inode_rdev; 2595 } 2596 2597 if (S_ISREG(mode)) { 2598 cmd = BTRFS_SEND_C_MKFILE; 2599 } else if (S_ISDIR(mode)) { 2600 cmd = BTRFS_SEND_C_MKDIR; 2601 } else if (S_ISLNK(mode)) { 2602 cmd = BTRFS_SEND_C_SYMLINK; 2603 } else if (S_ISCHR(mode) || S_ISBLK(mode)) { 2604 cmd = BTRFS_SEND_C_MKNOD; 2605 } else if (S_ISFIFO(mode)) { 2606 cmd = BTRFS_SEND_C_MKFIFO; 2607 } else if (S_ISSOCK(mode)) { 2608 cmd = BTRFS_SEND_C_MKSOCK; 2609 } else { 2610 btrfs_warn(sctx->send_root->fs_info, "unexpected inode type %o", 2611 (int)(mode & S_IFMT)); 2612 ret = -EOPNOTSUPP; 2613 goto out; 2614 } 2615 2616 ret = begin_cmd(sctx, cmd); 2617 if (ret < 0) 2618 goto out; 2619 2620 ret = gen_unique_name(sctx, ino, gen, p); 2621 if (ret < 0) 2622 goto out; 2623 2624 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); 2625 TLV_PUT_U64(sctx, BTRFS_SEND_A_INO, ino); 2626 2627 if (S_ISLNK(mode)) { 2628 fs_path_reset(p); 2629 ret = read_symlink(sctx->send_root, ino, p); 2630 if (ret < 0) 2631 goto out; 2632 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, p); 2633 } else if (S_ISCHR(mode) || S_ISBLK(mode) || 2634 S_ISFIFO(mode) || S_ISSOCK(mode)) { 2635 TLV_PUT_U64(sctx, BTRFS_SEND_A_RDEV, new_encode_dev(rdev)); 2636 TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode); 2637 } 2638 2639 ret = send_cmd(sctx); 2640 if (ret < 0) 2641 goto out; 2642 2643 2644 tlv_put_failure: 2645 out: 2646 fs_path_free(p); 2647 return ret; 2648 } 2649 2650 /* 2651 * We need some special handling for inodes that get processed before the parent 2652 * directory got created. See process_recorded_refs for details. 2653 * This function does the check if we already created the dir out of order. 
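 *
 * Example (illustrative, inode numbers made up): if a dir (ino 261)
 * contains a file (ino 259), the file is processed first and forces the
 * creation of its parent dir at that time. When inode 261 is processed
 * later, this check tells us that no second MKDIR command may be sent
 * for it.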
2654 */ 2655 static int did_create_dir(struct send_ctx *sctx, u64 dir) 2656 { 2657 int ret = 0; 2658 struct btrfs_path *path = NULL; 2659 struct btrfs_key key; 2660 struct btrfs_key found_key; 2661 struct btrfs_key di_key; 2662 struct extent_buffer *eb; 2663 struct btrfs_dir_item *di; 2664 int slot; 2665 2666 path = alloc_path_for_send(); 2667 if (!path) { 2668 ret = -ENOMEM; 2669 goto out; 2670 } 2671 2672 key.objectid = dir; 2673 key.type = BTRFS_DIR_INDEX_KEY; 2674 key.offset = 0; 2675 ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0); 2676 if (ret < 0) 2677 goto out; 2678 2679 while (1) { 2680 eb = path->nodes[0]; 2681 slot = path->slots[0]; 2682 if (slot >= btrfs_header_nritems(eb)) { 2683 ret = btrfs_next_leaf(sctx->send_root, path); 2684 if (ret < 0) { 2685 goto out; 2686 } else if (ret > 0) { 2687 ret = 0; 2688 break; 2689 } 2690 continue; 2691 } 2692 2693 btrfs_item_key_to_cpu(eb, &found_key, slot); 2694 if (found_key.objectid != key.objectid || 2695 found_key.type != key.type) { 2696 ret = 0; 2697 goto out; 2698 } 2699 2700 di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item); 2701 btrfs_dir_item_key_to_cpu(eb, di, &di_key); 2702 2703 if (di_key.type != BTRFS_ROOT_ITEM_KEY && 2704 di_key.objectid < sctx->send_progress) { 2705 ret = 1; 2706 goto out; 2707 } 2708 2709 path->slots[0]++; 2710 } 2711 2712 out: 2713 btrfs_free_path(path); 2714 return ret; 2715 } 2716 2717 /* 2718 * Only creates the inode if it is: 2719 * 1. Not a directory 2720 * 2. Or a directory which was not created already due to out of order 2721 * directories. See did_create_dir and process_recorded_refs for details. 2722 */ 2723 static int send_create_inode_if_needed(struct send_ctx *sctx) 2724 { 2725 int ret; 2726 2727 if (S_ISDIR(sctx->cur_inode_mode)) { 2728 ret = did_create_dir(sctx, sctx->cur_ino); 2729 if (ret < 0) 2730 goto out; 2731 if (ret) { 2732 ret = 0; 2733 goto out; 2734 } 2735 } 2736 2737 ret = send_create_inode(sctx, sctx->cur_ino); 2738 if (ret < 0) 2739 goto out; 2740 2741 out: 2742 return ret; 2743 } 2744 2745 struct recorded_ref { 2746 struct list_head list; 2747 char *name; 2748 struct fs_path *full_path; 2749 u64 dir; 2750 u64 dir_gen; 2751 int name_len; 2752 }; 2753 2754 static void set_ref_path(struct recorded_ref *ref, struct fs_path *path) 2755 { 2756 ref->full_path = path; 2757 ref->name = (char *)kbasename(ref->full_path->start); 2758 ref->name_len = ref->full_path->end - ref->name; 2759 } 2760 2761 /* 2762 * We need to process new refs before deleted refs, but compare_tree gives us 2763 * everything mixed. So we first record all refs and later process them. 2764 * This function is a helper to record one ref. 
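 *
 * For instance (illustrative), a file rename shows up as one new ref
 * plus one deleted ref for the same inode; recording both first lets
 * process_recorded_refs create the new name before the old one is
 * unlinked.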
2765 */ 2766 static int __record_ref(struct list_head *head, u64 dir, 2767 u64 dir_gen, struct fs_path *path) 2768 { 2769 struct recorded_ref *ref; 2770 2771 ref = kmalloc(sizeof(*ref), GFP_KERNEL); 2772 if (!ref) 2773 return -ENOMEM; 2774 2775 ref->dir = dir; 2776 ref->dir_gen = dir_gen; 2777 set_ref_path(ref, path); 2778 list_add_tail(&ref->list, head); 2779 return 0; 2780 } 2781 2782 static int dup_ref(struct recorded_ref *ref, struct list_head *list) 2783 { 2784 struct recorded_ref *new; 2785 2786 new = kmalloc(sizeof(*ref), GFP_KERNEL); 2787 if (!new) 2788 return -ENOMEM; 2789 2790 new->dir = ref->dir; 2791 new->dir_gen = ref->dir_gen; 2792 new->full_path = NULL; 2793 INIT_LIST_HEAD(&new->list); 2794 list_add_tail(&new->list, list); 2795 return 0; 2796 } 2797 2798 static void __free_recorded_refs(struct list_head *head) 2799 { 2800 struct recorded_ref *cur; 2801 2802 while (!list_empty(head)) { 2803 cur = list_entry(head->next, struct recorded_ref, list); 2804 fs_path_free(cur->full_path); 2805 list_del(&cur->list); 2806 kfree(cur); 2807 } 2808 } 2809 2810 static void free_recorded_refs(struct send_ctx *sctx) 2811 { 2812 __free_recorded_refs(&sctx->new_refs); 2813 __free_recorded_refs(&sctx->deleted_refs); 2814 } 2815 2816 /* 2817 * Renames/moves a file/dir to its orphan name. Used when the first 2818 * ref of an unprocessed inode gets overwritten and for all non empty 2819 * directories. 2820 */ 2821 static int orphanize_inode(struct send_ctx *sctx, u64 ino, u64 gen, 2822 struct fs_path *path) 2823 { 2824 int ret; 2825 struct fs_path *orphan; 2826 2827 orphan = fs_path_alloc(); 2828 if (!orphan) 2829 return -ENOMEM; 2830 2831 ret = gen_unique_name(sctx, ino, gen, orphan); 2832 if (ret < 0) 2833 goto out; 2834 2835 ret = send_rename(sctx, path, orphan); 2836 2837 out: 2838 fs_path_free(orphan); 2839 return ret; 2840 } 2841 2842 static struct orphan_dir_info * 2843 add_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino) 2844 { 2845 struct rb_node **p = &sctx->orphan_dirs.rb_node; 2846 struct rb_node *parent = NULL; 2847 struct orphan_dir_info *entry, *odi; 2848 2849 while (*p) { 2850 parent = *p; 2851 entry = rb_entry(parent, struct orphan_dir_info, node); 2852 if (dir_ino < entry->ino) { 2853 p = &(*p)->rb_left; 2854 } else if (dir_ino > entry->ino) { 2855 p = &(*p)->rb_right; 2856 } else { 2857 return entry; 2858 } 2859 } 2860 2861 odi = kmalloc(sizeof(*odi), GFP_KERNEL); 2862 if (!odi) 2863 return ERR_PTR(-ENOMEM); 2864 odi->ino = dir_ino; 2865 odi->gen = 0; 2866 odi->last_dir_index_offset = 0; 2867 2868 rb_link_node(&odi->node, parent, p); 2869 rb_insert_color(&odi->node, &sctx->orphan_dirs); 2870 return odi; 2871 } 2872 2873 static struct orphan_dir_info * 2874 get_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino) 2875 { 2876 struct rb_node *n = sctx->orphan_dirs.rb_node; 2877 struct orphan_dir_info *entry; 2878 2879 while (n) { 2880 entry = rb_entry(n, struct orphan_dir_info, node); 2881 if (dir_ino < entry->ino) 2882 n = n->rb_left; 2883 else if (dir_ino > entry->ino) 2884 n = n->rb_right; 2885 else 2886 return entry; 2887 } 2888 return NULL; 2889 } 2890 2891 static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino) 2892 { 2893 struct orphan_dir_info *odi = get_orphan_dir_info(sctx, dir_ino); 2894 2895 return odi != NULL; 2896 } 2897 2898 static void free_orphan_dir_info(struct send_ctx *sctx, 2899 struct orphan_dir_info *odi) 2900 { 2901 if (!odi) 2902 return; 2903 rb_erase(&odi->node, &sctx->orphan_dirs); 2904 kfree(odi); 2905 } 2906 2907 /* 2908 * Returns 1 if a 
directory can be removed at this point in time. 2909 * We check this by iterating all dir items and checking if the inode behind 2910 * the dir item was already processed. 2911 */ 2912 static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen, 2913 u64 send_progress) 2914 { 2915 int ret = 0; 2916 struct btrfs_root *root = sctx->parent_root; 2917 struct btrfs_path *path; 2918 struct btrfs_key key; 2919 struct btrfs_key found_key; 2920 struct btrfs_key loc; 2921 struct btrfs_dir_item *di; 2922 struct orphan_dir_info *odi = NULL; 2923 2924 /* 2925 * Don't try to rmdir the top/root subvolume dir. 2926 */ 2927 if (dir == BTRFS_FIRST_FREE_OBJECTID) 2928 return 0; 2929 2930 path = alloc_path_for_send(); 2931 if (!path) 2932 return -ENOMEM; 2933 2934 key.objectid = dir; 2935 key.type = BTRFS_DIR_INDEX_KEY; 2936 key.offset = 0; 2937 2938 odi = get_orphan_dir_info(sctx, dir); 2939 if (odi) 2940 key.offset = odi->last_dir_index_offset; 2941 2942 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 2943 if (ret < 0) 2944 goto out; 2945 2946 while (1) { 2947 struct waiting_dir_move *dm; 2948 2949 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) { 2950 ret = btrfs_next_leaf(root, path); 2951 if (ret < 0) 2952 goto out; 2953 else if (ret > 0) 2954 break; 2955 continue; 2956 } 2957 btrfs_item_key_to_cpu(path->nodes[0], &found_key, 2958 path->slots[0]); 2959 if (found_key.objectid != key.objectid || 2960 found_key.type != key.type) 2961 break; 2962 2963 di = btrfs_item_ptr(path->nodes[0], path->slots[0], 2964 struct btrfs_dir_item); 2965 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &loc); 2966 2967 dm = get_waiting_dir_move(sctx, loc.objectid); 2968 if (dm) { 2969 odi = add_orphan_dir_info(sctx, dir); 2970 if (IS_ERR(odi)) { 2971 ret = PTR_ERR(odi); 2972 goto out; 2973 } 2974 odi->gen = dir_gen; 2975 odi->last_dir_index_offset = found_key.offset; 2976 dm->rmdir_ino = dir; 2977 ret = 0; 2978 goto out; 2979 } 2980 2981 if (loc.objectid > send_progress) { 2982 odi = add_orphan_dir_info(sctx, dir); 2983 if (IS_ERR(odi)) { 2984 ret = PTR_ERR(odi); 2985 goto out; 2986 } 2987 odi->gen = dir_gen; 2988 odi->last_dir_index_offset = found_key.offset; 2989 ret = 0; 2990 goto out; 2991 } 2992 2993 path->slots[0]++; 2994 } 2995 free_orphan_dir_info(sctx, odi); 2996 2997 ret = 1; 2998 2999 out: 3000 btrfs_free_path(path); 3001 return ret; 3002 } 3003 3004 static int is_waiting_for_move(struct send_ctx *sctx, u64 ino) 3005 { 3006 struct waiting_dir_move *entry = get_waiting_dir_move(sctx, ino); 3007 3008 return entry != NULL; 3009 } 3010 3011 static int add_waiting_dir_move(struct send_ctx *sctx, u64 ino, bool orphanized) 3012 { 3013 struct rb_node **p = &sctx->waiting_dir_moves.rb_node; 3014 struct rb_node *parent = NULL; 3015 struct waiting_dir_move *entry, *dm; 3016 3017 dm = kmalloc(sizeof(*dm), GFP_KERNEL); 3018 if (!dm) 3019 return -ENOMEM; 3020 dm->ino = ino; 3021 dm->rmdir_ino = 0; 3022 dm->orphanized = orphanized; 3023 3024 while (*p) { 3025 parent = *p; 3026 entry = rb_entry(parent, struct waiting_dir_move, node); 3027 if (ino < entry->ino) { 3028 p = &(*p)->rb_left; 3029 } else if (ino > entry->ino) { 3030 p = &(*p)->rb_right; 3031 } else { 3032 kfree(dm); 3033 return -EEXIST; 3034 } 3035 } 3036 3037 rb_link_node(&dm->node, parent, p); 3038 rb_insert_color(&dm->node, &sctx->waiting_dir_moves); 3039 return 0; 3040 } 3041 3042 static struct waiting_dir_move * 3043 get_waiting_dir_move(struct send_ctx *sctx, u64 ino) 3044 { 3045 struct rb_node *n = sctx->waiting_dir_moves.rb_node; 3046 struct 
waiting_dir_move *entry; 3047 3048 while (n) { 3049 entry = rb_entry(n, struct waiting_dir_move, node); 3050 if (ino < entry->ino) 3051 n = n->rb_left; 3052 else if (ino > entry->ino) 3053 n = n->rb_right; 3054 else 3055 return entry; 3056 } 3057 return NULL; 3058 } 3059 3060 static void free_waiting_dir_move(struct send_ctx *sctx, 3061 struct waiting_dir_move *dm) 3062 { 3063 if (!dm) 3064 return; 3065 rb_erase(&dm->node, &sctx->waiting_dir_moves); 3066 kfree(dm); 3067 } 3068 3069 static int add_pending_dir_move(struct send_ctx *sctx, 3070 u64 ino, 3071 u64 ino_gen, 3072 u64 parent_ino, 3073 struct list_head *new_refs, 3074 struct list_head *deleted_refs, 3075 const bool is_orphan) 3076 { 3077 struct rb_node **p = &sctx->pending_dir_moves.rb_node; 3078 struct rb_node *parent = NULL; 3079 struct pending_dir_move *entry = NULL, *pm; 3080 struct recorded_ref *cur; 3081 int exists = 0; 3082 int ret; 3083 3084 pm = kmalloc(sizeof(*pm), GFP_KERNEL); 3085 if (!pm) 3086 return -ENOMEM; 3087 pm->parent_ino = parent_ino; 3088 pm->ino = ino; 3089 pm->gen = ino_gen; 3090 INIT_LIST_HEAD(&pm->list); 3091 INIT_LIST_HEAD(&pm->update_refs); 3092 RB_CLEAR_NODE(&pm->node); 3093 3094 while (*p) { 3095 parent = *p; 3096 entry = rb_entry(parent, struct pending_dir_move, node); 3097 if (parent_ino < entry->parent_ino) { 3098 p = &(*p)->rb_left; 3099 } else if (parent_ino > entry->parent_ino) { 3100 p = &(*p)->rb_right; 3101 } else { 3102 exists = 1; 3103 break; 3104 } 3105 } 3106 3107 list_for_each_entry(cur, deleted_refs, list) { 3108 ret = dup_ref(cur, &pm->update_refs); 3109 if (ret < 0) 3110 goto out; 3111 } 3112 list_for_each_entry(cur, new_refs, list) { 3113 ret = dup_ref(cur, &pm->update_refs); 3114 if (ret < 0) 3115 goto out; 3116 } 3117 3118 ret = add_waiting_dir_move(sctx, pm->ino, is_orphan); 3119 if (ret) 3120 goto out; 3121 3122 if (exists) { 3123 list_add_tail(&pm->list, &entry->list); 3124 } else { 3125 rb_link_node(&pm->node, parent, p); 3126 rb_insert_color(&pm->node, &sctx->pending_dir_moves); 3127 } 3128 ret = 0; 3129 out: 3130 if (ret) { 3131 __free_recorded_refs(&pm->update_refs); 3132 kfree(pm); 3133 } 3134 return ret; 3135 } 3136 3137 static struct pending_dir_move *get_pending_dir_moves(struct send_ctx *sctx, 3138 u64 parent_ino) 3139 { 3140 struct rb_node *n = sctx->pending_dir_moves.rb_node; 3141 struct pending_dir_move *entry; 3142 3143 while (n) { 3144 entry = rb_entry(n, struct pending_dir_move, node); 3145 if (parent_ino < entry->parent_ino) 3146 n = n->rb_left; 3147 else if (parent_ino > entry->parent_ino) 3148 n = n->rb_right; 3149 else 3150 return entry; 3151 } 3152 return NULL; 3153 } 3154 3155 static int path_loop(struct send_ctx *sctx, struct fs_path *name, 3156 u64 ino, u64 gen, u64 *ancestor_ino) 3157 { 3158 int ret = 0; 3159 u64 parent_inode = 0; 3160 u64 parent_gen = 0; 3161 u64 start_ino = ino; 3162 3163 *ancestor_ino = 0; 3164 while (ino != BTRFS_FIRST_FREE_OBJECTID) { 3165 fs_path_reset(name); 3166 3167 if (is_waiting_for_rm(sctx, ino)) 3168 break; 3169 if (is_waiting_for_move(sctx, ino)) { 3170 if (*ancestor_ino == 0) 3171 *ancestor_ino = ino; 3172 ret = get_first_ref(sctx->parent_root, ino, 3173 &parent_inode, &parent_gen, name); 3174 } else { 3175 ret = __get_cur_name_and_parent(sctx, ino, gen, 3176 &parent_inode, 3177 &parent_gen, name); 3178 if (ret > 0) { 3179 ret = 0; 3180 break; 3181 } 3182 } 3183 if (ret < 0) 3184 break; 3185 if (parent_inode == start_ino) { 3186 ret = 1; 3187 if (*ancestor_ino == 0) 3188 *ancestor_ino = ino; 3189 break; 3190 } 3191 ino = 
parent_inode; 3192 gen = parent_gen; 3193 } 3194 return ret; 3195 } 3196 3197 static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm) 3198 { 3199 struct fs_path *from_path = NULL; 3200 struct fs_path *to_path = NULL; 3201 struct fs_path *name = NULL; 3202 u64 orig_progress = sctx->send_progress; 3203 struct recorded_ref *cur; 3204 u64 parent_ino, parent_gen; 3205 struct waiting_dir_move *dm = NULL; 3206 u64 rmdir_ino = 0; 3207 u64 ancestor; 3208 bool is_orphan; 3209 int ret; 3210 3211 name = fs_path_alloc(); 3212 from_path = fs_path_alloc(); 3213 if (!name || !from_path) { 3214 ret = -ENOMEM; 3215 goto out; 3216 } 3217 3218 dm = get_waiting_dir_move(sctx, pm->ino); 3219 ASSERT(dm); 3220 rmdir_ino = dm->rmdir_ino; 3221 is_orphan = dm->orphanized; 3222 free_waiting_dir_move(sctx, dm); 3223 3224 if (is_orphan) { 3225 ret = gen_unique_name(sctx, pm->ino, 3226 pm->gen, from_path); 3227 } else { 3228 ret = get_first_ref(sctx->parent_root, pm->ino, 3229 &parent_ino, &parent_gen, name); 3230 if (ret < 0) 3231 goto out; 3232 ret = get_cur_path(sctx, parent_ino, parent_gen, 3233 from_path); 3234 if (ret < 0) 3235 goto out; 3236 ret = fs_path_add_path(from_path, name); 3237 } 3238 if (ret < 0) 3239 goto out; 3240 3241 sctx->send_progress = sctx->cur_ino + 1; 3242 ret = path_loop(sctx, name, pm->ino, pm->gen, &ancestor); 3243 if (ret < 0) 3244 goto out; 3245 if (ret) { 3246 LIST_HEAD(deleted_refs); 3247 ASSERT(ancestor > BTRFS_FIRST_FREE_OBJECTID); 3248 ret = add_pending_dir_move(sctx, pm->ino, pm->gen, ancestor, 3249 &pm->update_refs, &deleted_refs, 3250 is_orphan); 3251 if (ret < 0) 3252 goto out; 3253 if (rmdir_ino) { 3254 dm = get_waiting_dir_move(sctx, pm->ino); 3255 ASSERT(dm); 3256 dm->rmdir_ino = rmdir_ino; 3257 } 3258 goto out; 3259 } 3260 fs_path_reset(name); 3261 to_path = name; 3262 name = NULL; 3263 ret = get_cur_path(sctx, pm->ino, pm->gen, to_path); 3264 if (ret < 0) 3265 goto out; 3266 3267 ret = send_rename(sctx, from_path, to_path); 3268 if (ret < 0) 3269 goto out; 3270 3271 if (rmdir_ino) { 3272 struct orphan_dir_info *odi; 3273 u64 gen; 3274 3275 odi = get_orphan_dir_info(sctx, rmdir_ino); 3276 if (!odi) { 3277 /* already deleted */ 3278 goto finish; 3279 } 3280 gen = odi->gen; 3281 3282 ret = can_rmdir(sctx, rmdir_ino, gen, sctx->cur_ino); 3283 if (ret < 0) 3284 goto out; 3285 if (!ret) 3286 goto finish; 3287 3288 name = fs_path_alloc(); 3289 if (!name) { 3290 ret = -ENOMEM; 3291 goto out; 3292 } 3293 ret = get_cur_path(sctx, rmdir_ino, gen, name); 3294 if (ret < 0) 3295 goto out; 3296 ret = send_rmdir(sctx, name); 3297 if (ret < 0) 3298 goto out; 3299 } 3300 3301 finish: 3302 ret = send_utimes(sctx, pm->ino, pm->gen); 3303 if (ret < 0) 3304 goto out; 3305 3306 /* 3307 * After rename/move, need to update the utimes of both new parent(s) 3308 * and old parent(s). 
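 *
 * E.g. (illustrative): "mv /a/x /b/x" changes both /a and /b, so each
 * of them needs a utimes command; update_refs carries the old as well
 * as the new parent directories for this purpose.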
3309 */ 3310 list_for_each_entry(cur, &pm->update_refs, list) { 3311 /* 3312 * The parent inode might have been deleted in the send snapshot 3313 */ 3314 ret = get_inode_info(sctx->send_root, cur->dir, NULL, 3315 NULL, NULL, NULL, NULL, NULL); 3316 if (ret == -ENOENT) { 3317 ret = 0; 3318 continue; 3319 } 3320 if (ret < 0) 3321 goto out; 3322 3323 ret = send_utimes(sctx, cur->dir, cur->dir_gen); 3324 if (ret < 0) 3325 goto out; 3326 } 3327 3328 out: 3329 fs_path_free(name); 3330 fs_path_free(from_path); 3331 fs_path_free(to_path); 3332 sctx->send_progress = orig_progress; 3333 3334 return ret; 3335 } 3336 3337 static void free_pending_move(struct send_ctx *sctx, struct pending_dir_move *m) 3338 { 3339 if (!list_empty(&m->list)) 3340 list_del(&m->list); 3341 if (!RB_EMPTY_NODE(&m->node)) 3342 rb_erase(&m->node, &sctx->pending_dir_moves); 3343 __free_recorded_refs(&m->update_refs); 3344 kfree(m); 3345 } 3346 3347 static void tail_append_pending_moves(struct pending_dir_move *moves, 3348 struct list_head *stack) 3349 { 3350 if (list_empty(&moves->list)) { 3351 list_add_tail(&moves->list, stack); 3352 } else { 3353 LIST_HEAD(list); 3354 list_splice_init(&moves->list, &list); 3355 list_add_tail(&moves->list, stack); 3356 list_splice_tail(&list, stack); 3357 } 3358 } 3359 3360 static int apply_children_dir_moves(struct send_ctx *sctx) 3361 { 3362 struct pending_dir_move *pm; 3363 struct list_head stack; 3364 u64 parent_ino = sctx->cur_ino; 3365 int ret = 0; 3366 3367 pm = get_pending_dir_moves(sctx, parent_ino); 3368 if (!pm) 3369 return 0; 3370 3371 INIT_LIST_HEAD(&stack); 3372 tail_append_pending_moves(pm, &stack); 3373 3374 while (!list_empty(&stack)) { 3375 pm = list_first_entry(&stack, struct pending_dir_move, list); 3376 parent_ino = pm->ino; 3377 ret = apply_dir_move(sctx, pm); 3378 free_pending_move(sctx, pm); 3379 if (ret) 3380 goto out; 3381 pm = get_pending_dir_moves(sctx, parent_ino); 3382 if (pm) 3383 tail_append_pending_moves(pm, &stack); 3384 } 3385 return 0; 3386 3387 out: 3388 while (!list_empty(&stack)) { 3389 pm = list_first_entry(&stack, struct pending_dir_move, list); 3390 free_pending_move(sctx, pm); 3391 } 3392 return ret; 3393 } 3394 3395 /* 3396 * We might need to delay a directory rename even when no ancestor directory 3397 * (in the send root) with a higher inode number than ours (sctx->cur_ino) was 3398 * renamed. This happens when we rename a directory to the old name (the name 3399 * in the parent root) of some other unrelated directory that got its rename 3400 * delayed due to some ancestor with higher number that got renamed. 3401 * 3402 * Example: 3403 * 3404 * Parent snapshot: 3405 * . (ino 256) 3406 * |---- a/ (ino 257) 3407 * | |---- file (ino 260) 3408 * | 3409 * |---- b/ (ino 258) 3410 * |---- c/ (ino 259) 3411 * 3412 * Send snapshot: 3413 * . (ino 256) 3414 * |---- a/ (ino 258) 3415 * |---- x/ (ino 259) 3416 * |---- y/ (ino 257) 3417 * |----- file (ino 260) 3418 * 3419 * Here we can not rename 258 from 'b' to 'a' without the rename of inode 257 3420 * from 'a' to 'x/y' happening first, which in turn depends on the rename of 3421 * inode 259 from 'c' to 'x'. So the order of rename commands the send stream 3422 * must issue is: 3423 * 3424 * 1 - rename 259 from 'c' to 'x' 3425 * 2 - rename 257 from 'a' to 'x/y' 3426 * 3 - rename 258 from 'b' to 'a' 3427 * 3428 * Returns 1 if the rename of sctx->cur_ino needs to be delayed, 0 if it can 3429 * be done right away and < 0 on error. 
3430 */ 3431 static int wait_for_dest_dir_move(struct send_ctx *sctx, 3432 struct recorded_ref *parent_ref, 3433 const bool is_orphan) 3434 { 3435 struct btrfs_fs_info *fs_info = sctx->parent_root->fs_info; 3436 struct btrfs_path *path; 3437 struct btrfs_key key; 3438 struct btrfs_key di_key; 3439 struct btrfs_dir_item *di; 3440 u64 left_gen; 3441 u64 right_gen; 3442 int ret = 0; 3443 struct waiting_dir_move *wdm; 3444 3445 if (RB_EMPTY_ROOT(&sctx->waiting_dir_moves)) 3446 return 0; 3447 3448 path = alloc_path_for_send(); 3449 if (!path) 3450 return -ENOMEM; 3451 3452 key.objectid = parent_ref->dir; 3453 key.type = BTRFS_DIR_ITEM_KEY; 3454 key.offset = btrfs_name_hash(parent_ref->name, parent_ref->name_len); 3455 3456 ret = btrfs_search_slot(NULL, sctx->parent_root, &key, path, 0, 0); 3457 if (ret < 0) { 3458 goto out; 3459 } else if (ret > 0) { 3460 ret = 0; 3461 goto out; 3462 } 3463 3464 di = btrfs_match_dir_item_name(fs_info, path, parent_ref->name, 3465 parent_ref->name_len); 3466 if (!di) { 3467 ret = 0; 3468 goto out; 3469 } 3470 /* 3471 * di_key.objectid has the number of the inode that has a dentry in the 3472 * parent directory with the same name that sctx->cur_ino is being 3473 * renamed to. We need to check if that inode is in the send root as 3474 * well and if it is currently marked as an inode with a pending rename, 3475 * if it is, we need to delay the rename of sctx->cur_ino as well, so 3476 * that it happens after that other inode is renamed. 3477 */ 3478 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &di_key); 3479 if (di_key.type != BTRFS_INODE_ITEM_KEY) { 3480 ret = 0; 3481 goto out; 3482 } 3483 3484 ret = get_inode_info(sctx->parent_root, di_key.objectid, NULL, 3485 &left_gen, NULL, NULL, NULL, NULL); 3486 if (ret < 0) 3487 goto out; 3488 ret = get_inode_info(sctx->send_root, di_key.objectid, NULL, 3489 &right_gen, NULL, NULL, NULL, NULL); 3490 if (ret < 0) { 3491 if (ret == -ENOENT) 3492 ret = 0; 3493 goto out; 3494 } 3495 3496 /* Different inode, no need to delay the rename of sctx->cur_ino */ 3497 if (right_gen != left_gen) { 3498 ret = 0; 3499 goto out; 3500 } 3501 3502 wdm = get_waiting_dir_move(sctx, di_key.objectid); 3503 if (wdm && !wdm->orphanized) { 3504 ret = add_pending_dir_move(sctx, 3505 sctx->cur_ino, 3506 sctx->cur_inode_gen, 3507 di_key.objectid, 3508 &sctx->new_refs, 3509 &sctx->deleted_refs, 3510 is_orphan); 3511 if (!ret) 3512 ret = 1; 3513 } 3514 out: 3515 btrfs_free_path(path); 3516 return ret; 3517 } 3518 3519 /* 3520 * Check if inode ino2, or any of its ancestors, is inode ino1. 3521 * Return 1 if true, 0 if false and < 0 on error. 3522 */ 3523 static int check_ino_in_path(struct btrfs_root *root, 3524 const u64 ino1, 3525 const u64 ino1_gen, 3526 const u64 ino2, 3527 const u64 ino2_gen, 3528 struct fs_path *fs_path) 3529 { 3530 u64 ino = ino2; 3531 3532 if (ino1 == ino2) 3533 return ino1_gen == ino2_gen; 3534 3535 while (ino > BTRFS_FIRST_FREE_OBJECTID) { 3536 u64 parent; 3537 u64 parent_gen; 3538 int ret; 3539 3540 fs_path_reset(fs_path); 3541 ret = get_first_ref(root, ino, &parent, &parent_gen, fs_path); 3542 if (ret < 0) 3543 return ret; 3544 if (parent == ino1) 3545 return parent_gen == ino1_gen; 3546 ino = parent; 3547 } 3548 return 0; 3549 } 3550 3551 /* 3552 * Check if ino ino1 is an ancestor of inode ino2 in the given root for any 3553 * possible path (in case ino2 is not a directory and has multiple hard links). 3554 * Return 1 if true, 0 if false and < 0 on error. 
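 *
 * (Example, illustrative: a file hard linked as /a/f1 and /b/c/f2 has
 * two paths, and only walking up from every link notices that inode
 * 'b' is an ancestor via the /b/c/f2 path.)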
3555 */ 3556 static int is_ancestor(struct btrfs_root *root, 3557 const u64 ino1, 3558 const u64 ino1_gen, 3559 const u64 ino2, 3560 struct fs_path *fs_path) 3561 { 3562 bool free_fs_path = false; 3563 int ret = 0; 3564 struct btrfs_path *path = NULL; 3565 struct btrfs_key key; 3566 3567 if (!fs_path) { 3568 fs_path = fs_path_alloc(); 3569 if (!fs_path) 3570 return -ENOMEM; 3571 free_fs_path = true; 3572 } 3573 3574 path = alloc_path_for_send(); 3575 if (!path) { 3576 ret = -ENOMEM; 3577 goto out; 3578 } 3579 3580 key.objectid = ino2; 3581 key.type = BTRFS_INODE_REF_KEY; 3582 key.offset = 0; 3583 3584 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 3585 if (ret < 0) 3586 goto out; 3587 3588 while (true) { 3589 struct extent_buffer *leaf = path->nodes[0]; 3590 int slot = path->slots[0]; 3591 u32 cur_offset = 0; 3592 u32 item_size; 3593 3594 if (slot >= btrfs_header_nritems(leaf)) { 3595 ret = btrfs_next_leaf(root, path); 3596 if (ret < 0) 3597 goto out; 3598 if (ret > 0) 3599 break; 3600 continue; 3601 } 3602 3603 btrfs_item_key_to_cpu(leaf, &key, slot); 3604 if (key.objectid != ino2) 3605 break; 3606 if (key.type != BTRFS_INODE_REF_KEY && 3607 key.type != BTRFS_INODE_EXTREF_KEY) 3608 break; 3609 3610 item_size = btrfs_item_size_nr(leaf, slot); 3611 while (cur_offset < item_size) { 3612 u64 parent; 3613 u64 parent_gen; 3614 3615 if (key.type == BTRFS_INODE_EXTREF_KEY) { 3616 unsigned long ptr; 3617 struct btrfs_inode_extref *extref; 3618 3619 ptr = btrfs_item_ptr_offset(leaf, slot); 3620 extref = (struct btrfs_inode_extref *) 3621 (ptr + cur_offset); 3622 parent = btrfs_inode_extref_parent(leaf, 3623 extref); 3624 cur_offset += sizeof(*extref); 3625 cur_offset += btrfs_inode_extref_name_len(leaf, 3626 extref); 3627 } else { 3628 parent = key.offset; 3629 cur_offset = item_size; 3630 } 3631 3632 ret = get_inode_info(root, parent, NULL, &parent_gen, 3633 NULL, NULL, NULL, NULL); 3634 if (ret < 0) 3635 goto out; 3636 ret = check_ino_in_path(root, ino1, ino1_gen, 3637 parent, parent_gen, fs_path); 3638 if (ret) 3639 goto out; 3640 } 3641 path->slots[0]++; 3642 } 3643 ret = 0; 3644 out: 3645 btrfs_free_path(path); 3646 if (free_fs_path) 3647 fs_path_free(fs_path); 3648 return ret; 3649 } 3650 3651 static int wait_for_parent_move(struct send_ctx *sctx, 3652 struct recorded_ref *parent_ref, 3653 const bool is_orphan) 3654 { 3655 int ret = 0; 3656 u64 ino = parent_ref->dir; 3657 u64 ino_gen = parent_ref->dir_gen; 3658 u64 parent_ino_before, parent_ino_after; 3659 struct fs_path *path_before = NULL; 3660 struct fs_path *path_after = NULL; 3661 int len1, len2; 3662 3663 path_after = fs_path_alloc(); 3664 path_before = fs_path_alloc(); 3665 if (!path_after || !path_before) { 3666 ret = -ENOMEM; 3667 goto out; 3668 } 3669 3670 /* 3671 * Our current directory inode may not yet be renamed/moved because some 3672 * ancestor (immediate or not) has to be renamed/moved first. So find if 3673 * such ancestor exists and make sure our own rename/move happens after 3674 * that ancestor is processed to avoid path build infinite loops (done 3675 * at get_cur_path()). 
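 *
 * (A typical case, for illustration: two directories swapped places
 * between the two snapshots. Renaming the lower-numbered one right
 * away could make each path lookup depend on the other rename having
 * happened already, so its rename is delayed until the ancestor was
 * processed.)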
3676 */ 3677 while (ino > BTRFS_FIRST_FREE_OBJECTID) { 3678 u64 parent_ino_after_gen; 3679 3680 if (is_waiting_for_move(sctx, ino)) { 3681 /* 3682 * If the current inode is an ancestor of ino in the 3683 * parent root, we need to delay the rename of the 3684 * current inode, otherwise don't delay the rename 3685 * because we can end up with a circular dependency 3686 * of renames, resulting in some directories never 3687 * getting the respective rename operations issued in 3688 * the send stream or getting into infinite path build 3689 * loops. 3690 */ 3691 ret = is_ancestor(sctx->parent_root, 3692 sctx->cur_ino, sctx->cur_inode_gen, 3693 ino, path_before); 3694 if (ret) 3695 break; 3696 } 3697 3698 fs_path_reset(path_before); 3699 fs_path_reset(path_after); 3700 3701 ret = get_first_ref(sctx->send_root, ino, &parent_ino_after, 3702 &parent_ino_after_gen, path_after); 3703 if (ret < 0) 3704 goto out; 3705 ret = get_first_ref(sctx->parent_root, ino, &parent_ino_before, 3706 NULL, path_before); 3707 if (ret < 0 && ret != -ENOENT) { 3708 goto out; 3709 } else if (ret == -ENOENT) { 3710 ret = 0; 3711 break; 3712 } 3713 3714 len1 = fs_path_len(path_before); 3715 len2 = fs_path_len(path_after); 3716 if (ino > sctx->cur_ino && 3717 (parent_ino_before != parent_ino_after || len1 != len2 || 3718 memcmp(path_before->start, path_after->start, len1))) { 3719 u64 parent_ino_gen; 3720 3721 ret = get_inode_info(sctx->parent_root, ino, NULL, 3722 &parent_ino_gen, NULL, NULL, NULL, 3723 NULL); 3724 if (ret < 0) 3725 goto out; 3726 if (ino_gen == parent_ino_gen) { 3727 ret = 1; 3728 break; 3729 } 3730 } 3731 ino = parent_ino_after; 3732 ino_gen = parent_ino_after_gen; 3733 } 3734 3735 out: 3736 fs_path_free(path_before); 3737 fs_path_free(path_after); 3738 3739 if (ret == 1) { 3740 ret = add_pending_dir_move(sctx, 3741 sctx->cur_ino, 3742 sctx->cur_inode_gen, 3743 ino, 3744 &sctx->new_refs, 3745 &sctx->deleted_refs, 3746 is_orphan); 3747 if (!ret) 3748 ret = 1; 3749 } 3750 3751 return ret; 3752 } 3753 3754 static int update_ref_path(struct send_ctx *sctx, struct recorded_ref *ref) 3755 { 3756 int ret; 3757 struct fs_path *new_path; 3758 3759 /* 3760 * Our reference's name member points to its full_path member string, so 3761 * we use a new path here. 3762 */ 3763 new_path = fs_path_alloc(); 3764 if (!new_path) 3765 return -ENOMEM; 3766 3767 ret = get_cur_path(sctx, ref->dir, ref->dir_gen, new_path); 3768 if (ret < 0) { 3769 fs_path_free(new_path); 3770 return ret; 3771 } 3772 ret = fs_path_add(new_path, ref->name, ref->name_len); 3773 if (ret < 0) { 3774 fs_path_free(new_path); 3775 return ret; 3776 } 3777 3778 fs_path_free(ref->full_path); 3779 set_ref_path(ref, new_path); 3780 3781 return 0; 3782 } 3783 3784 /* 3785 * This does all the move/link/unlink/rmdir magic. 3786 */ 3787 static int process_recorded_refs(struct send_ctx *sctx, int *pending_move) 3788 { 3789 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; 3790 int ret = 0; 3791 struct recorded_ref *cur; 3792 struct recorded_ref *cur2; 3793 struct list_head check_dirs; 3794 struct fs_path *valid_path = NULL; 3795 u64 ow_inode = 0; 3796 u64 ow_gen; 3797 u64 ow_mode; 3798 int did_overwrite = 0; 3799 int is_orphan = 0; 3800 u64 last_dir_ino_rm = 0; 3801 bool can_rename = true; 3802 bool orphanized_dir = false; 3803 bool orphanized_ancestor = false; 3804 3805 btrfs_debug(fs_info, "process_recorded_refs %llu", sctx->cur_ino); 3806 3807 /* 3808 * This should never happen as the root dir always has the same ref 3809 * which is always '..' 
3810 */ 3811 BUG_ON(sctx->cur_ino <= BTRFS_FIRST_FREE_OBJECTID); 3812 INIT_LIST_HEAD(&check_dirs); 3813 3814 valid_path = fs_path_alloc(); 3815 if (!valid_path) { 3816 ret = -ENOMEM; 3817 goto out; 3818 } 3819 3820 /* 3821 * First, check if the first ref of the current inode was overwritten 3822 * before. If yes, we know that the current inode was already orphanized 3823 * and thus use the orphan name. If not, we can use get_cur_path to 3824 * get the path of the first ref as it would look like while receiving at 3825 * this point in time. 3826 * New inodes are always orphans at the beginning, so we force the use of 3827 * the orphan name in this case. 3828 * The first ref is stored in valid_path and will be updated if it 3829 * gets moved around. 3830 */ 3831 if (!sctx->cur_inode_new) { 3832 ret = did_overwrite_first_ref(sctx, sctx->cur_ino, 3833 sctx->cur_inode_gen); 3834 if (ret < 0) 3835 goto out; 3836 if (ret) 3837 did_overwrite = 1; 3838 } 3839 if (sctx->cur_inode_new || did_overwrite) { 3840 ret = gen_unique_name(sctx, sctx->cur_ino, 3841 sctx->cur_inode_gen, valid_path); 3842 if (ret < 0) 3843 goto out; 3844 is_orphan = 1; 3845 } else { 3846 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, 3847 valid_path); 3848 if (ret < 0) 3849 goto out; 3850 } 3851 3852 list_for_each_entry(cur, &sctx->new_refs, list) { 3853 /* 3854 * We may have refs where the parent directory does not exist 3855 * yet. This happens if the parent directory's inum is higher 3856 * than the current inum. To handle this case, we create the 3857 * parent directory out of order. But we need to check if this 3858 * did already happen before due to other refs in the same dir. 3859 */ 3860 ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen); 3861 if (ret < 0) 3862 goto out; 3863 if (ret == inode_state_will_create) { 3864 ret = 0; 3865 /* 3866 * First check if any of the current inode's refs 3867 * already created the dir. 3868 */ 3869 list_for_each_entry(cur2, &sctx->new_refs, list) { 3870 if (cur == cur2) 3871 break; 3872 if (cur2->dir == cur->dir) { 3873 ret = 1; 3874 break; 3875 } 3876 } 3877 3878 /* 3879 * If that did not happen, check if a previous inode 3880 * already created the dir. 3881 */ 3882 if (!ret) 3883 ret = did_create_dir(sctx, cur->dir); 3884 if (ret < 0) 3885 goto out; 3886 if (!ret) { 3887 ret = send_create_inode(sctx, cur->dir); 3888 if (ret < 0) 3889 goto out; 3890 } 3891 } 3892 3893 /* 3894 * Check if this new ref would overwrite the first ref of 3895 * another unprocessed inode. If yes, orphanize the 3896 * overwritten inode. If we find an overwritten ref that is 3897 * not the first ref, simply unlink it. 3898 */ 3899 ret = will_overwrite_ref(sctx, cur->dir, cur->dir_gen, 3900 cur->name, cur->name_len, 3901 &ow_inode, &ow_gen, &ow_mode); 3902 if (ret < 0) 3903 goto out; 3904 if (ret) { 3905 ret = is_first_ref(sctx->parent_root, 3906 ow_inode, cur->dir, cur->name, 3907 cur->name_len); 3908 if (ret < 0) 3909 goto out; 3910 if (ret) { 3911 struct name_cache_entry *nce; 3912 struct waiting_dir_move *wdm; 3913 3914 ret = orphanize_inode(sctx, ow_inode, ow_gen, 3915 cur->full_path); 3916 if (ret < 0) 3917 goto out; 3918 if (S_ISDIR(ow_mode)) 3919 orphanized_dir = true; 3920 3921 /* 3922 * If ow_inode has its rename operation delayed, 3923 * make sure that its orphanized name is used in 3924 * the source path when performing its rename 3925 * operation. 
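 *
 * (Orphan names come from gen_unique_name and look like
 * "o<ino>-<gen>-<idx>", e.g. "o261-7-0"; noted here for illustration
 * only.)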
3926 */ 3927 if (is_waiting_for_move(sctx, ow_inode)) { 3928 wdm = get_waiting_dir_move(sctx, 3929 ow_inode); 3930 ASSERT(wdm); 3931 wdm->orphanized = true; 3932 } 3933 3934 /* 3935 * Make sure we clear our orphanized inode's 3936 * name from the name cache. This is because the 3937 * inode ow_inode might be an ancestor of some 3938 * other inode that will be orphanized as well 3939 * later and has an inode number greater than 3940 * sctx->send_progress. We need to prevent 3941 * future name lookups from using the old name 3942 * and get instead the orphan name. 3943 */ 3944 nce = name_cache_search(sctx, ow_inode, ow_gen); 3945 if (nce) { 3946 name_cache_delete(sctx, nce); 3947 kfree(nce); 3948 } 3949 3950 /* 3951 * ow_inode might currently be an ancestor of 3952 * cur_ino, therefore compute valid_path (the 3953 * current path of cur_ino) again because it 3954 * might contain the pre-orphanization name of 3955 * ow_inode, which is no longer valid. 3956 */ 3957 ret = is_ancestor(sctx->parent_root, 3958 ow_inode, ow_gen, 3959 sctx->cur_ino, NULL); 3960 if (ret > 0) { 3961 orphanized_ancestor = true; 3962 fs_path_reset(valid_path); 3963 ret = get_cur_path(sctx, sctx->cur_ino, 3964 sctx->cur_inode_gen, 3965 valid_path); 3966 } 3967 if (ret < 0) 3968 goto out; 3969 } else { 3970 ret = send_unlink(sctx, cur->full_path); 3971 if (ret < 0) 3972 goto out; 3973 } 3974 } 3975 3976 if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root) { 3977 ret = wait_for_dest_dir_move(sctx, cur, is_orphan); 3978 if (ret < 0) 3979 goto out; 3980 if (ret == 1) { 3981 can_rename = false; 3982 *pending_move = 1; 3983 } 3984 } 3985 3986 if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root && 3987 can_rename) { 3988 ret = wait_for_parent_move(sctx, cur, is_orphan); 3989 if (ret < 0) 3990 goto out; 3991 if (ret == 1) { 3992 can_rename = false; 3993 *pending_move = 1; 3994 } 3995 } 3996 3997 /* 3998 * link/move the ref to the new place. If we have an orphan 3999 * inode, move it and update valid_path. If not, link or move 4000 * it depending on the inode mode. 4001 */ 4002 if (is_orphan && can_rename) { 4003 ret = send_rename(sctx, valid_path, cur->full_path); 4004 if (ret < 0) 4005 goto out; 4006 is_orphan = 0; 4007 ret = fs_path_copy(valid_path, cur->full_path); 4008 if (ret < 0) 4009 goto out; 4010 } else if (can_rename) { 4011 if (S_ISDIR(sctx->cur_inode_mode)) { 4012 /* 4013 * Dirs can't be linked, so move it. For moved 4014 * dirs, we always have one new and one deleted 4015 * ref. The deleted ref is ignored later. 4016 */ 4017 ret = send_rename(sctx, valid_path, 4018 cur->full_path); 4019 if (!ret) 4020 ret = fs_path_copy(valid_path, 4021 cur->full_path); 4022 if (ret < 0) 4023 goto out; 4024 } else { 4025 /* 4026 * We might have previously orphanized an inode 4027 * which is an ancestor of our current inode, 4028 * so our reference's full path, which was 4029 * computed before any such orphanizations, must 4030 * be updated. 4031 */ 4032 if (orphanized_dir) { 4033 ret = update_ref_path(sctx, cur); 4034 if (ret < 0) 4035 goto out; 4036 } 4037 ret = send_link(sctx, cur->full_path, 4038 valid_path); 4039 if (ret < 0) 4040 goto out; 4041 } 4042 } 4043 ret = dup_ref(cur, &check_dirs); 4044 if (ret < 0) 4045 goto out; 4046 } 4047 4048 if (S_ISDIR(sctx->cur_inode_mode) && sctx->cur_inode_deleted) { 4049 /* 4050 * Check if we can already rmdir the directory. If not, 4051 * orphanize it. For every dir item inside that gets deleted 4052 * later, we do this check again and rmdir it then if possible. 
4053 * See the use of check_dirs for more details. 4054 */ 4055 ret = can_rmdir(sctx, sctx->cur_ino, sctx->cur_inode_gen, 4056 sctx->cur_ino); 4057 if (ret < 0) 4058 goto out; 4059 if (ret) { 4060 ret = send_rmdir(sctx, valid_path); 4061 if (ret < 0) 4062 goto out; 4063 } else if (!is_orphan) { 4064 ret = orphanize_inode(sctx, sctx->cur_ino, 4065 sctx->cur_inode_gen, valid_path); 4066 if (ret < 0) 4067 goto out; 4068 is_orphan = 1; 4069 } 4070 4071 list_for_each_entry(cur, &sctx->deleted_refs, list) { 4072 ret = dup_ref(cur, &check_dirs); 4073 if (ret < 0) 4074 goto out; 4075 } 4076 } else if (S_ISDIR(sctx->cur_inode_mode) && 4077 !list_empty(&sctx->deleted_refs)) { 4078 /* 4079 * We have a moved dir. Add the old parent to check_dirs 4080 */ 4081 cur = list_entry(sctx->deleted_refs.next, struct recorded_ref, 4082 list); 4083 ret = dup_ref(cur, &check_dirs); 4084 if (ret < 0) 4085 goto out; 4086 } else if (!S_ISDIR(sctx->cur_inode_mode)) { 4087 /* 4088 * We have a non dir inode. Go through all deleted refs and 4089 * unlink them if they were not already overwritten by other 4090 * inodes. 4091 */ 4092 list_for_each_entry(cur, &sctx->deleted_refs, list) { 4093 ret = did_overwrite_ref(sctx, cur->dir, cur->dir_gen, 4094 sctx->cur_ino, sctx->cur_inode_gen, 4095 cur->name, cur->name_len); 4096 if (ret < 0) 4097 goto out; 4098 if (!ret) { 4099 /* 4100 * If we orphanized any ancestor before, we need 4101 * to recompute the full path for deleted names, 4102 * since any such path was computed before we 4103 * processed any references and orphanized any 4104 * ancestor inode. 4105 */ 4106 if (orphanized_ancestor) { 4107 ret = update_ref_path(sctx, cur); 4108 if (ret < 0) 4109 goto out; 4110 } 4111 ret = send_unlink(sctx, cur->full_path); 4112 if (ret < 0) 4113 goto out; 4114 } 4115 ret = dup_ref(cur, &check_dirs); 4116 if (ret < 0) 4117 goto out; 4118 } 4119 /* 4120 * If the inode is still orphan, unlink the orphan. This may 4121 * happen when a previous inode did overwrite the first ref 4122 * of this inode and no new refs were added for the current 4123 * inode. Unlinking does not mean that the inode is deleted in 4124 * all cases. There may still be links to this inode in other 4125 * places. 4126 */ 4127 if (is_orphan) { 4128 ret = send_unlink(sctx, valid_path); 4129 if (ret < 0) 4130 goto out; 4131 } 4132 } 4133 4134 /* 4135 * We did collect all parent dirs where cur_inode was once located. We 4136 * now go through all these dirs and check if they are pending for 4137 * deletion and if it's finally possible to perform the rmdir now. 4138 * We also update the inode stats of the parent dirs here. 4139 */ 4140 list_for_each_entry(cur, &check_dirs, list) { 4141 /* 4142 * In case we had refs into dirs that were not processed yet, 4143 * we don't need to do the utime and rmdir logic for these dirs. 4144 * The dir will be processed later. 
4145 */ 4146 if (cur->dir > sctx->cur_ino) 4147 continue; 4148 4149 ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen); 4150 if (ret < 0) 4151 goto out; 4152 4153 if (ret == inode_state_did_create || 4154 ret == inode_state_no_change) { 4155 /* TODO delayed utimes */ 4156 ret = send_utimes(sctx, cur->dir, cur->dir_gen); 4157 if (ret < 0) 4158 goto out; 4159 } else if (ret == inode_state_did_delete && 4160 cur->dir != last_dir_ino_rm) { 4161 ret = can_rmdir(sctx, cur->dir, cur->dir_gen, 4162 sctx->cur_ino); 4163 if (ret < 0) 4164 goto out; 4165 if (ret) { 4166 ret = get_cur_path(sctx, cur->dir, 4167 cur->dir_gen, valid_path); 4168 if (ret < 0) 4169 goto out; 4170 ret = send_rmdir(sctx, valid_path); 4171 if (ret < 0) 4172 goto out; 4173 last_dir_ino_rm = cur->dir; 4174 } 4175 } 4176 } 4177 4178 ret = 0; 4179 4180 out: 4181 __free_recorded_refs(&check_dirs); 4182 free_recorded_refs(sctx); 4183 fs_path_free(valid_path); 4184 return ret; 4185 } 4186 4187 static int record_ref(struct btrfs_root *root, u64 dir, struct fs_path *name, 4188 void *ctx, struct list_head *refs) 4189 { 4190 int ret = 0; 4191 struct send_ctx *sctx = ctx; 4192 struct fs_path *p; 4193 u64 gen; 4194 4195 p = fs_path_alloc(); 4196 if (!p) 4197 return -ENOMEM; 4198 4199 ret = get_inode_info(root, dir, NULL, &gen, NULL, NULL, 4200 NULL, NULL); 4201 if (ret < 0) 4202 goto out; 4203 4204 ret = get_cur_path(sctx, dir, gen, p); 4205 if (ret < 0) 4206 goto out; 4207 ret = fs_path_add_path(p, name); 4208 if (ret < 0) 4209 goto out; 4210 4211 ret = __record_ref(refs, dir, gen, p); 4212 4213 out: 4214 if (ret) 4215 fs_path_free(p); 4216 return ret; 4217 } 4218 4219 static int __record_new_ref(int num, u64 dir, int index, 4220 struct fs_path *name, 4221 void *ctx) 4222 { 4223 struct send_ctx *sctx = ctx; 4224 return record_ref(sctx->send_root, dir, name, ctx, &sctx->new_refs); 4225 } 4226 4227 4228 static int __record_deleted_ref(int num, u64 dir, int index, 4229 struct fs_path *name, 4230 void *ctx) 4231 { 4232 struct send_ctx *sctx = ctx; 4233 return record_ref(sctx->parent_root, dir, name, ctx, 4234 &sctx->deleted_refs); 4235 } 4236 4237 static int record_new_ref(struct send_ctx *sctx) 4238 { 4239 int ret; 4240 4241 ret = iterate_inode_ref(sctx->send_root, sctx->left_path, 4242 sctx->cmp_key, 0, __record_new_ref, sctx); 4243 if (ret < 0) 4244 goto out; 4245 ret = 0; 4246 4247 out: 4248 return ret; 4249 } 4250 4251 static int record_deleted_ref(struct send_ctx *sctx) 4252 { 4253 int ret; 4254 4255 ret = iterate_inode_ref(sctx->parent_root, sctx->right_path, 4256 sctx->cmp_key, 0, __record_deleted_ref, sctx); 4257 if (ret < 0) 4258 goto out; 4259 ret = 0; 4260 4261 out: 4262 return ret; 4263 } 4264 4265 struct find_ref_ctx { 4266 u64 dir; 4267 u64 dir_gen; 4268 struct btrfs_root *root; 4269 struct fs_path *name; 4270 int found_idx; 4271 }; 4272 4273 static int __find_iref(int num, u64 dir, int index, 4274 struct fs_path *name, 4275 void *ctx_) 4276 { 4277 struct find_ref_ctx *ctx = ctx_; 4278 u64 dir_gen; 4279 int ret; 4280 4281 if (dir == ctx->dir && fs_path_len(name) == fs_path_len(ctx->name) && 4282 strncmp(name->start, ctx->name->start, fs_path_len(name)) == 0) { 4283 /* 4284 * To avoid doing extra lookups we'll only do this if everything 4285 * else matches. 
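 *
 * The generation check below matters because the same dir inum with a
 * different generation means the directory was deleted and recreated
 * in between, so such a ref is no real match.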
4286 */ 4287 ret = get_inode_info(ctx->root, dir, NULL, &dir_gen, NULL, 4288 NULL, NULL, NULL); 4289 if (ret) 4290 return ret; 4291 if (dir_gen != ctx->dir_gen) 4292 return 0; 4293 ctx->found_idx = num; 4294 return 1; 4295 } 4296 return 0; 4297 } 4298 4299 static int find_iref(struct btrfs_root *root, 4300 struct btrfs_path *path, 4301 struct btrfs_key *key, 4302 u64 dir, u64 dir_gen, struct fs_path *name) 4303 { 4304 int ret; 4305 struct find_ref_ctx ctx; 4306 4307 ctx.dir = dir; 4308 ctx.name = name; 4309 ctx.dir_gen = dir_gen; 4310 ctx.found_idx = -1; 4311 ctx.root = root; 4312 4313 ret = iterate_inode_ref(root, path, key, 0, __find_iref, &ctx); 4314 if (ret < 0) 4315 return ret; 4316 4317 if (ctx.found_idx == -1) 4318 return -ENOENT; 4319 4320 return ctx.found_idx; 4321 } 4322 4323 static int __record_changed_new_ref(int num, u64 dir, int index, 4324 struct fs_path *name, 4325 void *ctx) 4326 { 4327 u64 dir_gen; 4328 int ret; 4329 struct send_ctx *sctx = ctx; 4330 4331 ret = get_inode_info(sctx->send_root, dir, NULL, &dir_gen, NULL, 4332 NULL, NULL, NULL); 4333 if (ret) 4334 return ret; 4335 4336 ret = find_iref(sctx->parent_root, sctx->right_path, 4337 sctx->cmp_key, dir, dir_gen, name); 4338 if (ret == -ENOENT) 4339 ret = __record_new_ref(num, dir, index, name, sctx); 4340 else if (ret > 0) 4341 ret = 0; 4342 4343 return ret; 4344 } 4345 4346 static int __record_changed_deleted_ref(int num, u64 dir, int index, 4347 struct fs_path *name, 4348 void *ctx) 4349 { 4350 u64 dir_gen; 4351 int ret; 4352 struct send_ctx *sctx = ctx; 4353 4354 ret = get_inode_info(sctx->parent_root, dir, NULL, &dir_gen, NULL, 4355 NULL, NULL, NULL); 4356 if (ret) 4357 return ret; 4358 4359 ret = find_iref(sctx->send_root, sctx->left_path, sctx->cmp_key, 4360 dir, dir_gen, name); 4361 if (ret == -ENOENT) 4362 ret = __record_deleted_ref(num, dir, index, name, sctx); 4363 else if (ret > 0) 4364 ret = 0; 4365 4366 return ret; 4367 } 4368 4369 static int record_changed_ref(struct send_ctx *sctx) 4370 { 4371 int ret = 0; 4372 4373 ret = iterate_inode_ref(sctx->send_root, sctx->left_path, 4374 sctx->cmp_key, 0, __record_changed_new_ref, sctx); 4375 if (ret < 0) 4376 goto out; 4377 ret = iterate_inode_ref(sctx->parent_root, sctx->right_path, 4378 sctx->cmp_key, 0, __record_changed_deleted_ref, sctx); 4379 if (ret < 0) 4380 goto out; 4381 ret = 0; 4382 4383 out: 4384 return ret; 4385 } 4386 4387 /* 4388 * Record and process all refs at once. Needed when an inode changes the 4389 * generation number, which means that it was deleted and recreated. 
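 *
 * (E.g., illustrative: the snapshots show the same inode number with
 * different generations, so it refers to two different inodes; the
 * inode is then handled as a delete plus a re-create instead of an
 * in-place change.)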
4390 */ 4391 static int process_all_refs(struct send_ctx *sctx, 4392 enum btrfs_compare_tree_result cmd) 4393 { 4394 int ret; 4395 struct btrfs_root *root; 4396 struct btrfs_path *path; 4397 struct btrfs_key key; 4398 struct btrfs_key found_key; 4399 struct extent_buffer *eb; 4400 int slot; 4401 iterate_inode_ref_t cb; 4402 int pending_move = 0; 4403 4404 path = alloc_path_for_send(); 4405 if (!path) 4406 return -ENOMEM; 4407 4408 if (cmd == BTRFS_COMPARE_TREE_NEW) { 4409 root = sctx->send_root; 4410 cb = __record_new_ref; 4411 } else if (cmd == BTRFS_COMPARE_TREE_DELETED) { 4412 root = sctx->parent_root; 4413 cb = __record_deleted_ref; 4414 } else { 4415 btrfs_err(sctx->send_root->fs_info, 4416 "Wrong command %d in process_all_refs", cmd); 4417 ret = -EINVAL; 4418 goto out; 4419 } 4420 4421 key.objectid = sctx->cmp_key->objectid; 4422 key.type = BTRFS_INODE_REF_KEY; 4423 key.offset = 0; 4424 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 4425 if (ret < 0) 4426 goto out; 4427 4428 while (1) { 4429 eb = path->nodes[0]; 4430 slot = path->slots[0]; 4431 if (slot >= btrfs_header_nritems(eb)) { 4432 ret = btrfs_next_leaf(root, path); 4433 if (ret < 0) 4434 goto out; 4435 else if (ret > 0) 4436 break; 4437 continue; 4438 } 4439 4440 btrfs_item_key_to_cpu(eb, &found_key, slot); 4441 4442 if (found_key.objectid != key.objectid || 4443 (found_key.type != BTRFS_INODE_REF_KEY && 4444 found_key.type != BTRFS_INODE_EXTREF_KEY)) 4445 break; 4446 4447 ret = iterate_inode_ref(root, path, &found_key, 0, cb, sctx); 4448 if (ret < 0) 4449 goto out; 4450 4451 path->slots[0]++; 4452 } 4453 btrfs_release_path(path); 4454 4455 /* 4456 * We don't actually care about pending_move as we are simply 4457 * re-creating this inode and will be rename'ing it into place once we 4458 * rename the parent directory. 4459 */ 4460 ret = process_recorded_refs(sctx, &pending_move); 4461 out: 4462 btrfs_free_path(path); 4463 return ret; 4464 } 4465 4466 static int send_set_xattr(struct send_ctx *sctx, 4467 struct fs_path *path, 4468 const char *name, int name_len, 4469 const char *data, int data_len) 4470 { 4471 int ret = 0; 4472 4473 ret = begin_cmd(sctx, BTRFS_SEND_C_SET_XATTR); 4474 if (ret < 0) 4475 goto out; 4476 4477 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path); 4478 TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len); 4479 TLV_PUT(sctx, BTRFS_SEND_A_XATTR_DATA, data, data_len); 4480 4481 ret = send_cmd(sctx); 4482 4483 tlv_put_failure: 4484 out: 4485 return ret; 4486 } 4487 4488 static int send_remove_xattr(struct send_ctx *sctx, 4489 struct fs_path *path, 4490 const char *name, int name_len) 4491 { 4492 int ret = 0; 4493 4494 ret = begin_cmd(sctx, BTRFS_SEND_C_REMOVE_XATTR); 4495 if (ret < 0) 4496 goto out; 4497 4498 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path); 4499 TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len); 4500 4501 ret = send_cmd(sctx); 4502 4503 tlv_put_failure: 4504 out: 4505 return ret; 4506 } 4507 4508 static int __process_new_xattr(int num, struct btrfs_key *di_key, 4509 const char *name, int name_len, 4510 const char *data, int data_len, 4511 u8 type, void *ctx) 4512 { 4513 int ret; 4514 struct send_ctx *sctx = ctx; 4515 struct fs_path *p; 4516 struct posix_acl_xattr_header dummy_acl; 4517 4518 p = fs_path_alloc(); 4519 if (!p) 4520 return -ENOMEM; 4521 4522 /* 4523 * This hack is needed because empty acls are stored as zero byte 4524 * data in xattrs. The problem is that receiving these zero byte 4525 * acls will fail later.
To fix this, we send a dummy acl list that 4526 * only contains the version number and no entries. 4527 */ 4528 if (!strncmp(name, XATTR_NAME_POSIX_ACL_ACCESS, name_len) || 4529 !strncmp(name, XATTR_NAME_POSIX_ACL_DEFAULT, name_len)) { 4530 if (data_len == 0) { 4531 dummy_acl.a_version = 4532 cpu_to_le32(POSIX_ACL_XATTR_VERSION); 4533 data = (char *)&dummy_acl; 4534 data_len = sizeof(dummy_acl); 4535 } 4536 } 4537 4538 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); 4539 if (ret < 0) 4540 goto out; 4541 4542 ret = send_set_xattr(sctx, p, name, name_len, data, data_len); 4543 4544 out: 4545 fs_path_free(p); 4546 return ret; 4547 } 4548 4549 static int __process_deleted_xattr(int num, struct btrfs_key *di_key, 4550 const char *name, int name_len, 4551 const char *data, int data_len, 4552 u8 type, void *ctx) 4553 { 4554 int ret; 4555 struct send_ctx *sctx = ctx; 4556 struct fs_path *p; 4557 4558 p = fs_path_alloc(); 4559 if (!p) 4560 return -ENOMEM; 4561 4562 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); 4563 if (ret < 0) 4564 goto out; 4565 4566 ret = send_remove_xattr(sctx, p, name, name_len); 4567 4568 out: 4569 fs_path_free(p); 4570 return ret; 4571 } 4572 4573 static int process_new_xattr(struct send_ctx *sctx) 4574 { 4575 int ret = 0; 4576 4577 ret = iterate_dir_item(sctx->send_root, sctx->left_path, 4578 __process_new_xattr, sctx); 4579 4580 return ret; 4581 } 4582 4583 static int process_deleted_xattr(struct send_ctx *sctx) 4584 { 4585 return iterate_dir_item(sctx->parent_root, sctx->right_path, 4586 __process_deleted_xattr, sctx); 4587 } 4588 4589 struct find_xattr_ctx { 4590 const char *name; 4591 int name_len; 4592 int found_idx; 4593 char *found_data; 4594 int found_data_len; 4595 }; 4596 4597 static int __find_xattr(int num, struct btrfs_key *di_key, 4598 const char *name, int name_len, 4599 const char *data, int data_len, 4600 u8 type, void *vctx) 4601 { 4602 struct find_xattr_ctx *ctx = vctx; 4603 4604 if (name_len == ctx->name_len && 4605 strncmp(name, ctx->name, name_len) == 0) { 4606 ctx->found_idx = num; 4607 ctx->found_data_len = data_len; 4608 ctx->found_data = kmemdup(data, data_len, GFP_KERNEL); 4609 if (!ctx->found_data) 4610 return -ENOMEM; 4611 return 1; 4612 } 4613 return 0; 4614 } 4615 4616 static int find_xattr(struct btrfs_root *root, 4617 struct btrfs_path *path, 4618 struct btrfs_key *key, 4619 const char *name, int name_len, 4620 char **data, int *data_len) 4621 { 4622 int ret; 4623 struct find_xattr_ctx ctx; 4624 4625 ctx.name = name; 4626 ctx.name_len = name_len; 4627 ctx.found_idx = -1; 4628 ctx.found_data = NULL; 4629 ctx.found_data_len = 0; 4630 4631 ret = iterate_dir_item(root, path, __find_xattr, &ctx); 4632 if (ret < 0) 4633 return ret; 4634 4635 if (ctx.found_idx == -1) 4636 return -ENOENT; 4637 if (data) { 4638 *data = ctx.found_data; 4639 *data_len = ctx.found_data_len; 4640 } else { 4641 kfree(ctx.found_data); 4642 } 4643 return ctx.found_idx; 4644 } 4645 4646 4647 static int __process_changed_new_xattr(int num, struct btrfs_key *di_key, 4648 const char *name, int name_len, 4649 const char *data, int data_len, 4650 u8 type, void *ctx) 4651 { 4652 int ret; 4653 struct send_ctx *sctx = ctx; 4654 char *found_data = NULL; 4655 int found_data_len = 0; 4656 4657 ret = find_xattr(sctx->parent_root, sctx->right_path, 4658 sctx->cmp_key, name, name_len, &found_data, 4659 &found_data_len); 4660 if (ret == -ENOENT) { 4661 ret = __process_new_xattr(num, di_key, name, name_len, data, 4662 data_len, type, ctx); 4663 } else if 
(ret >= 0) { 4664 if (data_len != found_data_len || 4665 memcmp(data, found_data, data_len)) { 4666 ret = __process_new_xattr(num, di_key, name, name_len, 4667 data, data_len, type, ctx); 4668 } else { 4669 ret = 0; 4670 } 4671 } 4672 4673 kfree(found_data); 4674 return ret; 4675 } 4676 4677 static int __process_changed_deleted_xattr(int num, struct btrfs_key *di_key, 4678 const char *name, int name_len, 4679 const char *data, int data_len, 4680 u8 type, void *ctx) 4681 { 4682 int ret; 4683 struct send_ctx *sctx = ctx; 4684 4685 ret = find_xattr(sctx->send_root, sctx->left_path, sctx->cmp_key, 4686 name, name_len, NULL, NULL); 4687 if (ret == -ENOENT) 4688 ret = __process_deleted_xattr(num, di_key, name, name_len, data, 4689 data_len, type, ctx); 4690 else if (ret >= 0) 4691 ret = 0; 4692 4693 return ret; 4694 } 4695 4696 static int process_changed_xattr(struct send_ctx *sctx) 4697 { 4698 int ret = 0; 4699 4700 ret = iterate_dir_item(sctx->send_root, sctx->left_path, 4701 __process_changed_new_xattr, sctx); 4702 if (ret < 0) 4703 goto out; 4704 ret = iterate_dir_item(sctx->parent_root, sctx->right_path, 4705 __process_changed_deleted_xattr, sctx); 4706 4707 out: 4708 return ret; 4709 } 4710 4711 static int process_all_new_xattrs(struct send_ctx *sctx) 4712 { 4713 int ret; 4714 struct btrfs_root *root; 4715 struct btrfs_path *path; 4716 struct btrfs_key key; 4717 struct btrfs_key found_key; 4718 struct extent_buffer *eb; 4719 int slot; 4720 4721 path = alloc_path_for_send(); 4722 if (!path) 4723 return -ENOMEM; 4724 4725 root = sctx->send_root; 4726 4727 key.objectid = sctx->cmp_key->objectid; 4728 key.type = BTRFS_XATTR_ITEM_KEY; 4729 key.offset = 0; 4730 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 4731 if (ret < 0) 4732 goto out; 4733 4734 while (1) { 4735 eb = path->nodes[0]; 4736 slot = path->slots[0]; 4737 if (slot >= btrfs_header_nritems(eb)) { 4738 ret = btrfs_next_leaf(root, path); 4739 if (ret < 0) { 4740 goto out; 4741 } else if (ret > 0) { 4742 ret = 0; 4743 break; 4744 } 4745 continue; 4746 } 4747 4748 btrfs_item_key_to_cpu(eb, &found_key, slot); 4749 if (found_key.objectid != key.objectid || 4750 found_key.type != key.type) { 4751 ret = 0; 4752 goto out; 4753 } 4754 4755 ret = iterate_dir_item(root, path, __process_new_xattr, sctx); 4756 if (ret < 0) 4757 goto out; 4758 4759 path->slots[0]++; 4760 } 4761 4762 out: 4763 btrfs_free_path(path); 4764 return ret; 4765 } 4766 4767 static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len) 4768 { 4769 struct btrfs_root *root = sctx->send_root; 4770 struct btrfs_fs_info *fs_info = root->fs_info; 4771 struct inode *inode; 4772 struct page *page; 4773 char *addr; 4774 struct btrfs_key key; 4775 pgoff_t index = offset >> PAGE_SHIFT; 4776 pgoff_t last_index; 4777 unsigned pg_offset = offset & ~PAGE_MASK; 4778 ssize_t ret = 0; 4779 4780 key.objectid = sctx->cur_ino; 4781 key.type = BTRFS_INODE_ITEM_KEY; 4782 key.offset = 0; 4783 4784 inode = btrfs_iget(fs_info->sb, &key, root, NULL); 4785 if (IS_ERR(inode)) 4786 return PTR_ERR(inode); 4787 4788 if (offset + len > i_size_read(inode)) { 4789 if (offset > i_size_read(inode)) 4790 len = 0; 4791 else 4792 len = i_size_read(inode) - offset; 4793 } 4794 if (len == 0) 4795 goto out; 4796 4797 last_index = (offset + len - 1) >> PAGE_SHIFT; 4798 4799 /* initial readahead */ 4800 memset(&sctx->ra, 0, sizeof(struct file_ra_state)); 4801 file_ra_state_init(&sctx->ra, inode->i_mapping); 4802 4803 while (index <= last_index) { 4804 unsigned cur_len = min_t(unsigned, len, 4805
PAGE_SIZE - pg_offset); 4806 4807 page = find_lock_page(inode->i_mapping, index); 4808 if (!page) { 4809 page_cache_sync_readahead(inode->i_mapping, &sctx->ra, 4810 NULL, index, last_index + 1 - index); 4811 4812 page = find_or_create_page(inode->i_mapping, index, 4813 GFP_KERNEL); 4814 if (!page) { 4815 ret = -ENOMEM; 4816 break; 4817 } 4818 } 4819 4820 if (PageReadahead(page)) { 4821 page_cache_async_readahead(inode->i_mapping, &sctx->ra, 4822 NULL, page, index, last_index + 1 - index); 4823 } 4824 4825 if (!PageUptodate(page)) { 4826 btrfs_readpage(NULL, page); 4827 lock_page(page); 4828 if (!PageUptodate(page)) { 4829 unlock_page(page); 4830 put_page(page); 4831 ret = -EIO; 4832 break; 4833 } 4834 } 4835 4836 addr = kmap(page); 4837 memcpy(sctx->read_buf + ret, addr + pg_offset, cur_len); 4838 kunmap(page); 4839 unlock_page(page); 4840 put_page(page); 4841 index++; 4842 pg_offset = 0; 4843 len -= cur_len; 4844 ret += cur_len; 4845 } 4846 out: 4847 iput(inode); 4848 return ret; 4849 } 4850 4851 /* 4852 * Read some bytes from the current inode/file and send a write command to 4853 * user space. 4854 */ 4855 static int send_write(struct send_ctx *sctx, u64 offset, u32 len) 4856 { 4857 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; 4858 int ret = 0; 4859 struct fs_path *p; 4860 ssize_t num_read = 0; 4861 4862 p = fs_path_alloc(); 4863 if (!p) 4864 return -ENOMEM; 4865 4866 btrfs_debug(fs_info, "send_write offset=%llu, len=%d", offset, len); 4867 4868 num_read = fill_read_buf(sctx, offset, len); 4869 if (num_read <= 0) { 4870 if (num_read < 0) 4871 ret = num_read; 4872 goto out; 4873 } 4874 4875 ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE); 4876 if (ret < 0) 4877 goto out; 4878 4879 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); 4880 if (ret < 0) 4881 goto out; 4882 4883 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); 4884 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset); 4885 TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, num_read); 4886 4887 ret = send_cmd(sctx); 4888 4889 tlv_put_failure: 4890 out: 4891 fs_path_free(p); 4892 if (ret < 0) 4893 return ret; 4894 return num_read; 4895 } 4896 4897 /* 4898 * Send a clone command to user space. 
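 *
 * Stream layout sketch, mirroring the TLV_PUT calls below:
 *
 *   BTRFS_SEND_C_CLONE
 *     BTRFS_SEND_A_FILE_OFFSET    destination file offset
 *     BTRFS_SEND_A_CLONE_LEN      number of bytes to clone
 *     BTRFS_SEND_A_PATH           destination file
 *     BTRFS_SEND_A_CLONE_UUID     source root uuid (received_uuid if set)
 *     BTRFS_SEND_A_CLONE_CTRANSID source root ctransid
 *     BTRFS_SEND_A_CLONE_PATH     source file
 *     BTRFS_SEND_A_CLONE_OFFSET   source file offset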
4899 */ 4900 static int send_clone(struct send_ctx *sctx, 4901 u64 offset, u32 len, 4902 struct clone_root *clone_root) 4903 { 4904 int ret = 0; 4905 struct fs_path *p; 4906 u64 gen; 4907 4908 btrfs_debug(sctx->send_root->fs_info, 4909 "send_clone offset=%llu, len=%d, clone_root=%llu, clone_inode=%llu, clone_offset=%llu", 4910 offset, len, clone_root->root->objectid, clone_root->ino, 4911 clone_root->offset); 4912 4913 p = fs_path_alloc(); 4914 if (!p) 4915 return -ENOMEM; 4916 4917 ret = begin_cmd(sctx, BTRFS_SEND_C_CLONE); 4918 if (ret < 0) 4919 goto out; 4920 4921 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); 4922 if (ret < 0) 4923 goto out; 4924 4925 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset); 4926 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_LEN, len); 4927 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); 4928 4929 if (clone_root->root == sctx->send_root) { 4930 ret = get_inode_info(sctx->send_root, clone_root->ino, NULL, 4931 &gen, NULL, NULL, NULL, NULL); 4932 if (ret < 0) 4933 goto out; 4934 ret = get_cur_path(sctx, clone_root->ino, gen, p); 4935 } else { 4936 ret = get_inode_path(clone_root->root, clone_root->ino, p); 4937 } 4938 if (ret < 0) 4939 goto out; 4940 4941 /* 4942 * If the parent we're using has a received_uuid set then use that as 4943 * our clone source as that is what we will look for when doing a 4944 * receive. 4945 * 4946 * This covers the case that we create a snapshot off of a received 4947 * subvolume and then use that as the parent and try to receive on a 4948 * different host. 4949 */ 4950 if (!btrfs_is_empty_uuid(clone_root->root->root_item.received_uuid)) 4951 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID, 4952 clone_root->root->root_item.received_uuid); 4953 else 4954 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID, 4955 clone_root->root->root_item.uuid); 4956 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID, 4957 le64_to_cpu(clone_root->root->root_item.ctransid)); 4958 TLV_PUT_PATH(sctx, BTRFS_SEND_A_CLONE_PATH, p); 4959 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_OFFSET, 4960 clone_root->offset); 4961 4962 ret = send_cmd(sctx); 4963 4964 tlv_put_failure: 4965 out: 4966 fs_path_free(p); 4967 return ret; 4968 } 4969 4970 /* 4971 * Send an update extent command to user space. 4972 */ 4973 static int send_update_extent(struct send_ctx *sctx, 4974 u64 offset, u32 len) 4975 { 4976 int ret = 0; 4977 struct fs_path *p; 4978 4979 p = fs_path_alloc(); 4980 if (!p) 4981 return -ENOMEM; 4982 4983 ret = begin_cmd(sctx, BTRFS_SEND_C_UPDATE_EXTENT); 4984 if (ret < 0) 4985 goto out; 4986 4987 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); 4988 if (ret < 0) 4989 goto out; 4990 4991 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); 4992 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset); 4993 TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, len); 4994 4995 ret = send_cmd(sctx); 4996 4997 tlv_put_failure: 4998 out: 4999 fs_path_free(p); 5000 return ret; 5001 } 5002 5003 static int send_hole(struct send_ctx *sctx, u64 end) 5004 { 5005 struct fs_path *p = NULL; 5006 u64 offset = sctx->cur_inode_last_extent; 5007 u64 len; 5008 int ret = 0; 5009 5010 /* 5011 * A hole that starts at EOF or beyond it. Since we do not yet support 5012 * fallocate (for extent preallocation and hole punching), sending a 5013 * write of zeroes starting at EOF or beyond would later require issuing 5014 * a truncate operation which would undo the write and achieve nothing. 
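 *
 * Worked example: for a 64K file whose last extent ends at 64K, a hole
 * request for the range [64K, 128K) is a no-op here - writing zeroes
 * beyond EOF would first grow the file to 128K and then require a
 * truncate back to 64K.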
5015 */ 5016 if (offset >= sctx->cur_inode_size) 5017 return 0; 5018 5019 if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA) 5020 return send_update_extent(sctx, offset, end - offset); 5021 5022 p = fs_path_alloc(); 5023 if (!p) 5024 return -ENOMEM; 5025 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); 5026 if (ret < 0) 5027 goto tlv_put_failure; 5028 memset(sctx->read_buf, 0, BTRFS_SEND_READ_SIZE); 5029 while (offset < end) { 5030 len = min_t(u64, end - offset, BTRFS_SEND_READ_SIZE); 5031 5032 ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE); 5033 if (ret < 0) 5034 break; 5035 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); 5036 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset); 5037 TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, len); 5038 ret = send_cmd(sctx); 5039 if (ret < 0) 5040 break; 5041 offset += len; 5042 } 5043 sctx->cur_inode_next_write_offset = offset; 5044 tlv_put_failure: 5045 fs_path_free(p); 5046 return ret; 5047 } 5048 5049 static int send_extent_data(struct send_ctx *sctx, 5050 const u64 offset, 5051 const u64 len) 5052 { 5053 u64 sent = 0; 5054 5055 if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA) 5056 return send_update_extent(sctx, offset, len); 5057 5058 while (sent < len) { 5059 u64 size = len - sent; 5060 int ret; 5061 5062 if (size > BTRFS_SEND_READ_SIZE) 5063 size = BTRFS_SEND_READ_SIZE; 5064 ret = send_write(sctx, offset + sent, size); 5065 if (ret < 0) 5066 return ret; 5067 if (!ret) 5068 break; 5069 sent += ret; 5070 } 5071 return 0; 5072 } 5073 5074 static int clone_range(struct send_ctx *sctx, 5075 struct clone_root *clone_root, 5076 const u64 disk_byte, 5077 u64 data_offset, 5078 u64 offset, 5079 u64 len) 5080 { 5081 struct btrfs_path *path; 5082 struct btrfs_key key; 5083 int ret; 5084 5085 /* 5086 * Prevent cloning from a zero offset with a length matching the sector 5087 * size because in some scenarios this will make the receiver fail. 5088 * 5089 * For example, if in the source filesystem the extent at offset 0 5090 * has a length of sectorsize and it was written using direct IO, then 5091 * it can never be an inline extent (even if compression is enabled). 5092 * Then this extent can be cloned in the original filesystem to a non 5093 * zero file offset, but it may not be possible to clone in the 5094 * destination filesystem because it can be inlined due to compression 5095 * on the destination filesystem (as the receiver's write operations are 5096 * always done using buffered IO). The same happens when the original 5097 * filesystem does not have compression enabled but the destination 5098 * filesystem has. 5099 */ 5100 if (clone_root->offset == 0 && 5101 len == sctx->send_root->fs_info->sectorsize) 5102 return send_extent_data(sctx, offset, len); 5103 5104 path = alloc_path_for_send(); 5105 if (!path) 5106 return -ENOMEM; 5107 5108 /* 5109 * We can't send a clone operation for the entire range if we find 5110 * extent items in the respective range in the source file that 5111 * refer to different extents or if we find holes. 5112 * So check for that and do a mix of clone and regular write/copy 5113 * operations if needed. 
5114 * 5115 * Example: 5116 * 5117 * mkfs.btrfs -f /dev/sda 5118 * mount /dev/sda /mnt 5119 * xfs_io -f -c "pwrite -S 0xaa 0K 100K" /mnt/foo 5120 * cp --reflink=always /mnt/foo /mnt/bar 5121 * xfs_io -c "pwrite -S 0xbb 50K 50K" /mnt/foo 5122 * btrfs subvolume snapshot -r /mnt /mnt/snap 5123 * 5124 * If when we send the snapshot and we are processing file bar (which 5125 * has a higher inode number than foo) we blindly send a clone operation 5126 * for the [0, 100K[ range from foo to bar, the receiver ends up getting 5127 * a file bar that matches the content of file foo - iow, doesn't match 5128 * the content from bar in the original filesystem. 5129 */ 5130 key.objectid = clone_root->ino; 5131 key.type = BTRFS_EXTENT_DATA_KEY; 5132 key.offset = clone_root->offset; 5133 ret = btrfs_search_slot(NULL, clone_root->root, &key, path, 0, 0); 5134 if (ret < 0) 5135 goto out; 5136 if (ret > 0 && path->slots[0] > 0) { 5137 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1); 5138 if (key.objectid == clone_root->ino && 5139 key.type == BTRFS_EXTENT_DATA_KEY) 5140 path->slots[0]--; 5141 } 5142 5143 while (true) { 5144 struct extent_buffer *leaf = path->nodes[0]; 5145 int slot = path->slots[0]; 5146 struct btrfs_file_extent_item *ei; 5147 u8 type; 5148 u64 ext_len; 5149 u64 clone_len; 5150 5151 if (slot >= btrfs_header_nritems(leaf)) { 5152 ret = btrfs_next_leaf(clone_root->root, path); 5153 if (ret < 0) 5154 goto out; 5155 else if (ret > 0) 5156 break; 5157 continue; 5158 } 5159 5160 btrfs_item_key_to_cpu(leaf, &key, slot); 5161 5162 /* 5163 * We might have an implicit trailing hole (NO_HOLES feature 5164 * enabled). We deal with it after leaving this loop. 5165 */ 5166 if (key.objectid != clone_root->ino || 5167 key.type != BTRFS_EXTENT_DATA_KEY) 5168 break; 5169 5170 ei = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); 5171 type = btrfs_file_extent_type(leaf, ei); 5172 if (type == BTRFS_FILE_EXTENT_INLINE) { 5173 ext_len = btrfs_file_extent_ram_bytes(leaf, ei); 5174 ext_len = PAGE_ALIGN(ext_len); 5175 } else { 5176 ext_len = btrfs_file_extent_num_bytes(leaf, ei); 5177 } 5178 5179 if (key.offset + ext_len <= clone_root->offset) 5180 goto next; 5181 5182 if (key.offset > clone_root->offset) { 5183 /* Implicit hole, NO_HOLES feature enabled. 
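 * With NO_HOLES there are no explicit hole extent items, so a gap
 * between the end of the previous extent and key.offset means a
 * hole in the clone source. E.g. if the source extent ends at 4K
 * and the next extent item starts at 16K, the matching [4K, 16K)
 * part of the range below cannot be cloned and falls back to
 * send_extent_data(), i.e. plain write commands.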
*/ 5184 u64 hole_len = key.offset - clone_root->offset; 5185 5186 if (hole_len > len) 5187 hole_len = len; 5188 ret = send_extent_data(sctx, offset, hole_len); 5189 if (ret < 0) 5190 goto out; 5191 5192 len -= hole_len; 5193 if (len == 0) 5194 break; 5195 offset += hole_len; 5196 clone_root->offset += hole_len; 5197 data_offset += hole_len; 5198 } 5199 5200 if (key.offset >= clone_root->offset + len) 5201 break; 5202 5203 clone_len = min_t(u64, ext_len, len); 5204 5205 if (btrfs_file_extent_disk_bytenr(leaf, ei) == disk_byte && 5206 btrfs_file_extent_offset(leaf, ei) == data_offset) 5207 ret = send_clone(sctx, offset, clone_len, clone_root); 5208 else 5209 ret = send_extent_data(sctx, offset, clone_len); 5210 5211 if (ret < 0) 5212 goto out; 5213 5214 len -= clone_len; 5215 if (len == 0) 5216 break; 5217 offset += clone_len; 5218 clone_root->offset += clone_len; 5219 data_offset += clone_len; 5220 next: 5221 path->slots[0]++; 5222 } 5223 5224 if (len > 0) 5225 ret = send_extent_data(sctx, offset, len); 5226 else 5227 ret = 0; 5228 out: 5229 btrfs_free_path(path); 5230 return ret; 5231 } 5232 5233 static int send_write_or_clone(struct send_ctx *sctx, 5234 struct btrfs_path *path, 5235 struct btrfs_key *key, 5236 struct clone_root *clone_root) 5237 { 5238 int ret = 0; 5239 struct btrfs_file_extent_item *ei; 5240 u64 offset = key->offset; 5241 u64 len; 5242 u8 type; 5243 u64 bs = sctx->send_root->fs_info->sb->s_blocksize; 5244 5245 ei = btrfs_item_ptr(path->nodes[0], path->slots[0], 5246 struct btrfs_file_extent_item); 5247 type = btrfs_file_extent_type(path->nodes[0], ei); 5248 if (type == BTRFS_FILE_EXTENT_INLINE) { 5249 len = btrfs_file_extent_ram_bytes(path->nodes[0], ei); 5250 /* 5251 * it is possible the inline item won't cover the whole page, 5252 * but there may be items after this page. 
Make 5253 * sure to send the whole thing 5254 */ 5255 len = PAGE_ALIGN(len); 5256 } else { 5257 len = btrfs_file_extent_num_bytes(path->nodes[0], ei); 5258 } 5259 5260 if (offset >= sctx->cur_inode_size) { 5261 ret = 0; 5262 goto out; 5263 } 5264 if (offset + len > sctx->cur_inode_size) 5265 len = sctx->cur_inode_size - offset; 5266 if (len == 0) { 5267 ret = 0; 5268 goto out; 5269 } 5270 5271 if (clone_root && IS_ALIGNED(offset + len, bs)) { 5272 u64 disk_byte; 5273 u64 data_offset; 5274 5275 disk_byte = btrfs_file_extent_disk_bytenr(path->nodes[0], ei); 5276 data_offset = btrfs_file_extent_offset(path->nodes[0], ei); 5277 ret = clone_range(sctx, clone_root, disk_byte, data_offset, 5278 offset, len); 5279 } else { 5280 ret = send_extent_data(sctx, offset, len); 5281 } 5282 sctx->cur_inode_next_write_offset = offset + len; 5283 out: 5284 return ret; 5285 } 5286 5287 static int is_extent_unchanged(struct send_ctx *sctx, 5288 struct btrfs_path *left_path, 5289 struct btrfs_key *ekey) 5290 { 5291 int ret = 0; 5292 struct btrfs_key key; 5293 struct btrfs_path *path = NULL; 5294 struct extent_buffer *eb; 5295 int slot; 5296 struct btrfs_key found_key; 5297 struct btrfs_file_extent_item *ei; 5298 u64 left_disknr; 5299 u64 right_disknr; 5300 u64 left_offset; 5301 u64 right_offset; 5302 u64 left_offset_fixed; 5303 u64 left_len; 5304 u64 right_len; 5305 u64 left_gen; 5306 u64 right_gen; 5307 u8 left_type; 5308 u8 right_type; 5309 5310 path = alloc_path_for_send(); 5311 if (!path) 5312 return -ENOMEM; 5313 5314 eb = left_path->nodes[0]; 5315 slot = left_path->slots[0]; 5316 ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item); 5317 left_type = btrfs_file_extent_type(eb, ei); 5318 5319 if (left_type != BTRFS_FILE_EXTENT_REG) { 5320 ret = 0; 5321 goto out; 5322 } 5323 left_disknr = btrfs_file_extent_disk_bytenr(eb, ei); 5324 left_len = btrfs_file_extent_num_bytes(eb, ei); 5325 left_offset = btrfs_file_extent_offset(eb, ei); 5326 left_gen = btrfs_file_extent_generation(eb, ei); 5327 5328 /* 5329 * Following comments will refer to these graphics. L is the left 5330 * extents which we are checking at the moment. 1-8 are the right 5331 * extents that we iterate. 5332 * 5333 * |-----L-----| 5334 * |-1-|-2a-|-3-|-4-|-5-|-6-| 5335 * 5336 * |-----L-----| 5337 * |--1--|-2b-|...(same as above) 5338 * 5339 * Alternative situation. Happens on files where extents got split. 5340 * |-----L-----| 5341 * |-----------7-----------|-6-| 5342 * 5343 * Alternative situation. Happens on files which got larger. 5344 * |-----L-----| 5345 * |-8-| 5346 * Nothing follows after 8. 5347 */ 5348 5349 key.objectid = ekey->objectid; 5350 key.type = BTRFS_EXTENT_DATA_KEY; 5351 key.offset = ekey->offset; 5352 ret = btrfs_search_slot_for_read(sctx->parent_root, &key, path, 0, 0); 5353 if (ret < 0) 5354 goto out; 5355 if (ret) { 5356 ret = 0; 5357 goto out; 5358 } 5359 5360 /* 5361 * Handle special case where the right side has no extents at all. 5362 */ 5363 eb = path->nodes[0]; 5364 slot = path->slots[0]; 5365 btrfs_item_key_to_cpu(eb, &found_key, slot); 5366 if (found_key.objectid != key.objectid || 5367 found_key.type != key.type) { 5368 /* If we're a hole then just pretend nothing changed */ 5369 ret = (left_disknr) ? 0 : 1; 5370 goto out; 5371 } 5372 5373 /* 5374 * We're now on 2a, 2b or 7. 
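 *
 * Sketch of the loop that follows: walk the right extents one by one
 * and check that each maps to the same bytes of the same disk extent
 * (disknr, adjusted offset, generation) that L maps to. Any mismatch,
 * gap between items, or inline extent on the right means the extent
 * changed.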
5375 */ 5376 key = found_key; 5377 while (key.offset < ekey->offset + left_len) { 5378 ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item); 5379 right_type = btrfs_file_extent_type(eb, ei); 5380 if (right_type != BTRFS_FILE_EXTENT_REG && 5381 right_type != BTRFS_FILE_EXTENT_INLINE) { 5382 ret = 0; 5383 goto out; 5384 } 5385 5386 if (right_type == BTRFS_FILE_EXTENT_INLINE) { 5387 right_len = btrfs_file_extent_ram_bytes(eb, ei); 5388 right_len = PAGE_ALIGN(right_len); 5389 } else { 5390 right_len = btrfs_file_extent_num_bytes(eb, ei); 5391 } 5392 5393 /* 5394 * Are we at extent 8? If yes, we know the extent is changed. 5395 * This may only happen on the first iteration. 5396 */ 5397 if (found_key.offset + right_len <= ekey->offset) { 5398 /* If we're a hole just pretend nothing changed */ 5399 ret = (left_disknr) ? 0 : 1; 5400 goto out; 5401 } 5402 5403 /* 5404 * We just wanted to see if when we have an inline extent, what 5405 * follows it is a regular extent (wanted to check the above 5406 * condition for inline extents too). This should normally not 5407 * happen but it's possible for example when we have an inline 5408 * compressed extent representing data with a size matching 5409 * the page size (currently the same as sector size). 5410 */ 5411 if (right_type == BTRFS_FILE_EXTENT_INLINE) { 5412 ret = 0; 5413 goto out; 5414 } 5415 5416 right_disknr = btrfs_file_extent_disk_bytenr(eb, ei); 5417 right_offset = btrfs_file_extent_offset(eb, ei); 5418 right_gen = btrfs_file_extent_generation(eb, ei); 5419 5420 left_offset_fixed = left_offset; 5421 if (key.offset < ekey->offset) { 5422 /* Fix the right offset for 2a and 7. */ 5423 right_offset += ekey->offset - key.offset; 5424 } else { 5425 /* Fix the left offset for all behind 2a and 2b */ 5426 left_offset_fixed += key.offset - ekey->offset; 5427 } 5428 5429 /* 5430 * Check if we have the same extent. 5431 */ 5432 if (left_disknr != right_disknr || 5433 left_offset_fixed != right_offset || 5434 left_gen != right_gen) { 5435 ret = 0; 5436 goto out; 5437 } 5438 5439 /* 5440 * Go to the next extent. 5441 */ 5442 ret = btrfs_next_item(sctx->parent_root, path); 5443 if (ret < 0) 5444 goto out; 5445 if (!ret) { 5446 eb = path->nodes[0]; 5447 slot = path->slots[0]; 5448 btrfs_item_key_to_cpu(eb, &found_key, slot); 5449 } 5450 if (ret || found_key.objectid != key.objectid || 5451 found_key.type != key.type) { 5452 key.offset += right_len; 5453 break; 5454 } 5455 if (found_key.offset != key.offset + right_len) { 5456 ret = 0; 5457 goto out; 5458 } 5459 key = found_key; 5460 } 5461 5462 /* 5463 * We're now behind the left extent (treat as unchanged) or at the end 5464 * of the right side (treat as changed). 
5465 */ 5466 if (key.offset >= ekey->offset + left_len) 5467 ret = 1; 5468 else 5469 ret = 0; 5470 5471 5472 out: 5473 btrfs_free_path(path); 5474 return ret; 5475 } 5476 5477 static int get_last_extent(struct send_ctx *sctx, u64 offset) 5478 { 5479 struct btrfs_path *path; 5480 struct btrfs_root *root = sctx->send_root; 5481 struct btrfs_file_extent_item *fi; 5482 struct btrfs_key key; 5483 u64 extent_end; 5484 u8 type; 5485 int ret; 5486 5487 path = alloc_path_for_send(); 5488 if (!path) 5489 return -ENOMEM; 5490 5491 sctx->cur_inode_last_extent = 0; 5492 5493 key.objectid = sctx->cur_ino; 5494 key.type = BTRFS_EXTENT_DATA_KEY; 5495 key.offset = offset; 5496 ret = btrfs_search_slot_for_read(root, &key, path, 0, 1); 5497 if (ret < 0) 5498 goto out; 5499 ret = 0; 5500 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); 5501 if (key.objectid != sctx->cur_ino || key.type != BTRFS_EXTENT_DATA_KEY) 5502 goto out; 5503 5504 fi = btrfs_item_ptr(path->nodes[0], path->slots[0], 5505 struct btrfs_file_extent_item); 5506 type = btrfs_file_extent_type(path->nodes[0], fi); 5507 if (type == BTRFS_FILE_EXTENT_INLINE) { 5508 u64 size = btrfs_file_extent_ram_bytes(path->nodes[0], fi); 5509 extent_end = ALIGN(key.offset + size, 5510 sctx->send_root->fs_info->sectorsize); 5511 } else { 5512 extent_end = key.offset + 5513 btrfs_file_extent_num_bytes(path->nodes[0], fi); 5514 } 5515 sctx->cur_inode_last_extent = extent_end; 5516 out: 5517 btrfs_free_path(path); 5518 return ret; 5519 } 5520 5521 static int range_is_hole_in_parent(struct send_ctx *sctx, 5522 const u64 start, 5523 const u64 end) 5524 { 5525 struct btrfs_path *path; 5526 struct btrfs_key key; 5527 struct btrfs_root *root = sctx->parent_root; 5528 u64 search_start = start; 5529 int ret; 5530 5531 path = alloc_path_for_send(); 5532 if (!path) 5533 return -ENOMEM; 5534 5535 key.objectid = sctx->cur_ino; 5536 key.type = BTRFS_EXTENT_DATA_KEY; 5537 key.offset = search_start; 5538 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 5539 if (ret < 0) 5540 goto out; 5541 if (ret > 0 && path->slots[0] > 0) 5542 path->slots[0]--; 5543 5544 while (search_start < end) { 5545 struct extent_buffer *leaf = path->nodes[0]; 5546 int slot = path->slots[0]; 5547 struct btrfs_file_extent_item *fi; 5548 u64 extent_end; 5549 5550 if (slot >= btrfs_header_nritems(leaf)) { 5551 ret = btrfs_next_leaf(root, path); 5552 if (ret < 0) 5553 goto out; 5554 else if (ret > 0) 5555 break; 5556 continue; 5557 } 5558 5559 btrfs_item_key_to_cpu(leaf, &key, slot); 5560 if (key.objectid < sctx->cur_ino || 5561 key.type < BTRFS_EXTENT_DATA_KEY) 5562 goto next; 5563 if (key.objectid > sctx->cur_ino || 5564 key.type > BTRFS_EXTENT_DATA_KEY || 5565 key.offset >= end) 5566 break; 5567 5568 fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); 5569 if (btrfs_file_extent_type(leaf, fi) == 5570 BTRFS_FILE_EXTENT_INLINE) { 5571 u64 size = btrfs_file_extent_ram_bytes(leaf, fi); 5572 5573 extent_end = ALIGN(key.offset + size, 5574 root->fs_info->sectorsize); 5575 } else { 5576 extent_end = key.offset + 5577 btrfs_file_extent_num_bytes(leaf, fi); 5578 } 5579 if (extent_end <= start) 5580 goto next; 5581 if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0) { 5582 search_start = extent_end; 5583 goto next; 5584 } 5585 ret = 0; 5586 goto out; 5587 next: 5588 path->slots[0]++; 5589 } 5590 ret = 1; 5591 out: 5592 btrfs_free_path(path); 5593 return ret; 5594 } 5595 5596 static int maybe_send_hole(struct send_ctx *sctx, struct btrfs_path *path, 5597 struct btrfs_key *key) 5598 { 5599 
struct btrfs_file_extent_item *fi; 5600 u64 extent_end; 5601 u8 type; 5602 int ret = 0; 5603 5604 if (sctx->cur_ino != key->objectid || !need_send_hole(sctx)) 5605 return 0; 5606 5607 if (sctx->cur_inode_last_extent == (u64)-1) { 5608 ret = get_last_extent(sctx, key->offset - 1); 5609 if (ret) 5610 return ret; 5611 } 5612 5613 fi = btrfs_item_ptr(path->nodes[0], path->slots[0], 5614 struct btrfs_file_extent_item); 5615 type = btrfs_file_extent_type(path->nodes[0], fi); 5616 if (type == BTRFS_FILE_EXTENT_INLINE) { 5617 u64 size = btrfs_file_extent_ram_bytes(path->nodes[0], fi); 5618 extent_end = ALIGN(key->offset + size, 5619 sctx->send_root->fs_info->sectorsize); 5620 } else { 5621 extent_end = key->offset + 5622 btrfs_file_extent_num_bytes(path->nodes[0], fi); 5623 } 5624 5625 if (path->slots[0] == 0 && 5626 sctx->cur_inode_last_extent < key->offset) { 5627 /* 5628 * We might have skipped entire leafs that contained only 5629 * file extent items for our current inode. These leafs have 5630 * a generation number smaller (older) than the one in the 5631 * current leaf and the leaf our last extent came from, and 5632 * are located between these 2 leafs. 5633 */ 5634 ret = get_last_extent(sctx, key->offset - 1); 5635 if (ret) 5636 return ret; 5637 } 5638 5639 if (sctx->cur_inode_last_extent < key->offset) { 5640 ret = range_is_hole_in_parent(sctx, 5641 sctx->cur_inode_last_extent, 5642 key->offset); 5643 if (ret < 0) 5644 return ret; 5645 else if (ret == 0) 5646 ret = send_hole(sctx, key->offset); 5647 else 5648 ret = 0; 5649 } 5650 sctx->cur_inode_last_extent = extent_end; 5651 return ret; 5652 } 5653 5654 static int process_extent(struct send_ctx *sctx, 5655 struct btrfs_path *path, 5656 struct btrfs_key *key) 5657 { 5658 struct clone_root *found_clone = NULL; 5659 int ret = 0; 5660 5661 if (S_ISLNK(sctx->cur_inode_mode)) 5662 return 0; 5663 5664 if (sctx->parent_root && !sctx->cur_inode_new) { 5665 ret = is_extent_unchanged(sctx, path, key); 5666 if (ret < 0) 5667 goto out; 5668 if (ret) { 5669 ret = 0; 5670 goto out_hole; 5671 } 5672 } else { 5673 struct btrfs_file_extent_item *ei; 5674 u8 type; 5675 5676 ei = btrfs_item_ptr(path->nodes[0], path->slots[0], 5677 struct btrfs_file_extent_item); 5678 type = btrfs_file_extent_type(path->nodes[0], ei); 5679 if (type == BTRFS_FILE_EXTENT_PREALLOC || 5680 type == BTRFS_FILE_EXTENT_REG) { 5681 /* 5682 * The send spec does not have a prealloc command yet, 5683 * so just leave a hole for prealloc'ed extents until 5684 * we have enough commands queued up to justify rev'ing 5685 * the send spec. 5686 */ 5687 if (type == BTRFS_FILE_EXTENT_PREALLOC) { 5688 ret = 0; 5689 goto out; 5690 } 5691 5692 /* Have a hole, just skip it. 
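 * A disk_bytenr of 0 in a regular extent item is how btrfs
 * represents a hole, so there is no data to send for it.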
*/ 5693 if (btrfs_file_extent_disk_bytenr(path->nodes[0], ei) == 0) { 5694 ret = 0; 5695 goto out; 5696 } 5697 } 5698 } 5699 5700 ret = find_extent_clone(sctx, path, key->objectid, key->offset, 5701 sctx->cur_inode_size, &found_clone); 5702 if (ret != -ENOENT && ret < 0) 5703 goto out; 5704 5705 ret = send_write_or_clone(sctx, path, key, found_clone); 5706 if (ret) 5707 goto out; 5708 out_hole: 5709 ret = maybe_send_hole(sctx, path, key); 5710 out: 5711 return ret; 5712 } 5713 5714 static int process_all_extents(struct send_ctx *sctx) 5715 { 5716 int ret; 5717 struct btrfs_root *root; 5718 struct btrfs_path *path; 5719 struct btrfs_key key; 5720 struct btrfs_key found_key; 5721 struct extent_buffer *eb; 5722 int slot; 5723 5724 root = sctx->send_root; 5725 path = alloc_path_for_send(); 5726 if (!path) 5727 return -ENOMEM; 5728 5729 key.objectid = sctx->cmp_key->objectid; 5730 key.type = BTRFS_EXTENT_DATA_KEY; 5731 key.offset = 0; 5732 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 5733 if (ret < 0) 5734 goto out; 5735 5736 while (1) { 5737 eb = path->nodes[0]; 5738 slot = path->slots[0]; 5739 5740 if (slot >= btrfs_header_nritems(eb)) { 5741 ret = btrfs_next_leaf(root, path); 5742 if (ret < 0) { 5743 goto out; 5744 } else if (ret > 0) { 5745 ret = 0; 5746 break; 5747 } 5748 continue; 5749 } 5750 5751 btrfs_item_key_to_cpu(eb, &found_key, slot); 5752 5753 if (found_key.objectid != key.objectid || 5754 found_key.type != key.type) { 5755 ret = 0; 5756 goto out; 5757 } 5758 5759 ret = process_extent(sctx, path, &found_key); 5760 if (ret < 0) 5761 goto out; 5762 5763 path->slots[0]++; 5764 } 5765 5766 out: 5767 btrfs_free_path(path); 5768 return ret; 5769 } 5770 5771 static int process_recorded_refs_if_needed(struct send_ctx *sctx, int at_end, 5772 int *pending_move, 5773 int *refs_processed) 5774 { 5775 int ret = 0; 5776 5777 if (sctx->cur_ino == 0) 5778 goto out; 5779 if (!at_end && sctx->cur_ino == sctx->cmp_key->objectid && 5780 sctx->cmp_key->type <= BTRFS_INODE_EXTREF_KEY) 5781 goto out; 5782 if (list_empty(&sctx->new_refs) && list_empty(&sctx->deleted_refs)) 5783 goto out; 5784 5785 ret = process_recorded_refs(sctx, pending_move); 5786 if (ret < 0) 5787 goto out; 5788 5789 *refs_processed = 1; 5790 out: 5791 return ret; 5792 } 5793 5794 static int finish_inode_if_needed(struct send_ctx *sctx, int at_end) 5795 { 5796 int ret = 0; 5797 u64 left_mode; 5798 u64 left_uid; 5799 u64 left_gid; 5800 u64 right_mode; 5801 u64 right_uid; 5802 u64 right_gid; 5803 int need_chmod = 0; 5804 int need_chown = 0; 5805 int need_truncate = 1; 5806 int pending_move = 0; 5807 int refs_processed = 0; 5808 5809 if (sctx->ignore_cur_inode) 5810 return 0; 5811 5812 ret = process_recorded_refs_if_needed(sctx, at_end, &pending_move, 5813 &refs_processed); 5814 if (ret < 0) 5815 goto out; 5816 5817 /* 5818 * We have processed the refs and thus need to advance send_progress. 5819 * Now, calls to get_cur_xxx will take the updated refs of the current 5820 * inode into account. 5821 * 5822 * On the other hand, if our current inode is a directory and couldn't 5823 * be moved/renamed because its parent was renamed/moved too and it has 5824 * a higher inode number, we can only move/rename our current inode 5825 * after we moved/renamed its parent. Therefore in this case operate on 5826 * the old path (pre move/rename) of our current inode, and the 5827 * move/rename will be performed later. 
5828 */ 5829 if (refs_processed && !pending_move) 5830 sctx->send_progress = sctx->cur_ino + 1; 5831 5832 if (sctx->cur_ino == 0 || sctx->cur_inode_deleted) 5833 goto out; 5834 if (!at_end && sctx->cmp_key->objectid == sctx->cur_ino) 5835 goto out; 5836 5837 ret = get_inode_info(sctx->send_root, sctx->cur_ino, NULL, NULL, 5838 &left_mode, &left_uid, &left_gid, NULL); 5839 if (ret < 0) 5840 goto out; 5841 5842 if (!sctx->parent_root || sctx->cur_inode_new) { 5843 need_chown = 1; 5844 if (!S_ISLNK(sctx->cur_inode_mode)) 5845 need_chmod = 1; 5846 if (sctx->cur_inode_next_write_offset == sctx->cur_inode_size) 5847 need_truncate = 0; 5848 } else { 5849 u64 old_size; 5850 5851 ret = get_inode_info(sctx->parent_root, sctx->cur_ino, 5852 &old_size, NULL, &right_mode, &right_uid, 5853 &right_gid, NULL); 5854 if (ret < 0) 5855 goto out; 5856 5857 if (left_uid != right_uid || left_gid != right_gid) 5858 need_chown = 1; 5859 if (!S_ISLNK(sctx->cur_inode_mode) && left_mode != right_mode) 5860 need_chmod = 1; 5861 if ((old_size == sctx->cur_inode_size) || 5862 (sctx->cur_inode_size > old_size && 5863 sctx->cur_inode_next_write_offset == sctx->cur_inode_size)) 5864 need_truncate = 0; 5865 } 5866 5867 if (S_ISREG(sctx->cur_inode_mode)) { 5868 if (need_send_hole(sctx)) { 5869 if (sctx->cur_inode_last_extent == (u64)-1 || 5870 sctx->cur_inode_last_extent < 5871 sctx->cur_inode_size) { 5872 ret = get_last_extent(sctx, (u64)-1); 5873 if (ret) 5874 goto out; 5875 } 5876 if (sctx->cur_inode_last_extent < 5877 sctx->cur_inode_size) { 5878 ret = send_hole(sctx, sctx->cur_inode_size); 5879 if (ret) 5880 goto out; 5881 } 5882 } 5883 if (need_truncate) { 5884 ret = send_truncate(sctx, sctx->cur_ino, 5885 sctx->cur_inode_gen, 5886 sctx->cur_inode_size); 5887 if (ret < 0) 5888 goto out; 5889 } 5890 } 5891 5892 if (need_chown) { 5893 ret = send_chown(sctx, sctx->cur_ino, sctx->cur_inode_gen, 5894 left_uid, left_gid); 5895 if (ret < 0) 5896 goto out; 5897 } 5898 if (need_chmod) { 5899 ret = send_chmod(sctx, sctx->cur_ino, sctx->cur_inode_gen, 5900 left_mode); 5901 if (ret < 0) 5902 goto out; 5903 } 5904 5905 /* 5906 * If other directory inodes depended on our current directory 5907 * inode's move/rename, now do their move/rename operations. 5908 */ 5909 if (!is_waiting_for_move(sctx, sctx->cur_ino)) { 5910 ret = apply_children_dir_moves(sctx); 5911 if (ret) 5912 goto out; 5913 /* 5914 * Need to send that every time, no matter if it actually 5915 * changed between the two trees as we have done changes to 5916 * the inode before. If our inode is a directory and it's 5917 * waiting to be moved/renamed, we will send its utimes when 5918 * it's moved/renamed, therefore we don't need to do it here. 5919 */ 5920 sctx->send_progress = sctx->cur_ino + 1; 5921 ret = send_utimes(sctx, sctx->cur_ino, sctx->cur_inode_gen); 5922 if (ret < 0) 5923 goto out; 5924 } 5925 5926 out: 5927 return ret; 5928 } 5929 5930 struct parent_paths_ctx { 5931 struct list_head *refs; 5932 struct send_ctx *sctx; 5933 }; 5934 5935 static int record_parent_ref(int num, u64 dir, int index, struct fs_path *name, 5936 void *ctx) 5937 { 5938 struct parent_paths_ctx *ppctx = ctx; 5939 5940 return record_ref(ppctx->sctx->parent_root, dir, name, ppctx->sctx, 5941 ppctx->refs); 5942 } 5943 5944 /* 5945 * Issue unlink operations for all paths of the current inode found in the 5946 * parent snapshot. 
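 *
 * Scenario where this is needed (sketch using btrfs-progs commands):
 *
 *   btrfs property set /mnt/snap ro false
 *   exec 73</mnt/snap/foo        # hold an open fd against the file
 *   rm /mnt/snap/foo             # nlink drops to 0, inode item stays
 *   btrfs property set /mnt/snap ro true
 *   btrfs send -p /mnt/parent /mnt/snap
 *
 * The send snapshot then contains an orphan inode (nlink == 0) that
 * still has paths in the parent snapshot, and those paths have to be
 * unlinked by the stream even though the inode itself is ignored.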
5947 */ 5948 static int btrfs_unlink_all_paths(struct send_ctx *sctx) 5949 { 5950 LIST_HEAD(deleted_refs); 5951 struct btrfs_path *path; 5952 struct btrfs_key key; 5953 struct parent_paths_ctx ctx; 5954 int ret; 5955 5956 path = alloc_path_for_send(); 5957 if (!path) 5958 return -ENOMEM; 5959 5960 key.objectid = sctx->cur_ino; 5961 key.type = BTRFS_INODE_REF_KEY; 5962 key.offset = 0; 5963 ret = btrfs_search_slot(NULL, sctx->parent_root, &key, path, 0, 0); 5964 if (ret < 0) 5965 goto out; 5966 5967 ctx.refs = &deleted_refs; 5968 ctx.sctx = sctx; 5969 5970 while (true) { 5971 struct extent_buffer *eb = path->nodes[0]; 5972 int slot = path->slots[0]; 5973 5974 if (slot >= btrfs_header_nritems(eb)) { 5975 ret = btrfs_next_leaf(sctx->parent_root, path); 5976 if (ret < 0) 5977 goto out; 5978 else if (ret > 0) 5979 break; 5980 continue; 5981 } 5982 5983 btrfs_item_key_to_cpu(eb, &key, slot); 5984 if (key.objectid != sctx->cur_ino) 5985 break; 5986 if (key.type != BTRFS_INODE_REF_KEY && 5987 key.type != BTRFS_INODE_EXTREF_KEY) 5988 break; 5989 5990 ret = iterate_inode_ref(sctx->parent_root, path, &key, 1, 5991 record_parent_ref, &ctx); 5992 if (ret < 0) 5993 goto out; 5994 5995 path->slots[0]++; 5996 } 5997 5998 while (!list_empty(&deleted_refs)) { 5999 struct recorded_ref *ref; 6000 6001 ref = list_first_entry(&deleted_refs, struct recorded_ref, list); 6002 ret = send_unlink(sctx, ref->full_path); 6003 if (ret < 0) 6004 goto out; 6005 fs_path_free(ref->full_path); 6006 list_del(&ref->list); 6007 kfree(ref); 6008 } 6009 ret = 0; 6010 out: 6011 btrfs_free_path(path); 6012 if (ret) 6013 __free_recorded_refs(&deleted_refs); 6014 return ret; 6015 } 6016 6017 static int changed_inode(struct send_ctx *sctx, 6018 enum btrfs_compare_tree_result result) 6019 { 6020 int ret = 0; 6021 struct btrfs_key *key = sctx->cmp_key; 6022 struct btrfs_inode_item *left_ii = NULL; 6023 struct btrfs_inode_item *right_ii = NULL; 6024 u64 left_gen = 0; 6025 u64 right_gen = 0; 6026 6027 sctx->cur_ino = key->objectid; 6028 sctx->cur_inode_new_gen = 0; 6029 sctx->cur_inode_last_extent = (u64)-1; 6030 sctx->cur_inode_next_write_offset = 0; 6031 sctx->ignore_cur_inode = false; 6032 6033 /* 6034 * Set send_progress to current inode. This will tell all get_cur_xxx 6035 * functions that the current inode's refs are not updated yet. Later, 6036 * when process_recorded_refs is finished, it is set to cur_ino + 1. 6037 */ 6038 sctx->send_progress = sctx->cur_ino; 6039 6040 if (result == BTRFS_COMPARE_TREE_NEW || 6041 result == BTRFS_COMPARE_TREE_CHANGED) { 6042 left_ii = btrfs_item_ptr(sctx->left_path->nodes[0], 6043 sctx->left_path->slots[0], 6044 struct btrfs_inode_item); 6045 left_gen = btrfs_inode_generation(sctx->left_path->nodes[0], 6046 left_ii); 6047 } else { 6048 right_ii = btrfs_item_ptr(sctx->right_path->nodes[0], 6049 sctx->right_path->slots[0], 6050 struct btrfs_inode_item); 6051 right_gen = btrfs_inode_generation(sctx->right_path->nodes[0], 6052 right_ii); 6053 } 6054 if (result == BTRFS_COMPARE_TREE_CHANGED) { 6055 right_ii = btrfs_item_ptr(sctx->right_path->nodes[0], 6056 sctx->right_path->slots[0], 6057 struct btrfs_inode_item); 6058 6059 right_gen = btrfs_inode_generation(sctx->right_path->nodes[0], 6060 right_ii); 6061 6062 /* 6063 * The cur_ino = root dir case is special here. We can't treat 6064 * the inode as deleted+reused because it would generate a 6065 * stream that tries to delete/mkdir the root dir. 
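 * (BTRFS_FIRST_FREE_OBJECTID is the objectid of a subvolume's root
 * directory, which always exists on both sides of the comparison.)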
6066 */ 6067 if (left_gen != right_gen && 6068 sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) 6069 sctx->cur_inode_new_gen = 1; 6070 } 6071 6072 /* 6073 * Normally we do not find inodes with a link count of zero (orphans) 6074 * because the most common case is to create a snapshot and use it 6075 * for a send operation. However, less common use cases involve 6076 * sending a subvolume after turning it RO right after deleting all 6077 * hard links of a file while holding an open file descriptor against 6078 * it, or turning a RO snapshot into RW mode, keeping an open file 6079 * descriptor against a file, deleting it and then turning the 6080 * snapshot back to RO mode before using it for a send operation. 6081 * So if we find such cases, ignore the inode and all its items 6082 * completely if it's a new inode, or if it's a changed inode make 6083 * sure all its previous paths (from the parent snapshot) are 6084 * unlinked and all other inode items are ignored. 6085 */ 6086 if (result == BTRFS_COMPARE_TREE_NEW || 6087 result == BTRFS_COMPARE_TREE_CHANGED) { 6088 u32 nlinks; 6089 6090 nlinks = btrfs_inode_nlink(sctx->left_path->nodes[0], left_ii); 6091 if (nlinks == 0) { 6092 sctx->ignore_cur_inode = true; 6093 if (result == BTRFS_COMPARE_TREE_CHANGED) 6094 ret = btrfs_unlink_all_paths(sctx); 6095 goto out; 6096 } 6097 } 6098 6099 if (result == BTRFS_COMPARE_TREE_NEW) { 6100 sctx->cur_inode_gen = left_gen; 6101 sctx->cur_inode_new = 1; 6102 sctx->cur_inode_deleted = 0; 6103 sctx->cur_inode_size = btrfs_inode_size( 6104 sctx->left_path->nodes[0], left_ii); 6105 sctx->cur_inode_mode = btrfs_inode_mode( 6106 sctx->left_path->nodes[0], left_ii); 6107 sctx->cur_inode_rdev = btrfs_inode_rdev( 6108 sctx->left_path->nodes[0], left_ii); 6109 if (sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) 6110 ret = send_create_inode_if_needed(sctx); 6111 } else if (result == BTRFS_COMPARE_TREE_DELETED) { 6112 sctx->cur_inode_gen = right_gen; 6113 sctx->cur_inode_new = 0; 6114 sctx->cur_inode_deleted = 1; 6115 sctx->cur_inode_size = btrfs_inode_size( 6116 sctx->right_path->nodes[0], right_ii); 6117 sctx->cur_inode_mode = btrfs_inode_mode( 6118 sctx->right_path->nodes[0], right_ii); 6119 } else if (result == BTRFS_COMPARE_TREE_CHANGED) { 6120 /* 6121 * We need to do some special handling in case the inode was 6122 * reported as changed with a changed generation number. This 6123 * means that the original inode was deleted and a new inode 6124 * reused the same inum. So we have to treat the old inode as 6125 * deleted and the new one as new. 6126 */ 6127 if (sctx->cur_inode_new_gen) { 6128 /* 6129 * First, process the inode as if it was deleted. 6130 */ 6131 sctx->cur_inode_gen = right_gen; 6132 sctx->cur_inode_new = 0; 6133 sctx->cur_inode_deleted = 1; 6134 sctx->cur_inode_size = btrfs_inode_size( 6135 sctx->right_path->nodes[0], right_ii); 6136 sctx->cur_inode_mode = btrfs_inode_mode( 6137 sctx->right_path->nodes[0], right_ii); 6138 ret = process_all_refs(sctx, 6139 BTRFS_COMPARE_TREE_DELETED); 6140 if (ret < 0) 6141 goto out; 6142 6143 /* 6144 * Now process the inode as if it was new.
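 * For a reused inum this produces a stream that first unlinks or
 * orphanizes every old path and then recreates the inode from
 * scratch, followed by all of its extents and xattrs below.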
6145 */ 6146 sctx->cur_inode_gen = left_gen; 6147 sctx->cur_inode_new = 1; 6148 sctx->cur_inode_deleted = 0; 6149 sctx->cur_inode_size = btrfs_inode_size( 6150 sctx->left_path->nodes[0], left_ii); 6151 sctx->cur_inode_mode = btrfs_inode_mode( 6152 sctx->left_path->nodes[0], left_ii); 6153 sctx->cur_inode_rdev = btrfs_inode_rdev( 6154 sctx->left_path->nodes[0], left_ii); 6155 ret = send_create_inode_if_needed(sctx); 6156 if (ret < 0) 6157 goto out; 6158 6159 ret = process_all_refs(sctx, BTRFS_COMPARE_TREE_NEW); 6160 if (ret < 0) 6161 goto out; 6162 /* 6163 * Advance send_progress now as we did not get into 6164 * process_recorded_refs_if_needed in the new_gen case. 6165 */ 6166 sctx->send_progress = sctx->cur_ino + 1; 6167 6168 /* 6169 * Now process all extents and xattrs of the inode as if 6170 * they were all new. 6171 */ 6172 ret = process_all_extents(sctx); 6173 if (ret < 0) 6174 goto out; 6175 ret = process_all_new_xattrs(sctx); 6176 if (ret < 0) 6177 goto out; 6178 } else { 6179 sctx->cur_inode_gen = left_gen; 6180 sctx->cur_inode_new = 0; 6181 sctx->cur_inode_new_gen = 0; 6182 sctx->cur_inode_deleted = 0; 6183 sctx->cur_inode_size = btrfs_inode_size( 6184 sctx->left_path->nodes[0], left_ii); 6185 sctx->cur_inode_mode = btrfs_inode_mode( 6186 sctx->left_path->nodes[0], left_ii); 6187 } 6188 } 6189 6190 out: 6191 return ret; 6192 } 6193 6194 /* 6195 * We have to process new refs before deleted refs, but compare_trees gives us 6196 * the new and deleted refs mixed. To fix this, we record the new/deleted refs 6197 * first and later process them in process_recorded_refs. 6198 * For the cur_inode_new_gen case, we skip recording completely because 6199 * changed_inode did already initiate processing of refs. The reason for this is 6200 * that in this case, compare_tree actually compares the refs of 2 different 6201 * inodes. To fix this, process_all_refs is used in changed_inode to handle all 6202 * refs of the right tree as deleted and all refs of the left tree as new. 6203 */ 6204 static int changed_ref(struct send_ctx *sctx, 6205 enum btrfs_compare_tree_result result) 6206 { 6207 int ret = 0; 6208 6209 if (sctx->cur_ino != sctx->cmp_key->objectid) { 6210 inconsistent_snapshot_error(sctx, result, "reference"); 6211 return -EIO; 6212 } 6213 6214 if (!sctx->cur_inode_new_gen && 6215 sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) { 6216 if (result == BTRFS_COMPARE_TREE_NEW) 6217 ret = record_new_ref(sctx); 6218 else if (result == BTRFS_COMPARE_TREE_DELETED) 6219 ret = record_deleted_ref(sctx); 6220 else if (result == BTRFS_COMPARE_TREE_CHANGED) 6221 ret = record_changed_ref(sctx); 6222 } 6223 6224 return ret; 6225 } 6226 6227 /* 6228 * Process new/deleted/changed xattrs. We skip processing in the 6229 * cur_inode_new_gen case because changed_inode did already initiate processing 6230 * of xattrs. 
The reason is the same as in changed_ref. 6231 */ 6232 static int changed_xattr(struct send_ctx *sctx, 6233 enum btrfs_compare_tree_result result) 6234 { 6235 int ret = 0; 6236 6237 if (sctx->cur_ino != sctx->cmp_key->objectid) { 6238 inconsistent_snapshot_error(sctx, result, "xattr"); 6239 return -EIO; 6240 } 6241 6242 if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) { 6243 if (result == BTRFS_COMPARE_TREE_NEW) 6244 ret = process_new_xattr(sctx); 6245 else if (result == BTRFS_COMPARE_TREE_DELETED) 6246 ret = process_deleted_xattr(sctx); 6247 else if (result == BTRFS_COMPARE_TREE_CHANGED) 6248 ret = process_changed_xattr(sctx); 6249 } 6250 6251 return ret; 6252 } 6253 6254 /* 6255 * Process new/deleted/changed extents. We skip processing in the 6256 * cur_inode_new_gen case because changed_inode did already initiate processing 6257 * of extents. The reason is the same as in changed_ref. 6258 */ 6259 static int changed_extent(struct send_ctx *sctx, 6260 enum btrfs_compare_tree_result result) 6261 { 6262 int ret = 0; 6263 6264 if (sctx->cur_ino != sctx->cmp_key->objectid) { 6265 6266 if (result == BTRFS_COMPARE_TREE_CHANGED) { 6267 struct extent_buffer *leaf_l; 6268 struct extent_buffer *leaf_r; 6269 struct btrfs_file_extent_item *ei_l; 6270 struct btrfs_file_extent_item *ei_r; 6271 6272 leaf_l = sctx->left_path->nodes[0]; 6273 leaf_r = sctx->right_path->nodes[0]; 6274 ei_l = btrfs_item_ptr(leaf_l, 6275 sctx->left_path->slots[0], 6276 struct btrfs_file_extent_item); 6277 ei_r = btrfs_item_ptr(leaf_r, 6278 sctx->right_path->slots[0], 6279 struct btrfs_file_extent_item); 6280 6281 /* 6282 * We may have found an extent item that has changed 6283 * only its disk_bytenr field and the corresponding 6284 * inode item was not updated. This case happens due to 6285 * very specific timings during relocation when a leaf 6286 * that contains file extent items is COWed while 6287 * relocation is ongoing and it's in the stage where it 6288 * updates data pointers. So when this happens we can 6289 * safely ignore it since we know it's the same extent, 6290 * but just at different logical and physical locations 6291 * (when an extent is fully replaced with a new one, we 6292 * know the generation number must have changed too, 6293 * since snapshot creation implies committing the current 6294 * transaction, and the inode item must have been updated 6295 * as well). 6296 * This replacement of the disk_bytenr happens at 6297 * relocation.c:replace_file_extents() through 6298 * relocation.c:btrfs_reloc_cow_block().
6299 */ 6300 if (btrfs_file_extent_generation(leaf_l, ei_l) == 6301 btrfs_file_extent_generation(leaf_r, ei_r) && 6302 btrfs_file_extent_ram_bytes(leaf_l, ei_l) == 6303 btrfs_file_extent_ram_bytes(leaf_r, ei_r) && 6304 btrfs_file_extent_compression(leaf_l, ei_l) == 6305 btrfs_file_extent_compression(leaf_r, ei_r) && 6306 btrfs_file_extent_encryption(leaf_l, ei_l) == 6307 btrfs_file_extent_encryption(leaf_r, ei_r) && 6308 btrfs_file_extent_other_encoding(leaf_l, ei_l) == 6309 btrfs_file_extent_other_encoding(leaf_r, ei_r) && 6310 btrfs_file_extent_type(leaf_l, ei_l) == 6311 btrfs_file_extent_type(leaf_r, ei_r) && 6312 btrfs_file_extent_disk_bytenr(leaf_l, ei_l) != 6313 btrfs_file_extent_disk_bytenr(leaf_r, ei_r) && 6314 btrfs_file_extent_disk_num_bytes(leaf_l, ei_l) == 6315 btrfs_file_extent_disk_num_bytes(leaf_r, ei_r) && 6316 btrfs_file_extent_offset(leaf_l, ei_l) == 6317 btrfs_file_extent_offset(leaf_r, ei_r) && 6318 btrfs_file_extent_num_bytes(leaf_l, ei_l) == 6319 btrfs_file_extent_num_bytes(leaf_r, ei_r)) 6320 return 0; 6321 } 6322 6323 inconsistent_snapshot_error(sctx, result, "extent"); 6324 return -EIO; 6325 } 6326 6327 if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) { 6328 if (result != BTRFS_COMPARE_TREE_DELETED) 6329 ret = process_extent(sctx, sctx->left_path, 6330 sctx->cmp_key); 6331 } 6332 6333 return ret; 6334 } 6335 6336 static int dir_changed(struct send_ctx *sctx, u64 dir) 6337 { 6338 u64 orig_gen, new_gen; 6339 int ret; 6340 6341 ret = get_inode_info(sctx->send_root, dir, NULL, &new_gen, NULL, NULL, 6342 NULL, NULL); 6343 if (ret) 6344 return ret; 6345 6346 ret = get_inode_info(sctx->parent_root, dir, NULL, &orig_gen, NULL, 6347 NULL, NULL, NULL); 6348 if (ret) 6349 return ret; 6350 6351 return (orig_gen != new_gen) ? 1 : 0; 6352 } 6353 6354 static int compare_refs(struct send_ctx *sctx, struct btrfs_path *path, 6355 struct btrfs_key *key) 6356 { 6357 struct btrfs_inode_extref *extref; 6358 struct extent_buffer *leaf; 6359 u64 dirid = 0, last_dirid = 0; 6360 unsigned long ptr; 6361 u32 item_size; 6362 u32 cur_offset = 0; 6363 int ref_name_len; 6364 int ret = 0; 6365 6366 /* Easy case, just check this one dirid */ 6367 if (key->type == BTRFS_INODE_REF_KEY) { 6368 dirid = key->offset; 6369 6370 ret = dir_changed(sctx, dirid); 6371 goto out; 6372 } 6373 6374 leaf = path->nodes[0]; 6375 item_size = btrfs_item_size_nr(leaf, path->slots[0]); 6376 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); 6377 while (cur_offset < item_size) { 6378 extref = (struct btrfs_inode_extref *)(ptr + 6379 cur_offset); 6380 dirid = btrfs_inode_extref_parent(leaf, extref); 6381 ref_name_len = btrfs_inode_extref_name_len(leaf, extref); 6382 cur_offset += ref_name_len + sizeof(*extref); 6383 if (dirid == last_dirid) 6384 continue; 6385 ret = dir_changed(sctx, dirid); 6386 if (ret) 6387 break; 6388 last_dirid = dirid; 6389 } 6390 out: 6391 return ret; 6392 } 6393 6394 /* 6395 * Updates compare related fields in sctx and simply forwards to the actual 6396 * changed_xxx functions. 
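 *
 * Dispatch sketch, matching the key types handled below:
 *
 *   BTRFS_INODE_ITEM_KEY                       -> changed_inode()
 *   BTRFS_INODE_REF_KEY/BTRFS_INODE_EXTREF_KEY -> changed_ref()
 *   BTRFS_XATTR_ITEM_KEY                       -> changed_xattr()
 *   BTRFS_EXTENT_DATA_KEY                      -> changed_extent()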
6397 */ 6398 static int changed_cb(struct btrfs_path *left_path, 6399 struct btrfs_path *right_path, 6400 struct btrfs_key *key, 6401 enum btrfs_compare_tree_result result, 6402 void *ctx) 6403 { 6404 int ret = 0; 6405 struct send_ctx *sctx = ctx; 6406 6407 if (result == BTRFS_COMPARE_TREE_SAME) { 6408 if (key->type == BTRFS_INODE_REF_KEY || 6409 key->type == BTRFS_INODE_EXTREF_KEY) { 6410 ret = compare_refs(sctx, left_path, key); 6411 if (!ret) 6412 return 0; 6413 if (ret < 0) 6414 return ret; 6415 } else if (key->type == BTRFS_EXTENT_DATA_KEY) { 6416 return maybe_send_hole(sctx, left_path, key); 6417 } else { 6418 return 0; 6419 } 6420 result = BTRFS_COMPARE_TREE_CHANGED; 6421 ret = 0; 6422 } 6423 6424 sctx->left_path = left_path; 6425 sctx->right_path = right_path; 6426 sctx->cmp_key = key; 6427 6428 ret = finish_inode_if_needed(sctx, 0); 6429 if (ret < 0) 6430 goto out; 6431 6432 /* Ignore non-FS objects */ 6433 if (key->objectid == BTRFS_FREE_INO_OBJECTID || 6434 key->objectid == BTRFS_FREE_SPACE_OBJECTID) 6435 goto out; 6436 6437 if (key->type == BTRFS_INODE_ITEM_KEY) { 6438 ret = changed_inode(sctx, result); 6439 } else if (!sctx->ignore_cur_inode) { 6440 if (key->type == BTRFS_INODE_REF_KEY || 6441 key->type == BTRFS_INODE_EXTREF_KEY) 6442 ret = changed_ref(sctx, result); 6443 else if (key->type == BTRFS_XATTR_ITEM_KEY) 6444 ret = changed_xattr(sctx, result); 6445 else if (key->type == BTRFS_EXTENT_DATA_KEY) 6446 ret = changed_extent(sctx, result); 6447 } 6448 6449 out: 6450 return ret; 6451 } 6452 6453 static int full_send_tree(struct send_ctx *sctx) 6454 { 6455 int ret; 6456 struct btrfs_root *send_root = sctx->send_root; 6457 struct btrfs_key key; 6458 struct btrfs_path *path; 6459 struct extent_buffer *eb; 6460 int slot; 6461 6462 path = alloc_path_for_send(); 6463 if (!path) 6464 return -ENOMEM; 6465 6466 key.objectid = BTRFS_FIRST_FREE_OBJECTID; 6467 key.type = BTRFS_INODE_ITEM_KEY; 6468 key.offset = 0; 6469 6470 ret = btrfs_search_slot_for_read(send_root, &key, path, 1, 0); 6471 if (ret < 0) 6472 goto out; 6473 if (ret) 6474 goto out_finish; 6475 6476 while (1) { 6477 eb = path->nodes[0]; 6478 slot = path->slots[0]; 6479 btrfs_item_key_to_cpu(eb, &key, slot); 6480 6481 ret = changed_cb(path, NULL, &key, 6482 BTRFS_COMPARE_TREE_NEW, sctx); 6483 if (ret < 0) 6484 goto out; 6485 6486 ret = btrfs_next_item(send_root, path); 6487 if (ret < 0) 6488 goto out; 6489 if (ret) { 6490 ret = 0; 6491 break; 6492 } 6493 } 6494 6495 out_finish: 6496 ret = finish_inode_if_needed(sctx, 1); 6497 6498 out: 6499 btrfs_free_path(path); 6500 return ret; 6501 } 6502 6503 static int send_subvol(struct send_ctx *sctx) 6504 { 6505 int ret; 6506 6507 if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_STREAM_HEADER)) { 6508 ret = send_header(sctx); 6509 if (ret < 0) 6510 goto out; 6511 } 6512 6513 ret = send_subvol_begin(sctx); 6514 if (ret < 0) 6515 goto out; 6516 6517 if (sctx->parent_root) { 6518 ret = btrfs_compare_trees(sctx->send_root, sctx->parent_root, 6519 changed_cb, sctx); 6520 if (ret < 0) 6521 goto out; 6522 ret = finish_inode_if_needed(sctx, 1); 6523 if (ret < 0) 6524 goto out; 6525 } else { 6526 ret = full_send_tree(sctx); 6527 if (ret < 0) 6528 goto out; 6529 } 6530 6531 out: 6532 free_recorded_refs(sctx); 6533 return ret; 6534 } 6535 6536 /* 6537 * If orphan cleanup did remove any orphans from a root, it means the tree 6538 * was modified and therefore the commit root is not the same as the current 6539 * root anymore. 

static int send_subvol(struct send_ctx *sctx)
{
	int ret;

	if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_STREAM_HEADER)) {
		ret = send_header(sctx);
		if (ret < 0)
			goto out;
	}

	ret = send_subvol_begin(sctx);
	if (ret < 0)
		goto out;

	if (sctx->parent_root) {
		ret = btrfs_compare_trees(sctx->send_root, sctx->parent_root,
					  changed_cb, sctx);
		if (ret < 0)
			goto out;
		ret = finish_inode_if_needed(sctx, 1);
		if (ret < 0)
			goto out;
	} else {
		ret = full_send_tree(sctx);
		if (ret < 0)
			goto out;
	}

out:
	free_recorded_refs(sctx);
	return ret;
}

/*
 * If orphan cleanup did remove any orphans from a root, it means the tree
 * was modified and therefore the commit root is not the same as the current
 * root anymore. This is a problem, because send uses the commit root and
 * therefore can see inode items that don't exist in the current root anymore,
 * and for example make calls to btrfs_iget, which will do tree lookups based
 * on the current root and not on the commit root. Those lookups will fail,
 * returning a -ESTALE error, and making send fail with that error. So make
 * sure a send does not see any orphans we have just removed, and that it will
 * see the same inodes regardless of whether a transaction commit happened
 * before it started (meaning that the commit root will be the same as the
 * current root) or not.
 */
static int ensure_commit_roots_uptodate(struct send_ctx *sctx)
{
	int i;
	struct btrfs_trans_handle *trans = NULL;

again:
	if (sctx->parent_root &&
	    sctx->parent_root->node != sctx->parent_root->commit_root)
		goto commit_trans;

	for (i = 0; i < sctx->clone_roots_cnt; i++)
		if (sctx->clone_roots[i].root->node !=
		    sctx->clone_roots[i].root->commit_root)
			goto commit_trans;

	if (trans)
		return btrfs_end_transaction(trans);

	return 0;

commit_trans:
	/* Use any root, all fs roots will get their commit roots updated. */
	if (!trans) {
		trans = btrfs_join_transaction(sctx->send_root);
		if (IS_ERR(trans))
			return PTR_ERR(trans);
		goto again;
	}

	return btrfs_commit_transaction(trans);
}
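
/*
 * Added note: "uptodate" above means the committed version of a root equals
 * its current version, i.e. root->node == root->commit_root. Since
 * committing a transaction updates the commit roots of all fs roots, a
 * single btrfs_join_transaction()/btrfs_commit_transaction() pair on any
 * root (send_root is used for convenience) restores that invariant for the
 * parent root and every clone root at once.
 */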

static void btrfs_root_dec_send_in_progress(struct btrfs_root *root)
{
	spin_lock(&root->root_item_lock);
	root->send_in_progress--;
	/*
	 * Not much left to do, we don't know why it's unbalanced and
	 * can't blindly reset it to 0.
	 */
	if (root->send_in_progress < 0)
		btrfs_err(root->fs_info,
			  "send_in_progress unbalanced %d root %llu",
			  root->send_in_progress, root->root_key.objectid);
	spin_unlock(&root->root_item_lock);
}

long btrfs_ioctl_send(struct file *mnt_file, struct btrfs_ioctl_send_args *arg)
{
	int ret = 0;
	struct btrfs_root *send_root = BTRFS_I(file_inode(mnt_file))->root;
	struct btrfs_fs_info *fs_info = send_root->fs_info;
	struct btrfs_root *clone_root;
	struct btrfs_key key;
	struct send_ctx *sctx = NULL;
	u32 i;
	u64 *clone_sources_tmp = NULL;
	int clone_sources_to_rollback = 0;
	unsigned alloc_size;
	int sort_clone_roots = 0;
	int index;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/*
	 * The subvolume must remain read-only during send, so protect against
	 * making it RW. This also protects against deletion.
	 */
	spin_lock(&send_root->root_item_lock);
	send_root->send_in_progress++;
	spin_unlock(&send_root->root_item_lock);

	/*
	 * This is done when we look up the root, it should already be complete
	 * by the time we get here.
	 */
	WARN_ON(send_root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE);

	/*
	 * Userspace tools do the checks and warn the user if it's
	 * not RO.
	 */
	if (!btrfs_root_readonly(send_root)) {
		ret = -EPERM;
		goto out;
	}

	/*
	 * Check that we don't overflow at later allocations, we request
	 * clone_sources_count + 1 items, and compare to unsigned long inside
	 * access_ok.
	 */
	if (arg->clone_sources_count >
	    ULONG_MAX / sizeof(struct clone_root) - 1) {
		ret = -EINVAL;
		goto out;
	}
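
	/*
	 * Added note: the bound above makes the later allocation size
	 * sizeof(struct clone_root) * (clone_sources_count + 1) fit in an
	 * unsigned long without wrapping; e.g. on 64-bit builds, where
	 * struct clone_root is 32 bytes, at most ULONG_MAX / 32 - 1 clone
	 * sources are accepted.
	 */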

	if (!access_ok(VERIFY_READ, arg->clone_sources,
		       sizeof(*arg->clone_sources) *
		       arg->clone_sources_count)) {
		ret = -EFAULT;
		goto out;
	}

	if (arg->flags & ~BTRFS_SEND_FLAG_MASK) {
		ret = -EINVAL;
		goto out;
	}

	sctx = kzalloc(sizeof(struct send_ctx), GFP_KERNEL);
	if (!sctx) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&sctx->new_refs);
	INIT_LIST_HEAD(&sctx->deleted_refs);
	INIT_RADIX_TREE(&sctx->name_cache, GFP_KERNEL);
	INIT_LIST_HEAD(&sctx->name_cache_list);

	sctx->flags = arg->flags;

	sctx->send_filp = fget(arg->send_fd);
	if (!sctx->send_filp) {
		ret = -EBADF;
		goto out;
	}

	sctx->send_root = send_root;
	/*
	 * Unlikely but possible: if the subvolume is marked for deletion but
	 * is slow to remove the directory entry, send can still be started.
	 */
	if (btrfs_root_dead(sctx->send_root)) {
		ret = -EPERM;
		goto out;
	}

	sctx->clone_roots_cnt = arg->clone_sources_count;

	sctx->send_max_size = BTRFS_SEND_BUF_SIZE;
	sctx->send_buf = kvmalloc(sctx->send_max_size, GFP_KERNEL);
	if (!sctx->send_buf) {
		ret = -ENOMEM;
		goto out;
	}

	sctx->read_buf = kvmalloc(BTRFS_SEND_READ_SIZE, GFP_KERNEL);
	if (!sctx->read_buf) {
		ret = -ENOMEM;
		goto out;
	}

	sctx->pending_dir_moves = RB_ROOT;
	sctx->waiting_dir_moves = RB_ROOT;
	sctx->orphan_dirs = RB_ROOT;

	alloc_size = sizeof(struct clone_root) * (arg->clone_sources_count + 1);

	sctx->clone_roots = kzalloc(alloc_size, GFP_KERNEL);
	if (!sctx->clone_roots) {
		ret = -ENOMEM;
		goto out;
	}

	alloc_size = arg->clone_sources_count * sizeof(*arg->clone_sources);

	if (arg->clone_sources_count) {
		clone_sources_tmp = kvmalloc(alloc_size, GFP_KERNEL);
		if (!clone_sources_tmp) {
			ret = -ENOMEM;
			goto out;
		}

		ret = copy_from_user(clone_sources_tmp, arg->clone_sources,
				     alloc_size);
		if (ret) {
			ret = -EFAULT;
			goto out;
		}

		for (i = 0; i < arg->clone_sources_count; i++) {
			key.objectid = clone_sources_tmp[i];
			key.type = BTRFS_ROOT_ITEM_KEY;
			key.offset = (u64)-1;

			index = srcu_read_lock(&fs_info->subvol_srcu);

			clone_root = btrfs_read_fs_root_no_name(fs_info, &key);
			if (IS_ERR(clone_root)) {
				srcu_read_unlock(&fs_info->subvol_srcu, index);
				ret = PTR_ERR(clone_root);
				goto out;
			}
			spin_lock(&clone_root->root_item_lock);
			if (!btrfs_root_readonly(clone_root) ||
			    btrfs_root_dead(clone_root)) {
				spin_unlock(&clone_root->root_item_lock);
				srcu_read_unlock(&fs_info->subvol_srcu, index);
				ret = -EPERM;
				goto out;
			}
			clone_root->send_in_progress++;
			spin_unlock(&clone_root->root_item_lock);
			srcu_read_unlock(&fs_info->subvol_srcu, index);

			sctx->clone_roots[i].root = clone_root;
			clone_sources_to_rollback = i + 1;
		}
		kvfree(clone_sources_tmp);
		clone_sources_tmp = NULL;
	}

	if (arg->parent_root) {
		key.objectid = arg->parent_root;
		key.type = BTRFS_ROOT_ITEM_KEY;
		key.offset = (u64)-1;

		index = srcu_read_lock(&fs_info->subvol_srcu);

		sctx->parent_root = btrfs_read_fs_root_no_name(fs_info, &key);
		if (IS_ERR(sctx->parent_root)) {
			srcu_read_unlock(&fs_info->subvol_srcu, index);
			ret = PTR_ERR(sctx->parent_root);
			goto out;
		}

		spin_lock(&sctx->parent_root->root_item_lock);
		sctx->parent_root->send_in_progress++;
		if (!btrfs_root_readonly(sctx->parent_root) ||
		    btrfs_root_dead(sctx->parent_root)) {
			spin_unlock(&sctx->parent_root->root_item_lock);
			srcu_read_unlock(&fs_info->subvol_srcu, index);
			ret = -EPERM;
			goto out;
		}
		spin_unlock(&sctx->parent_root->root_item_lock);

		srcu_read_unlock(&fs_info->subvol_srcu, index);
	}

	/*
	 * Clones from send_root are allowed, but only if the clone source
	 * is behind the current send position. This is checked while searching
	 * for possible clone sources.
	 */
	sctx->clone_roots[sctx->clone_roots_cnt++].root = sctx->send_root;

	/* We do a bsearch later */
	sort(sctx->clone_roots, sctx->clone_roots_cnt,
	     sizeof(*sctx->clone_roots), __clone_root_cmp_sort,
	     NULL);
	sort_clone_roots = 1;

	ret = ensure_commit_roots_uptodate(sctx);
	if (ret)
		goto out;

	current->journal_info = BTRFS_SEND_TRANS_STUB;
	ret = send_subvol(sctx);
	current->journal_info = NULL;
	if (ret < 0)
		goto out;

	if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_END_CMD)) {
		ret = begin_cmd(sctx, BTRFS_SEND_C_END);
		if (ret < 0)
			goto out;
		ret = send_cmd(sctx);
		if (ret < 0)
			goto out;
	}

out:
	WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->pending_dir_moves));
	while (sctx && !RB_EMPTY_ROOT(&sctx->pending_dir_moves)) {
		struct rb_node *n;
		struct pending_dir_move *pm;

		n = rb_first(&sctx->pending_dir_moves);
		pm = rb_entry(n, struct pending_dir_move, node);
		while (!list_empty(&pm->list)) {
			struct pending_dir_move *pm2;

			pm2 = list_first_entry(&pm->list,
					       struct pending_dir_move, list);
			free_pending_move(sctx, pm2);
		}
		free_pending_move(sctx, pm);
	}

	WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves));
	while (sctx && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves)) {
		struct rb_node *n;
		struct waiting_dir_move *dm;

		n = rb_first(&sctx->waiting_dir_moves);
		dm = rb_entry(n, struct waiting_dir_move, node);
		rb_erase(&dm->node, &sctx->waiting_dir_moves);
		kfree(dm);
	}

	WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->orphan_dirs));
	while (sctx && !RB_EMPTY_ROOT(&sctx->orphan_dirs)) {
		struct rb_node *n;
		struct orphan_dir_info *odi;

		n = rb_first(&sctx->orphan_dirs);
		odi = rb_entry(n, struct orphan_dir_info, node);
		free_orphan_dir_info(sctx, odi);
	}

	if (sort_clone_roots) {
		for (i = 0; i < sctx->clone_roots_cnt; i++)
			btrfs_root_dec_send_in_progress(
					sctx->clone_roots[i].root);
	} else {
		for (i = 0; sctx && i < clone_sources_to_rollback; i++)
			btrfs_root_dec_send_in_progress(
					sctx->clone_roots[i].root);

		btrfs_root_dec_send_in_progress(send_root);
	}
	if (sctx && !IS_ERR_OR_NULL(sctx->parent_root))
		btrfs_root_dec_send_in_progress(sctx->parent_root);

	kvfree(clone_sources_tmp);

	if (sctx) {
		if (sctx->send_filp)
			fput(sctx->send_filp);

		kvfree(sctx->clone_roots);
		kvfree(sctx->send_buf);
		kvfree(sctx->read_buf);

		name_cache_free(sctx);

		kfree(sctx);
	}

	return ret;
}
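
/*
 * Usage sketch (added for illustration; this is userspace code and is kept
 * out of the kernel build): btrfs_ioctl_send() above is reached through the
 * BTRFS_IOC_SEND ioctl, issued on an fd of the read-only subvolume to send,
 * with the stream written to args.send_fd. A minimal caller, assuming a
 * snapshot at "/mnt/snap" and writing the stream to stdout:
 */
#if 0	/* userspace example, do not compile into the kernel */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/btrfs.h>

int send_snapshot_to_stdout(void)
{
	struct btrfs_ioctl_send_args args;
	int subvol_fd = open("/mnt/snap", O_RDONLY);

	if (subvol_fd < 0)
		return -1;

	memset(&args, 0, sizeof(args));
	args.send_fd = 1;		/* write the stream to stdout */
	args.clone_sources_count = 0;	/* no explicit clone sources */
	args.parent_root = 0;		/* 0 means a full, non-incremental send */

	return ioctl(subvol_fd, BTRFS_IOC_SEND, &args);
}
#endif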