1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) 2012 Alexander Block. All rights reserved. 4 */ 5 6 #include <linux/bsearch.h> 7 #include <linux/fs.h> 8 #include <linux/file.h> 9 #include <linux/sort.h> 10 #include <linux/mount.h> 11 #include <linux/xattr.h> 12 #include <linux/posix_acl_xattr.h> 13 #include <linux/radix-tree.h> 14 #include <linux/vmalloc.h> 15 #include <linux/string.h> 16 #include <linux/compat.h> 17 #include <linux/crc32c.h> 18 19 #include "send.h" 20 #include "backref.h" 21 #include "locking.h" 22 #include "disk-io.h" 23 #include "btrfs_inode.h" 24 #include "transaction.h" 25 #include "compression.h" 26 27 /* 28 * A fs_path is a helper to dynamically build path names with unknown size. 29 * It reallocates the internal buffer on demand. 30 * It allows fast adding of path elements on the right side (normal path) and 31 * fast adding to the left side (reversed path). A reversed path can also be 32 * unreversed if needed. 33 */ 34 struct fs_path { 35 union { 36 struct { 37 char *start; 38 char *end; 39 40 char *buf; 41 unsigned short buf_len:15; 42 unsigned short reversed:1; 43 char inline_buf[]; 44 }; 45 /* 46 * Average path length does not exceed 200 bytes, we'll have 47 * better packing in the slab and higher chance to satisfy 48 * a allocation later during send. 49 */ 50 char pad[256]; 51 }; 52 }; 53 #define FS_PATH_INLINE_SIZE \ 54 (sizeof(struct fs_path) - offsetof(struct fs_path, inline_buf)) 55 56 57 /* reused for each extent */ 58 struct clone_root { 59 struct btrfs_root *root; 60 u64 ino; 61 u64 offset; 62 63 u64 found_refs; 64 }; 65 66 #define SEND_CTX_MAX_NAME_CACHE_SIZE 128 67 #define SEND_CTX_NAME_CACHE_CLEAN_SIZE (SEND_CTX_MAX_NAME_CACHE_SIZE * 2) 68 69 struct send_ctx { 70 struct file *send_filp; 71 loff_t send_off; 72 char *send_buf; 73 u32 send_size; 74 u32 send_max_size; 75 u64 total_send_size; 76 u64 cmd_send_size[BTRFS_SEND_C_MAX + 1]; 77 u64 flags; /* 'flags' member of btrfs_ioctl_send_args is u64 */ 78 79 struct btrfs_root *send_root; 80 struct btrfs_root *parent_root; 81 struct clone_root *clone_roots; 82 int clone_roots_cnt; 83 84 /* current state of the compare_tree call */ 85 struct btrfs_path *left_path; 86 struct btrfs_path *right_path; 87 struct btrfs_key *cmp_key; 88 89 /* 90 * infos of the currently processed inode. In case of deleted inodes, 91 * these are the values from the deleted inode. 92 */ 93 u64 cur_ino; 94 u64 cur_inode_gen; 95 int cur_inode_new; 96 int cur_inode_new_gen; 97 int cur_inode_deleted; 98 u64 cur_inode_size; 99 u64 cur_inode_mode; 100 u64 cur_inode_rdev; 101 u64 cur_inode_last_extent; 102 u64 cur_inode_next_write_offset; 103 104 u64 send_progress; 105 106 struct list_head new_refs; 107 struct list_head deleted_refs; 108 109 struct radix_tree_root name_cache; 110 struct list_head name_cache_list; 111 int name_cache_size; 112 113 struct file_ra_state ra; 114 115 char *read_buf; 116 117 /* 118 * We process inodes by their increasing order, so if before an 119 * incremental send we reverse the parent/child relationship of 120 * directories such that a directory with a lower inode number was 121 * the parent of a directory with a higher inode number, and the one 122 * becoming the new parent got renamed too, we can't rename/move the 123 * directory with lower inode number when we finish processing it - we 124 * must process the directory with higher inode number first, then 125 * rename/move it and then rename/move the directory with lower inode 126 * number. Example follows. 127 * 128 * Tree state when the first send was performed: 129 * 130 * . 131 * |-- a (ino 257) 132 * |-- b (ino 258) 133 * | 134 * | 135 * |-- c (ino 259) 136 * | |-- d (ino 260) 137 * | 138 * |-- c2 (ino 261) 139 * 140 * Tree state when the second (incremental) send is performed: 141 * 142 * . 143 * |-- a (ino 257) 144 * |-- b (ino 258) 145 * |-- c2 (ino 261) 146 * |-- d2 (ino 260) 147 * |-- cc (ino 259) 148 * 149 * The sequence of steps that lead to the second state was: 150 * 151 * mv /a/b/c/d /a/b/c2/d2 152 * mv /a/b/c /a/b/c2/d2/cc 153 * 154 * "c" has lower inode number, but we can't move it (2nd mv operation) 155 * before we move "d", which has higher inode number. 156 * 157 * So we just memorize which move/rename operations must be performed 158 * later when their respective parent is processed and moved/renamed. 159 */ 160 161 /* Indexed by parent directory inode number. */ 162 struct rb_root pending_dir_moves; 163 164 /* 165 * Reverse index, indexed by the inode number of a directory that 166 * is waiting for the move/rename of its immediate parent before its 167 * own move/rename can be performed. 168 */ 169 struct rb_root waiting_dir_moves; 170 171 /* 172 * A directory that is going to be rm'ed might have a child directory 173 * which is in the pending directory moves index above. In this case, 174 * the directory can only be removed after the move/rename of its child 175 * is performed. Example: 176 * 177 * Parent snapshot: 178 * 179 * . (ino 256) 180 * |-- a/ (ino 257) 181 * |-- b/ (ino 258) 182 * |-- c/ (ino 259) 183 * | |-- x/ (ino 260) 184 * | 185 * |-- y/ (ino 261) 186 * 187 * Send snapshot: 188 * 189 * . (ino 256) 190 * |-- a/ (ino 257) 191 * |-- b/ (ino 258) 192 * |-- YY/ (ino 261) 193 * |-- x/ (ino 260) 194 * 195 * Sequence of steps that lead to the send snapshot: 196 * rm -f /a/b/c/foo.txt 197 * mv /a/b/y /a/b/YY 198 * mv /a/b/c/x /a/b/YY 199 * rmdir /a/b/c 200 * 201 * When the child is processed, its move/rename is delayed until its 202 * parent is processed (as explained above), but all other operations 203 * like update utimes, chown, chgrp, etc, are performed and the paths 204 * that it uses for those operations must use the orphanized name of 205 * its parent (the directory we're going to rm later), so we need to 206 * memorize that name. 207 * 208 * Indexed by the inode number of the directory to be deleted. 209 */ 210 struct rb_root orphan_dirs; 211 }; 212 213 struct pending_dir_move { 214 struct rb_node node; 215 struct list_head list; 216 u64 parent_ino; 217 u64 ino; 218 u64 gen; 219 struct list_head update_refs; 220 }; 221 222 struct waiting_dir_move { 223 struct rb_node node; 224 u64 ino; 225 /* 226 * There might be some directory that could not be removed because it 227 * was waiting for this directory inode to be moved first. Therefore 228 * after this directory is moved, we can try to rmdir the ino rmdir_ino. 229 */ 230 u64 rmdir_ino; 231 bool orphanized; 232 }; 233 234 struct orphan_dir_info { 235 struct rb_node node; 236 u64 ino; 237 u64 gen; 238 }; 239 240 struct name_cache_entry { 241 struct list_head list; 242 /* 243 * radix_tree has only 32bit entries but we need to handle 64bit inums. 244 * We use the lower 32bit of the 64bit inum to store it in the tree. If 245 * more then one inum would fall into the same entry, we use radix_list 246 * to store the additional entries. radix_list is also used to store 247 * entries where two entries have the same inum but different 248 * generations. 249 */ 250 struct list_head radix_list; 251 u64 ino; 252 u64 gen; 253 u64 parent_ino; 254 u64 parent_gen; 255 int ret; 256 int need_later_update; 257 int name_len; 258 char name[]; 259 }; 260 261 __cold 262 static void inconsistent_snapshot_error(struct send_ctx *sctx, 263 enum btrfs_compare_tree_result result, 264 const char *what) 265 { 266 const char *result_string; 267 268 switch (result) { 269 case BTRFS_COMPARE_TREE_NEW: 270 result_string = "new"; 271 break; 272 case BTRFS_COMPARE_TREE_DELETED: 273 result_string = "deleted"; 274 break; 275 case BTRFS_COMPARE_TREE_CHANGED: 276 result_string = "updated"; 277 break; 278 case BTRFS_COMPARE_TREE_SAME: 279 ASSERT(0); 280 result_string = "unchanged"; 281 break; 282 default: 283 ASSERT(0); 284 result_string = "unexpected"; 285 } 286 287 btrfs_err(sctx->send_root->fs_info, 288 "Send: inconsistent snapshot, found %s %s for inode %llu without updated inode item, send root is %llu, parent root is %llu", 289 result_string, what, sctx->cmp_key->objectid, 290 sctx->send_root->root_key.objectid, 291 (sctx->parent_root ? 292 sctx->parent_root->root_key.objectid : 0)); 293 } 294 295 static int is_waiting_for_move(struct send_ctx *sctx, u64 ino); 296 297 static struct waiting_dir_move * 298 get_waiting_dir_move(struct send_ctx *sctx, u64 ino); 299 300 static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino); 301 302 static int need_send_hole(struct send_ctx *sctx) 303 { 304 return (sctx->parent_root && !sctx->cur_inode_new && 305 !sctx->cur_inode_new_gen && !sctx->cur_inode_deleted && 306 S_ISREG(sctx->cur_inode_mode)); 307 } 308 309 static void fs_path_reset(struct fs_path *p) 310 { 311 if (p->reversed) { 312 p->start = p->buf + p->buf_len - 1; 313 p->end = p->start; 314 *p->start = 0; 315 } else { 316 p->start = p->buf; 317 p->end = p->start; 318 *p->start = 0; 319 } 320 } 321 322 static struct fs_path *fs_path_alloc(void) 323 { 324 struct fs_path *p; 325 326 p = kmalloc(sizeof(*p), GFP_KERNEL); 327 if (!p) 328 return NULL; 329 p->reversed = 0; 330 p->buf = p->inline_buf; 331 p->buf_len = FS_PATH_INLINE_SIZE; 332 fs_path_reset(p); 333 return p; 334 } 335 336 static struct fs_path *fs_path_alloc_reversed(void) 337 { 338 struct fs_path *p; 339 340 p = fs_path_alloc(); 341 if (!p) 342 return NULL; 343 p->reversed = 1; 344 fs_path_reset(p); 345 return p; 346 } 347 348 static void fs_path_free(struct fs_path *p) 349 { 350 if (!p) 351 return; 352 if (p->buf != p->inline_buf) 353 kfree(p->buf); 354 kfree(p); 355 } 356 357 static int fs_path_len(struct fs_path *p) 358 { 359 return p->end - p->start; 360 } 361 362 static int fs_path_ensure_buf(struct fs_path *p, int len) 363 { 364 char *tmp_buf; 365 int path_len; 366 int old_buf_len; 367 368 len++; 369 370 if (p->buf_len >= len) 371 return 0; 372 373 if (len > PATH_MAX) { 374 WARN_ON(1); 375 return -ENOMEM; 376 } 377 378 path_len = p->end - p->start; 379 old_buf_len = p->buf_len; 380 381 /* 382 * First time the inline_buf does not suffice 383 */ 384 if (p->buf == p->inline_buf) { 385 tmp_buf = kmalloc(len, GFP_KERNEL); 386 if (tmp_buf) 387 memcpy(tmp_buf, p->buf, old_buf_len); 388 } else { 389 tmp_buf = krealloc(p->buf, len, GFP_KERNEL); 390 } 391 if (!tmp_buf) 392 return -ENOMEM; 393 p->buf = tmp_buf; 394 /* 395 * The real size of the buffer is bigger, this will let the fast path 396 * happen most of the time 397 */ 398 p->buf_len = ksize(p->buf); 399 400 if (p->reversed) { 401 tmp_buf = p->buf + old_buf_len - path_len - 1; 402 p->end = p->buf + p->buf_len - 1; 403 p->start = p->end - path_len; 404 memmove(p->start, tmp_buf, path_len + 1); 405 } else { 406 p->start = p->buf; 407 p->end = p->start + path_len; 408 } 409 return 0; 410 } 411 412 static int fs_path_prepare_for_add(struct fs_path *p, int name_len, 413 char **prepared) 414 { 415 int ret; 416 int new_len; 417 418 new_len = p->end - p->start + name_len; 419 if (p->start != p->end) 420 new_len++; 421 ret = fs_path_ensure_buf(p, new_len); 422 if (ret < 0) 423 goto out; 424 425 if (p->reversed) { 426 if (p->start != p->end) 427 *--p->start = '/'; 428 p->start -= name_len; 429 *prepared = p->start; 430 } else { 431 if (p->start != p->end) 432 *p->end++ = '/'; 433 *prepared = p->end; 434 p->end += name_len; 435 *p->end = 0; 436 } 437 438 out: 439 return ret; 440 } 441 442 static int fs_path_add(struct fs_path *p, const char *name, int name_len) 443 { 444 int ret; 445 char *prepared; 446 447 ret = fs_path_prepare_for_add(p, name_len, &prepared); 448 if (ret < 0) 449 goto out; 450 memcpy(prepared, name, name_len); 451 452 out: 453 return ret; 454 } 455 456 static int fs_path_add_path(struct fs_path *p, struct fs_path *p2) 457 { 458 int ret; 459 char *prepared; 460 461 ret = fs_path_prepare_for_add(p, p2->end - p2->start, &prepared); 462 if (ret < 0) 463 goto out; 464 memcpy(prepared, p2->start, p2->end - p2->start); 465 466 out: 467 return ret; 468 } 469 470 static int fs_path_add_from_extent_buffer(struct fs_path *p, 471 struct extent_buffer *eb, 472 unsigned long off, int len) 473 { 474 int ret; 475 char *prepared; 476 477 ret = fs_path_prepare_for_add(p, len, &prepared); 478 if (ret < 0) 479 goto out; 480 481 read_extent_buffer(eb, prepared, off, len); 482 483 out: 484 return ret; 485 } 486 487 static int fs_path_copy(struct fs_path *p, struct fs_path *from) 488 { 489 int ret; 490 491 p->reversed = from->reversed; 492 fs_path_reset(p); 493 494 ret = fs_path_add_path(p, from); 495 496 return ret; 497 } 498 499 500 static void fs_path_unreverse(struct fs_path *p) 501 { 502 char *tmp; 503 int len; 504 505 if (!p->reversed) 506 return; 507 508 tmp = p->start; 509 len = p->end - p->start; 510 p->start = p->buf; 511 p->end = p->start + len; 512 memmove(p->start, tmp, len + 1); 513 p->reversed = 0; 514 } 515 516 static struct btrfs_path *alloc_path_for_send(void) 517 { 518 struct btrfs_path *path; 519 520 path = btrfs_alloc_path(); 521 if (!path) 522 return NULL; 523 path->search_commit_root = 1; 524 path->skip_locking = 1; 525 path->need_commit_sem = 1; 526 return path; 527 } 528 529 static int write_buf(struct file *filp, const void *buf, u32 len, loff_t *off) 530 { 531 int ret; 532 u32 pos = 0; 533 534 while (pos < len) { 535 ret = kernel_write(filp, buf + pos, len - pos, off); 536 /* TODO handle that correctly */ 537 /*if (ret == -ERESTARTSYS) { 538 continue; 539 }*/ 540 if (ret < 0) 541 return ret; 542 if (ret == 0) { 543 return -EIO; 544 } 545 pos += ret; 546 } 547 548 return 0; 549 } 550 551 static int tlv_put(struct send_ctx *sctx, u16 attr, const void *data, int len) 552 { 553 struct btrfs_tlv_header *hdr; 554 int total_len = sizeof(*hdr) + len; 555 int left = sctx->send_max_size - sctx->send_size; 556 557 if (unlikely(left < total_len)) 558 return -EOVERFLOW; 559 560 hdr = (struct btrfs_tlv_header *) (sctx->send_buf + sctx->send_size); 561 hdr->tlv_type = cpu_to_le16(attr); 562 hdr->tlv_len = cpu_to_le16(len); 563 memcpy(hdr + 1, data, len); 564 sctx->send_size += total_len; 565 566 return 0; 567 } 568 569 #define TLV_PUT_DEFINE_INT(bits) \ 570 static int tlv_put_u##bits(struct send_ctx *sctx, \ 571 u##bits attr, u##bits value) \ 572 { \ 573 __le##bits __tmp = cpu_to_le##bits(value); \ 574 return tlv_put(sctx, attr, &__tmp, sizeof(__tmp)); \ 575 } 576 577 TLV_PUT_DEFINE_INT(64) 578 579 static int tlv_put_string(struct send_ctx *sctx, u16 attr, 580 const char *str, int len) 581 { 582 if (len == -1) 583 len = strlen(str); 584 return tlv_put(sctx, attr, str, len); 585 } 586 587 static int tlv_put_uuid(struct send_ctx *sctx, u16 attr, 588 const u8 *uuid) 589 { 590 return tlv_put(sctx, attr, uuid, BTRFS_UUID_SIZE); 591 } 592 593 static int tlv_put_btrfs_timespec(struct send_ctx *sctx, u16 attr, 594 struct extent_buffer *eb, 595 struct btrfs_timespec *ts) 596 { 597 struct btrfs_timespec bts; 598 read_extent_buffer(eb, &bts, (unsigned long)ts, sizeof(bts)); 599 return tlv_put(sctx, attr, &bts, sizeof(bts)); 600 } 601 602 603 #define TLV_PUT(sctx, attrtype, data, attrlen) \ 604 do { \ 605 ret = tlv_put(sctx, attrtype, data, attrlen); \ 606 if (ret < 0) \ 607 goto tlv_put_failure; \ 608 } while (0) 609 610 #define TLV_PUT_INT(sctx, attrtype, bits, value) \ 611 do { \ 612 ret = tlv_put_u##bits(sctx, attrtype, value); \ 613 if (ret < 0) \ 614 goto tlv_put_failure; \ 615 } while (0) 616 617 #define TLV_PUT_U8(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 8, data) 618 #define TLV_PUT_U16(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 16, data) 619 #define TLV_PUT_U32(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 32, data) 620 #define TLV_PUT_U64(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 64, data) 621 #define TLV_PUT_STRING(sctx, attrtype, str, len) \ 622 do { \ 623 ret = tlv_put_string(sctx, attrtype, str, len); \ 624 if (ret < 0) \ 625 goto tlv_put_failure; \ 626 } while (0) 627 #define TLV_PUT_PATH(sctx, attrtype, p) \ 628 do { \ 629 ret = tlv_put_string(sctx, attrtype, p->start, \ 630 p->end - p->start); \ 631 if (ret < 0) \ 632 goto tlv_put_failure; \ 633 } while(0) 634 #define TLV_PUT_UUID(sctx, attrtype, uuid) \ 635 do { \ 636 ret = tlv_put_uuid(sctx, attrtype, uuid); \ 637 if (ret < 0) \ 638 goto tlv_put_failure; \ 639 } while (0) 640 #define TLV_PUT_BTRFS_TIMESPEC(sctx, attrtype, eb, ts) \ 641 do { \ 642 ret = tlv_put_btrfs_timespec(sctx, attrtype, eb, ts); \ 643 if (ret < 0) \ 644 goto tlv_put_failure; \ 645 } while (0) 646 647 static int send_header(struct send_ctx *sctx) 648 { 649 struct btrfs_stream_header hdr; 650 651 strcpy(hdr.magic, BTRFS_SEND_STREAM_MAGIC); 652 hdr.version = cpu_to_le32(BTRFS_SEND_STREAM_VERSION); 653 654 return write_buf(sctx->send_filp, &hdr, sizeof(hdr), 655 &sctx->send_off); 656 } 657 658 /* 659 * For each command/item we want to send to userspace, we call this function. 660 */ 661 static int begin_cmd(struct send_ctx *sctx, int cmd) 662 { 663 struct btrfs_cmd_header *hdr; 664 665 if (WARN_ON(!sctx->send_buf)) 666 return -EINVAL; 667 668 BUG_ON(sctx->send_size); 669 670 sctx->send_size += sizeof(*hdr); 671 hdr = (struct btrfs_cmd_header *)sctx->send_buf; 672 hdr->cmd = cpu_to_le16(cmd); 673 674 return 0; 675 } 676 677 static int send_cmd(struct send_ctx *sctx) 678 { 679 int ret; 680 struct btrfs_cmd_header *hdr; 681 u32 crc; 682 683 hdr = (struct btrfs_cmd_header *)sctx->send_buf; 684 hdr->len = cpu_to_le32(sctx->send_size - sizeof(*hdr)); 685 hdr->crc = 0; 686 687 crc = crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size); 688 hdr->crc = cpu_to_le32(crc); 689 690 ret = write_buf(sctx->send_filp, sctx->send_buf, sctx->send_size, 691 &sctx->send_off); 692 693 sctx->total_send_size += sctx->send_size; 694 sctx->cmd_send_size[le16_to_cpu(hdr->cmd)] += sctx->send_size; 695 sctx->send_size = 0; 696 697 return ret; 698 } 699 700 /* 701 * Sends a move instruction to user space 702 */ 703 static int send_rename(struct send_ctx *sctx, 704 struct fs_path *from, struct fs_path *to) 705 { 706 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; 707 int ret; 708 709 btrfs_debug(fs_info, "send_rename %s -> %s", from->start, to->start); 710 711 ret = begin_cmd(sctx, BTRFS_SEND_C_RENAME); 712 if (ret < 0) 713 goto out; 714 715 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, from); 716 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_TO, to); 717 718 ret = send_cmd(sctx); 719 720 tlv_put_failure: 721 out: 722 return ret; 723 } 724 725 /* 726 * Sends a link instruction to user space 727 */ 728 static int send_link(struct send_ctx *sctx, 729 struct fs_path *path, struct fs_path *lnk) 730 { 731 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; 732 int ret; 733 734 btrfs_debug(fs_info, "send_link %s -> %s", path->start, lnk->start); 735 736 ret = begin_cmd(sctx, BTRFS_SEND_C_LINK); 737 if (ret < 0) 738 goto out; 739 740 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path); 741 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, lnk); 742 743 ret = send_cmd(sctx); 744 745 tlv_put_failure: 746 out: 747 return ret; 748 } 749 750 /* 751 * Sends an unlink instruction to user space 752 */ 753 static int send_unlink(struct send_ctx *sctx, struct fs_path *path) 754 { 755 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; 756 int ret; 757 758 btrfs_debug(fs_info, "send_unlink %s", path->start); 759 760 ret = begin_cmd(sctx, BTRFS_SEND_C_UNLINK); 761 if (ret < 0) 762 goto out; 763 764 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path); 765 766 ret = send_cmd(sctx); 767 768 tlv_put_failure: 769 out: 770 return ret; 771 } 772 773 /* 774 * Sends a rmdir instruction to user space 775 */ 776 static int send_rmdir(struct send_ctx *sctx, struct fs_path *path) 777 { 778 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; 779 int ret; 780 781 btrfs_debug(fs_info, "send_rmdir %s", path->start); 782 783 ret = begin_cmd(sctx, BTRFS_SEND_C_RMDIR); 784 if (ret < 0) 785 goto out; 786 787 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path); 788 789 ret = send_cmd(sctx); 790 791 tlv_put_failure: 792 out: 793 return ret; 794 } 795 796 /* 797 * Helper function to retrieve some fields from an inode item. 798 */ 799 static int __get_inode_info(struct btrfs_root *root, struct btrfs_path *path, 800 u64 ino, u64 *size, u64 *gen, u64 *mode, u64 *uid, 801 u64 *gid, u64 *rdev) 802 { 803 int ret; 804 struct btrfs_inode_item *ii; 805 struct btrfs_key key; 806 807 key.objectid = ino; 808 key.type = BTRFS_INODE_ITEM_KEY; 809 key.offset = 0; 810 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 811 if (ret) { 812 if (ret > 0) 813 ret = -ENOENT; 814 return ret; 815 } 816 817 ii = btrfs_item_ptr(path->nodes[0], path->slots[0], 818 struct btrfs_inode_item); 819 if (size) 820 *size = btrfs_inode_size(path->nodes[0], ii); 821 if (gen) 822 *gen = btrfs_inode_generation(path->nodes[0], ii); 823 if (mode) 824 *mode = btrfs_inode_mode(path->nodes[0], ii); 825 if (uid) 826 *uid = btrfs_inode_uid(path->nodes[0], ii); 827 if (gid) 828 *gid = btrfs_inode_gid(path->nodes[0], ii); 829 if (rdev) 830 *rdev = btrfs_inode_rdev(path->nodes[0], ii); 831 832 return ret; 833 } 834 835 static int get_inode_info(struct btrfs_root *root, 836 u64 ino, u64 *size, u64 *gen, 837 u64 *mode, u64 *uid, u64 *gid, 838 u64 *rdev) 839 { 840 struct btrfs_path *path; 841 int ret; 842 843 path = alloc_path_for_send(); 844 if (!path) 845 return -ENOMEM; 846 ret = __get_inode_info(root, path, ino, size, gen, mode, uid, gid, 847 rdev); 848 btrfs_free_path(path); 849 return ret; 850 } 851 852 typedef int (*iterate_inode_ref_t)(int num, u64 dir, int index, 853 struct fs_path *p, 854 void *ctx); 855 856 /* 857 * Helper function to iterate the entries in ONE btrfs_inode_ref or 858 * btrfs_inode_extref. 859 * The iterate callback may return a non zero value to stop iteration. This can 860 * be a negative value for error codes or 1 to simply stop it. 861 * 862 * path must point to the INODE_REF or INODE_EXTREF when called. 863 */ 864 static int iterate_inode_ref(struct btrfs_root *root, struct btrfs_path *path, 865 struct btrfs_key *found_key, int resolve, 866 iterate_inode_ref_t iterate, void *ctx) 867 { 868 struct extent_buffer *eb = path->nodes[0]; 869 struct btrfs_item *item; 870 struct btrfs_inode_ref *iref; 871 struct btrfs_inode_extref *extref; 872 struct btrfs_path *tmp_path; 873 struct fs_path *p; 874 u32 cur = 0; 875 u32 total; 876 int slot = path->slots[0]; 877 u32 name_len; 878 char *start; 879 int ret = 0; 880 int num = 0; 881 int index; 882 u64 dir; 883 unsigned long name_off; 884 unsigned long elem_size; 885 unsigned long ptr; 886 887 p = fs_path_alloc_reversed(); 888 if (!p) 889 return -ENOMEM; 890 891 tmp_path = alloc_path_for_send(); 892 if (!tmp_path) { 893 fs_path_free(p); 894 return -ENOMEM; 895 } 896 897 898 if (found_key->type == BTRFS_INODE_REF_KEY) { 899 ptr = (unsigned long)btrfs_item_ptr(eb, slot, 900 struct btrfs_inode_ref); 901 item = btrfs_item_nr(slot); 902 total = btrfs_item_size(eb, item); 903 elem_size = sizeof(*iref); 904 } else { 905 ptr = btrfs_item_ptr_offset(eb, slot); 906 total = btrfs_item_size_nr(eb, slot); 907 elem_size = sizeof(*extref); 908 } 909 910 while (cur < total) { 911 fs_path_reset(p); 912 913 if (found_key->type == BTRFS_INODE_REF_KEY) { 914 iref = (struct btrfs_inode_ref *)(ptr + cur); 915 name_len = btrfs_inode_ref_name_len(eb, iref); 916 name_off = (unsigned long)(iref + 1); 917 index = btrfs_inode_ref_index(eb, iref); 918 dir = found_key->offset; 919 } else { 920 extref = (struct btrfs_inode_extref *)(ptr + cur); 921 name_len = btrfs_inode_extref_name_len(eb, extref); 922 name_off = (unsigned long)&extref->name; 923 index = btrfs_inode_extref_index(eb, extref); 924 dir = btrfs_inode_extref_parent(eb, extref); 925 } 926 927 if (resolve) { 928 start = btrfs_ref_to_path(root, tmp_path, name_len, 929 name_off, eb, dir, 930 p->buf, p->buf_len); 931 if (IS_ERR(start)) { 932 ret = PTR_ERR(start); 933 goto out; 934 } 935 if (start < p->buf) { 936 /* overflow , try again with larger buffer */ 937 ret = fs_path_ensure_buf(p, 938 p->buf_len + p->buf - start); 939 if (ret < 0) 940 goto out; 941 start = btrfs_ref_to_path(root, tmp_path, 942 name_len, name_off, 943 eb, dir, 944 p->buf, p->buf_len); 945 if (IS_ERR(start)) { 946 ret = PTR_ERR(start); 947 goto out; 948 } 949 BUG_ON(start < p->buf); 950 } 951 p->start = start; 952 } else { 953 ret = fs_path_add_from_extent_buffer(p, eb, name_off, 954 name_len); 955 if (ret < 0) 956 goto out; 957 } 958 959 cur += elem_size + name_len; 960 ret = iterate(num, dir, index, p, ctx); 961 if (ret) 962 goto out; 963 num++; 964 } 965 966 out: 967 btrfs_free_path(tmp_path); 968 fs_path_free(p); 969 return ret; 970 } 971 972 typedef int (*iterate_dir_item_t)(int num, struct btrfs_key *di_key, 973 const char *name, int name_len, 974 const char *data, int data_len, 975 u8 type, void *ctx); 976 977 /* 978 * Helper function to iterate the entries in ONE btrfs_dir_item. 979 * The iterate callback may return a non zero value to stop iteration. This can 980 * be a negative value for error codes or 1 to simply stop it. 981 * 982 * path must point to the dir item when called. 983 */ 984 static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path, 985 iterate_dir_item_t iterate, void *ctx) 986 { 987 int ret = 0; 988 struct extent_buffer *eb; 989 struct btrfs_item *item; 990 struct btrfs_dir_item *di; 991 struct btrfs_key di_key; 992 char *buf = NULL; 993 int buf_len; 994 u32 name_len; 995 u32 data_len; 996 u32 cur; 997 u32 len; 998 u32 total; 999 int slot; 1000 int num; 1001 u8 type; 1002 1003 /* 1004 * Start with a small buffer (1 page). If later we end up needing more 1005 * space, which can happen for xattrs on a fs with a leaf size greater 1006 * then the page size, attempt to increase the buffer. Typically xattr 1007 * values are small. 1008 */ 1009 buf_len = PATH_MAX; 1010 buf = kmalloc(buf_len, GFP_KERNEL); 1011 if (!buf) { 1012 ret = -ENOMEM; 1013 goto out; 1014 } 1015 1016 eb = path->nodes[0]; 1017 slot = path->slots[0]; 1018 item = btrfs_item_nr(slot); 1019 di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item); 1020 cur = 0; 1021 len = 0; 1022 total = btrfs_item_size(eb, item); 1023 1024 num = 0; 1025 while (cur < total) { 1026 name_len = btrfs_dir_name_len(eb, di); 1027 data_len = btrfs_dir_data_len(eb, di); 1028 type = btrfs_dir_type(eb, di); 1029 btrfs_dir_item_key_to_cpu(eb, di, &di_key); 1030 1031 if (type == BTRFS_FT_XATTR) { 1032 if (name_len > XATTR_NAME_MAX) { 1033 ret = -ENAMETOOLONG; 1034 goto out; 1035 } 1036 if (name_len + data_len > 1037 BTRFS_MAX_XATTR_SIZE(root->fs_info)) { 1038 ret = -E2BIG; 1039 goto out; 1040 } 1041 } else { 1042 /* 1043 * Path too long 1044 */ 1045 if (name_len + data_len > PATH_MAX) { 1046 ret = -ENAMETOOLONG; 1047 goto out; 1048 } 1049 } 1050 1051 if (name_len + data_len > buf_len) { 1052 buf_len = name_len + data_len; 1053 if (is_vmalloc_addr(buf)) { 1054 vfree(buf); 1055 buf = NULL; 1056 } else { 1057 char *tmp = krealloc(buf, buf_len, 1058 GFP_KERNEL | __GFP_NOWARN); 1059 1060 if (!tmp) 1061 kfree(buf); 1062 buf = tmp; 1063 } 1064 if (!buf) { 1065 buf = kvmalloc(buf_len, GFP_KERNEL); 1066 if (!buf) { 1067 ret = -ENOMEM; 1068 goto out; 1069 } 1070 } 1071 } 1072 1073 read_extent_buffer(eb, buf, (unsigned long)(di + 1), 1074 name_len + data_len); 1075 1076 len = sizeof(*di) + name_len + data_len; 1077 di = (struct btrfs_dir_item *)((char *)di + len); 1078 cur += len; 1079 1080 ret = iterate(num, &di_key, buf, name_len, buf + name_len, 1081 data_len, type, ctx); 1082 if (ret < 0) 1083 goto out; 1084 if (ret) { 1085 ret = 0; 1086 goto out; 1087 } 1088 1089 num++; 1090 } 1091 1092 out: 1093 kvfree(buf); 1094 return ret; 1095 } 1096 1097 static int __copy_first_ref(int num, u64 dir, int index, 1098 struct fs_path *p, void *ctx) 1099 { 1100 int ret; 1101 struct fs_path *pt = ctx; 1102 1103 ret = fs_path_copy(pt, p); 1104 if (ret < 0) 1105 return ret; 1106 1107 /* we want the first only */ 1108 return 1; 1109 } 1110 1111 /* 1112 * Retrieve the first path of an inode. If an inode has more then one 1113 * ref/hardlink, this is ignored. 1114 */ 1115 static int get_inode_path(struct btrfs_root *root, 1116 u64 ino, struct fs_path *path) 1117 { 1118 int ret; 1119 struct btrfs_key key, found_key; 1120 struct btrfs_path *p; 1121 1122 p = alloc_path_for_send(); 1123 if (!p) 1124 return -ENOMEM; 1125 1126 fs_path_reset(path); 1127 1128 key.objectid = ino; 1129 key.type = BTRFS_INODE_REF_KEY; 1130 key.offset = 0; 1131 1132 ret = btrfs_search_slot_for_read(root, &key, p, 1, 0); 1133 if (ret < 0) 1134 goto out; 1135 if (ret) { 1136 ret = 1; 1137 goto out; 1138 } 1139 btrfs_item_key_to_cpu(p->nodes[0], &found_key, p->slots[0]); 1140 if (found_key.objectid != ino || 1141 (found_key.type != BTRFS_INODE_REF_KEY && 1142 found_key.type != BTRFS_INODE_EXTREF_KEY)) { 1143 ret = -ENOENT; 1144 goto out; 1145 } 1146 1147 ret = iterate_inode_ref(root, p, &found_key, 1, 1148 __copy_first_ref, path); 1149 if (ret < 0) 1150 goto out; 1151 ret = 0; 1152 1153 out: 1154 btrfs_free_path(p); 1155 return ret; 1156 } 1157 1158 struct backref_ctx { 1159 struct send_ctx *sctx; 1160 1161 struct btrfs_path *path; 1162 /* number of total found references */ 1163 u64 found; 1164 1165 /* 1166 * used for clones found in send_root. clones found behind cur_objectid 1167 * and cur_offset are not considered as allowed clones. 1168 */ 1169 u64 cur_objectid; 1170 u64 cur_offset; 1171 1172 /* may be truncated in case it's the last extent in a file */ 1173 u64 extent_len; 1174 1175 /* data offset in the file extent item */ 1176 u64 data_offset; 1177 1178 /* Just to check for bugs in backref resolving */ 1179 int found_itself; 1180 }; 1181 1182 static int __clone_root_cmp_bsearch(const void *key, const void *elt) 1183 { 1184 u64 root = (u64)(uintptr_t)key; 1185 struct clone_root *cr = (struct clone_root *)elt; 1186 1187 if (root < cr->root->objectid) 1188 return -1; 1189 if (root > cr->root->objectid) 1190 return 1; 1191 return 0; 1192 } 1193 1194 static int __clone_root_cmp_sort(const void *e1, const void *e2) 1195 { 1196 struct clone_root *cr1 = (struct clone_root *)e1; 1197 struct clone_root *cr2 = (struct clone_root *)e2; 1198 1199 if (cr1->root->objectid < cr2->root->objectid) 1200 return -1; 1201 if (cr1->root->objectid > cr2->root->objectid) 1202 return 1; 1203 return 0; 1204 } 1205 1206 /* 1207 * Called for every backref that is found for the current extent. 1208 * Results are collected in sctx->clone_roots->ino/offset/found_refs 1209 */ 1210 static int __iterate_backrefs(u64 ino, u64 offset, u64 root, void *ctx_) 1211 { 1212 struct backref_ctx *bctx = ctx_; 1213 struct clone_root *found; 1214 int ret; 1215 u64 i_size; 1216 1217 /* First check if the root is in the list of accepted clone sources */ 1218 found = bsearch((void *)(uintptr_t)root, bctx->sctx->clone_roots, 1219 bctx->sctx->clone_roots_cnt, 1220 sizeof(struct clone_root), 1221 __clone_root_cmp_bsearch); 1222 if (!found) 1223 return 0; 1224 1225 if (found->root == bctx->sctx->send_root && 1226 ino == bctx->cur_objectid && 1227 offset == bctx->cur_offset) { 1228 bctx->found_itself = 1; 1229 } 1230 1231 /* 1232 * There are inodes that have extents that lie behind its i_size. Don't 1233 * accept clones from these extents. 1234 */ 1235 ret = __get_inode_info(found->root, bctx->path, ino, &i_size, NULL, NULL, 1236 NULL, NULL, NULL); 1237 btrfs_release_path(bctx->path); 1238 if (ret < 0) 1239 return ret; 1240 1241 if (offset + bctx->data_offset + bctx->extent_len > i_size) 1242 return 0; 1243 1244 /* 1245 * Make sure we don't consider clones from send_root that are 1246 * behind the current inode/offset. 1247 */ 1248 if (found->root == bctx->sctx->send_root) { 1249 /* 1250 * TODO for the moment we don't accept clones from the inode 1251 * that is currently send. We may change this when 1252 * BTRFS_IOC_CLONE_RANGE supports cloning from and to the same 1253 * file. 1254 */ 1255 if (ino >= bctx->cur_objectid) 1256 return 0; 1257 } 1258 1259 bctx->found++; 1260 found->found_refs++; 1261 if (ino < found->ino) { 1262 found->ino = ino; 1263 found->offset = offset; 1264 } else if (found->ino == ino) { 1265 /* 1266 * same extent found more then once in the same file. 1267 */ 1268 if (found->offset > offset + bctx->extent_len) 1269 found->offset = offset; 1270 } 1271 1272 return 0; 1273 } 1274 1275 /* 1276 * Given an inode, offset and extent item, it finds a good clone for a clone 1277 * instruction. Returns -ENOENT when none could be found. The function makes 1278 * sure that the returned clone is usable at the point where sending is at the 1279 * moment. This means, that no clones are accepted which lie behind the current 1280 * inode+offset. 1281 * 1282 * path must point to the extent item when called. 1283 */ 1284 static int find_extent_clone(struct send_ctx *sctx, 1285 struct btrfs_path *path, 1286 u64 ino, u64 data_offset, 1287 u64 ino_size, 1288 struct clone_root **found) 1289 { 1290 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; 1291 int ret; 1292 int extent_type; 1293 u64 logical; 1294 u64 disk_byte; 1295 u64 num_bytes; 1296 u64 extent_item_pos; 1297 u64 flags = 0; 1298 struct btrfs_file_extent_item *fi; 1299 struct extent_buffer *eb = path->nodes[0]; 1300 struct backref_ctx *backref_ctx = NULL; 1301 struct clone_root *cur_clone_root; 1302 struct btrfs_key found_key; 1303 struct btrfs_path *tmp_path; 1304 int compressed; 1305 u32 i; 1306 1307 tmp_path = alloc_path_for_send(); 1308 if (!tmp_path) 1309 return -ENOMEM; 1310 1311 /* We only use this path under the commit sem */ 1312 tmp_path->need_commit_sem = 0; 1313 1314 backref_ctx = kmalloc(sizeof(*backref_ctx), GFP_KERNEL); 1315 if (!backref_ctx) { 1316 ret = -ENOMEM; 1317 goto out; 1318 } 1319 1320 backref_ctx->path = tmp_path; 1321 1322 if (data_offset >= ino_size) { 1323 /* 1324 * There may be extents that lie behind the file's size. 1325 * I at least had this in combination with snapshotting while 1326 * writing large files. 1327 */ 1328 ret = 0; 1329 goto out; 1330 } 1331 1332 fi = btrfs_item_ptr(eb, path->slots[0], 1333 struct btrfs_file_extent_item); 1334 extent_type = btrfs_file_extent_type(eb, fi); 1335 if (extent_type == BTRFS_FILE_EXTENT_INLINE) { 1336 ret = -ENOENT; 1337 goto out; 1338 } 1339 compressed = btrfs_file_extent_compression(eb, fi); 1340 1341 num_bytes = btrfs_file_extent_num_bytes(eb, fi); 1342 disk_byte = btrfs_file_extent_disk_bytenr(eb, fi); 1343 if (disk_byte == 0) { 1344 ret = -ENOENT; 1345 goto out; 1346 } 1347 logical = disk_byte + btrfs_file_extent_offset(eb, fi); 1348 1349 down_read(&fs_info->commit_root_sem); 1350 ret = extent_from_logical(fs_info, disk_byte, tmp_path, 1351 &found_key, &flags); 1352 up_read(&fs_info->commit_root_sem); 1353 btrfs_release_path(tmp_path); 1354 1355 if (ret < 0) 1356 goto out; 1357 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { 1358 ret = -EIO; 1359 goto out; 1360 } 1361 1362 /* 1363 * Setup the clone roots. 1364 */ 1365 for (i = 0; i < sctx->clone_roots_cnt; i++) { 1366 cur_clone_root = sctx->clone_roots + i; 1367 cur_clone_root->ino = (u64)-1; 1368 cur_clone_root->offset = 0; 1369 cur_clone_root->found_refs = 0; 1370 } 1371 1372 backref_ctx->sctx = sctx; 1373 backref_ctx->found = 0; 1374 backref_ctx->cur_objectid = ino; 1375 backref_ctx->cur_offset = data_offset; 1376 backref_ctx->found_itself = 0; 1377 backref_ctx->extent_len = num_bytes; 1378 /* 1379 * For non-compressed extents iterate_extent_inodes() gives us extent 1380 * offsets that already take into account the data offset, but not for 1381 * compressed extents, since the offset is logical and not relative to 1382 * the physical extent locations. We must take this into account to 1383 * avoid sending clone offsets that go beyond the source file's size, 1384 * which would result in the clone ioctl failing with -EINVAL on the 1385 * receiving end. 1386 */ 1387 if (compressed == BTRFS_COMPRESS_NONE) 1388 backref_ctx->data_offset = 0; 1389 else 1390 backref_ctx->data_offset = btrfs_file_extent_offset(eb, fi); 1391 1392 /* 1393 * The last extent of a file may be too large due to page alignment. 1394 * We need to adjust extent_len in this case so that the checks in 1395 * __iterate_backrefs work. 1396 */ 1397 if (data_offset + num_bytes >= ino_size) 1398 backref_ctx->extent_len = ino_size - data_offset; 1399 1400 /* 1401 * Now collect all backrefs. 1402 */ 1403 if (compressed == BTRFS_COMPRESS_NONE) 1404 extent_item_pos = logical - found_key.objectid; 1405 else 1406 extent_item_pos = 0; 1407 ret = iterate_extent_inodes(fs_info, found_key.objectid, 1408 extent_item_pos, 1, __iterate_backrefs, 1409 backref_ctx, false); 1410 1411 if (ret < 0) 1412 goto out; 1413 1414 if (!backref_ctx->found_itself) { 1415 /* found a bug in backref code? */ 1416 ret = -EIO; 1417 btrfs_err(fs_info, 1418 "did not find backref in send_root. inode=%llu, offset=%llu, disk_byte=%llu found extent=%llu", 1419 ino, data_offset, disk_byte, found_key.objectid); 1420 goto out; 1421 } 1422 1423 btrfs_debug(fs_info, 1424 "find_extent_clone: data_offset=%llu, ino=%llu, num_bytes=%llu, logical=%llu", 1425 data_offset, ino, num_bytes, logical); 1426 1427 if (!backref_ctx->found) 1428 btrfs_debug(fs_info, "no clones found"); 1429 1430 cur_clone_root = NULL; 1431 for (i = 0; i < sctx->clone_roots_cnt; i++) { 1432 if (sctx->clone_roots[i].found_refs) { 1433 if (!cur_clone_root) 1434 cur_clone_root = sctx->clone_roots + i; 1435 else if (sctx->clone_roots[i].root == sctx->send_root) 1436 /* prefer clones from send_root over others */ 1437 cur_clone_root = sctx->clone_roots + i; 1438 } 1439 1440 } 1441 1442 if (cur_clone_root) { 1443 *found = cur_clone_root; 1444 ret = 0; 1445 } else { 1446 ret = -ENOENT; 1447 } 1448 1449 out: 1450 btrfs_free_path(tmp_path); 1451 kfree(backref_ctx); 1452 return ret; 1453 } 1454 1455 static int read_symlink(struct btrfs_root *root, 1456 u64 ino, 1457 struct fs_path *dest) 1458 { 1459 int ret; 1460 struct btrfs_path *path; 1461 struct btrfs_key key; 1462 struct btrfs_file_extent_item *ei; 1463 u8 type; 1464 u8 compression; 1465 unsigned long off; 1466 int len; 1467 1468 path = alloc_path_for_send(); 1469 if (!path) 1470 return -ENOMEM; 1471 1472 key.objectid = ino; 1473 key.type = BTRFS_EXTENT_DATA_KEY; 1474 key.offset = 0; 1475 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 1476 if (ret < 0) 1477 goto out; 1478 if (ret) { 1479 /* 1480 * An empty symlink inode. Can happen in rare error paths when 1481 * creating a symlink (transaction committed before the inode 1482 * eviction handler removed the symlink inode items and a crash 1483 * happened in between or the subvol was snapshoted in between). 1484 * Print an informative message to dmesg/syslog so that the user 1485 * can delete the symlink. 1486 */ 1487 btrfs_err(root->fs_info, 1488 "Found empty symlink inode %llu at root %llu", 1489 ino, root->root_key.objectid); 1490 ret = -EIO; 1491 goto out; 1492 } 1493 1494 ei = btrfs_item_ptr(path->nodes[0], path->slots[0], 1495 struct btrfs_file_extent_item); 1496 type = btrfs_file_extent_type(path->nodes[0], ei); 1497 compression = btrfs_file_extent_compression(path->nodes[0], ei); 1498 BUG_ON(type != BTRFS_FILE_EXTENT_INLINE); 1499 BUG_ON(compression); 1500 1501 off = btrfs_file_extent_inline_start(ei); 1502 len = btrfs_file_extent_inline_len(path->nodes[0], path->slots[0], ei); 1503 1504 ret = fs_path_add_from_extent_buffer(dest, path->nodes[0], off, len); 1505 1506 out: 1507 btrfs_free_path(path); 1508 return ret; 1509 } 1510 1511 /* 1512 * Helper function to generate a file name that is unique in the root of 1513 * send_root and parent_root. This is used to generate names for orphan inodes. 1514 */ 1515 static int gen_unique_name(struct send_ctx *sctx, 1516 u64 ino, u64 gen, 1517 struct fs_path *dest) 1518 { 1519 int ret = 0; 1520 struct btrfs_path *path; 1521 struct btrfs_dir_item *di; 1522 char tmp[64]; 1523 int len; 1524 u64 idx = 0; 1525 1526 path = alloc_path_for_send(); 1527 if (!path) 1528 return -ENOMEM; 1529 1530 while (1) { 1531 len = snprintf(tmp, sizeof(tmp), "o%llu-%llu-%llu", 1532 ino, gen, idx); 1533 ASSERT(len < sizeof(tmp)); 1534 1535 di = btrfs_lookup_dir_item(NULL, sctx->send_root, 1536 path, BTRFS_FIRST_FREE_OBJECTID, 1537 tmp, strlen(tmp), 0); 1538 btrfs_release_path(path); 1539 if (IS_ERR(di)) { 1540 ret = PTR_ERR(di); 1541 goto out; 1542 } 1543 if (di) { 1544 /* not unique, try again */ 1545 idx++; 1546 continue; 1547 } 1548 1549 if (!sctx->parent_root) { 1550 /* unique */ 1551 ret = 0; 1552 break; 1553 } 1554 1555 di = btrfs_lookup_dir_item(NULL, sctx->parent_root, 1556 path, BTRFS_FIRST_FREE_OBJECTID, 1557 tmp, strlen(tmp), 0); 1558 btrfs_release_path(path); 1559 if (IS_ERR(di)) { 1560 ret = PTR_ERR(di); 1561 goto out; 1562 } 1563 if (di) { 1564 /* not unique, try again */ 1565 idx++; 1566 continue; 1567 } 1568 /* unique */ 1569 break; 1570 } 1571 1572 ret = fs_path_add(dest, tmp, strlen(tmp)); 1573 1574 out: 1575 btrfs_free_path(path); 1576 return ret; 1577 } 1578 1579 enum inode_state { 1580 inode_state_no_change, 1581 inode_state_will_create, 1582 inode_state_did_create, 1583 inode_state_will_delete, 1584 inode_state_did_delete, 1585 }; 1586 1587 static int get_cur_inode_state(struct send_ctx *sctx, u64 ino, u64 gen) 1588 { 1589 int ret; 1590 int left_ret; 1591 int right_ret; 1592 u64 left_gen; 1593 u64 right_gen; 1594 1595 ret = get_inode_info(sctx->send_root, ino, NULL, &left_gen, NULL, NULL, 1596 NULL, NULL); 1597 if (ret < 0 && ret != -ENOENT) 1598 goto out; 1599 left_ret = ret; 1600 1601 if (!sctx->parent_root) { 1602 right_ret = -ENOENT; 1603 } else { 1604 ret = get_inode_info(sctx->parent_root, ino, NULL, &right_gen, 1605 NULL, NULL, NULL, NULL); 1606 if (ret < 0 && ret != -ENOENT) 1607 goto out; 1608 right_ret = ret; 1609 } 1610 1611 if (!left_ret && !right_ret) { 1612 if (left_gen == gen && right_gen == gen) { 1613 ret = inode_state_no_change; 1614 } else if (left_gen == gen) { 1615 if (ino < sctx->send_progress) 1616 ret = inode_state_did_create; 1617 else 1618 ret = inode_state_will_create; 1619 } else if (right_gen == gen) { 1620 if (ino < sctx->send_progress) 1621 ret = inode_state_did_delete; 1622 else 1623 ret = inode_state_will_delete; 1624 } else { 1625 ret = -ENOENT; 1626 } 1627 } else if (!left_ret) { 1628 if (left_gen == gen) { 1629 if (ino < sctx->send_progress) 1630 ret = inode_state_did_create; 1631 else 1632 ret = inode_state_will_create; 1633 } else { 1634 ret = -ENOENT; 1635 } 1636 } else if (!right_ret) { 1637 if (right_gen == gen) { 1638 if (ino < sctx->send_progress) 1639 ret = inode_state_did_delete; 1640 else 1641 ret = inode_state_will_delete; 1642 } else { 1643 ret = -ENOENT; 1644 } 1645 } else { 1646 ret = -ENOENT; 1647 } 1648 1649 out: 1650 return ret; 1651 } 1652 1653 static int is_inode_existent(struct send_ctx *sctx, u64 ino, u64 gen) 1654 { 1655 int ret; 1656 1657 if (ino == BTRFS_FIRST_FREE_OBJECTID) 1658 return 1; 1659 1660 ret = get_cur_inode_state(sctx, ino, gen); 1661 if (ret < 0) 1662 goto out; 1663 1664 if (ret == inode_state_no_change || 1665 ret == inode_state_did_create || 1666 ret == inode_state_will_delete) 1667 ret = 1; 1668 else 1669 ret = 0; 1670 1671 out: 1672 return ret; 1673 } 1674 1675 /* 1676 * Helper function to lookup a dir item in a dir. 1677 */ 1678 static int lookup_dir_item_inode(struct btrfs_root *root, 1679 u64 dir, const char *name, int name_len, 1680 u64 *found_inode, 1681 u8 *found_type) 1682 { 1683 int ret = 0; 1684 struct btrfs_dir_item *di; 1685 struct btrfs_key key; 1686 struct btrfs_path *path; 1687 1688 path = alloc_path_for_send(); 1689 if (!path) 1690 return -ENOMEM; 1691 1692 di = btrfs_lookup_dir_item(NULL, root, path, 1693 dir, name, name_len, 0); 1694 if (!di) { 1695 ret = -ENOENT; 1696 goto out; 1697 } 1698 if (IS_ERR(di)) { 1699 ret = PTR_ERR(di); 1700 goto out; 1701 } 1702 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key); 1703 if (key.type == BTRFS_ROOT_ITEM_KEY) { 1704 ret = -ENOENT; 1705 goto out; 1706 } 1707 *found_inode = key.objectid; 1708 *found_type = btrfs_dir_type(path->nodes[0], di); 1709 1710 out: 1711 btrfs_free_path(path); 1712 return ret; 1713 } 1714 1715 /* 1716 * Looks up the first btrfs_inode_ref of a given ino. It returns the parent dir, 1717 * generation of the parent dir and the name of the dir entry. 1718 */ 1719 static int get_first_ref(struct btrfs_root *root, u64 ino, 1720 u64 *dir, u64 *dir_gen, struct fs_path *name) 1721 { 1722 int ret; 1723 struct btrfs_key key; 1724 struct btrfs_key found_key; 1725 struct btrfs_path *path; 1726 int len; 1727 u64 parent_dir; 1728 1729 path = alloc_path_for_send(); 1730 if (!path) 1731 return -ENOMEM; 1732 1733 key.objectid = ino; 1734 key.type = BTRFS_INODE_REF_KEY; 1735 key.offset = 0; 1736 1737 ret = btrfs_search_slot_for_read(root, &key, path, 1, 0); 1738 if (ret < 0) 1739 goto out; 1740 if (!ret) 1741 btrfs_item_key_to_cpu(path->nodes[0], &found_key, 1742 path->slots[0]); 1743 if (ret || found_key.objectid != ino || 1744 (found_key.type != BTRFS_INODE_REF_KEY && 1745 found_key.type != BTRFS_INODE_EXTREF_KEY)) { 1746 ret = -ENOENT; 1747 goto out; 1748 } 1749 1750 if (found_key.type == BTRFS_INODE_REF_KEY) { 1751 struct btrfs_inode_ref *iref; 1752 iref = btrfs_item_ptr(path->nodes[0], path->slots[0], 1753 struct btrfs_inode_ref); 1754 len = btrfs_inode_ref_name_len(path->nodes[0], iref); 1755 ret = fs_path_add_from_extent_buffer(name, path->nodes[0], 1756 (unsigned long)(iref + 1), 1757 len); 1758 parent_dir = found_key.offset; 1759 } else { 1760 struct btrfs_inode_extref *extref; 1761 extref = btrfs_item_ptr(path->nodes[0], path->slots[0], 1762 struct btrfs_inode_extref); 1763 len = btrfs_inode_extref_name_len(path->nodes[0], extref); 1764 ret = fs_path_add_from_extent_buffer(name, path->nodes[0], 1765 (unsigned long)&extref->name, len); 1766 parent_dir = btrfs_inode_extref_parent(path->nodes[0], extref); 1767 } 1768 if (ret < 0) 1769 goto out; 1770 btrfs_release_path(path); 1771 1772 if (dir_gen) { 1773 ret = get_inode_info(root, parent_dir, NULL, dir_gen, NULL, 1774 NULL, NULL, NULL); 1775 if (ret < 0) 1776 goto out; 1777 } 1778 1779 *dir = parent_dir; 1780 1781 out: 1782 btrfs_free_path(path); 1783 return ret; 1784 } 1785 1786 static int is_first_ref(struct btrfs_root *root, 1787 u64 ino, u64 dir, 1788 const char *name, int name_len) 1789 { 1790 int ret; 1791 struct fs_path *tmp_name; 1792 u64 tmp_dir; 1793 1794 tmp_name = fs_path_alloc(); 1795 if (!tmp_name) 1796 return -ENOMEM; 1797 1798 ret = get_first_ref(root, ino, &tmp_dir, NULL, tmp_name); 1799 if (ret < 0) 1800 goto out; 1801 1802 if (dir != tmp_dir || name_len != fs_path_len(tmp_name)) { 1803 ret = 0; 1804 goto out; 1805 } 1806 1807 ret = !memcmp(tmp_name->start, name, name_len); 1808 1809 out: 1810 fs_path_free(tmp_name); 1811 return ret; 1812 } 1813 1814 /* 1815 * Used by process_recorded_refs to determine if a new ref would overwrite an 1816 * already existing ref. In case it detects an overwrite, it returns the 1817 * inode/gen in who_ino/who_gen. 1818 * When an overwrite is detected, process_recorded_refs does proper orphanizing 1819 * to make sure later references to the overwritten inode are possible. 1820 * Orphanizing is however only required for the first ref of an inode. 1821 * process_recorded_refs does an additional is_first_ref check to see if 1822 * orphanizing is really required. 1823 */ 1824 static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen, 1825 const char *name, int name_len, 1826 u64 *who_ino, u64 *who_gen, u64 *who_mode) 1827 { 1828 int ret = 0; 1829 u64 gen; 1830 u64 other_inode = 0; 1831 u8 other_type = 0; 1832 1833 if (!sctx->parent_root) 1834 goto out; 1835 1836 ret = is_inode_existent(sctx, dir, dir_gen); 1837 if (ret <= 0) 1838 goto out; 1839 1840 /* 1841 * If we have a parent root we need to verify that the parent dir was 1842 * not deleted and then re-created, if it was then we have no overwrite 1843 * and we can just unlink this entry. 1844 */ 1845 if (sctx->parent_root && dir != BTRFS_FIRST_FREE_OBJECTID) { 1846 ret = get_inode_info(sctx->parent_root, dir, NULL, &gen, NULL, 1847 NULL, NULL, NULL); 1848 if (ret < 0 && ret != -ENOENT) 1849 goto out; 1850 if (ret) { 1851 ret = 0; 1852 goto out; 1853 } 1854 if (gen != dir_gen) 1855 goto out; 1856 } 1857 1858 ret = lookup_dir_item_inode(sctx->parent_root, dir, name, name_len, 1859 &other_inode, &other_type); 1860 if (ret < 0 && ret != -ENOENT) 1861 goto out; 1862 if (ret) { 1863 ret = 0; 1864 goto out; 1865 } 1866 1867 /* 1868 * Check if the overwritten ref was already processed. If yes, the ref 1869 * was already unlinked/moved, so we can safely assume that we will not 1870 * overwrite anything at this point in time. 1871 */ 1872 if (other_inode > sctx->send_progress || 1873 is_waiting_for_move(sctx, other_inode)) { 1874 ret = get_inode_info(sctx->parent_root, other_inode, NULL, 1875 who_gen, who_mode, NULL, NULL, NULL); 1876 if (ret < 0) 1877 goto out; 1878 1879 ret = 1; 1880 *who_ino = other_inode; 1881 } else { 1882 ret = 0; 1883 } 1884 1885 out: 1886 return ret; 1887 } 1888 1889 /* 1890 * Checks if the ref was overwritten by an already processed inode. This is 1891 * used by __get_cur_name_and_parent to find out if the ref was orphanized and 1892 * thus the orphan name needs be used. 1893 * process_recorded_refs also uses it to avoid unlinking of refs that were 1894 * overwritten. 1895 */ 1896 static int did_overwrite_ref(struct send_ctx *sctx, 1897 u64 dir, u64 dir_gen, 1898 u64 ino, u64 ino_gen, 1899 const char *name, int name_len) 1900 { 1901 int ret = 0; 1902 u64 gen; 1903 u64 ow_inode; 1904 u8 other_type; 1905 1906 if (!sctx->parent_root) 1907 goto out; 1908 1909 ret = is_inode_existent(sctx, dir, dir_gen); 1910 if (ret <= 0) 1911 goto out; 1912 1913 if (dir != BTRFS_FIRST_FREE_OBJECTID) { 1914 ret = get_inode_info(sctx->send_root, dir, NULL, &gen, NULL, 1915 NULL, NULL, NULL); 1916 if (ret < 0 && ret != -ENOENT) 1917 goto out; 1918 if (ret) { 1919 ret = 0; 1920 goto out; 1921 } 1922 if (gen != dir_gen) 1923 goto out; 1924 } 1925 1926 /* check if the ref was overwritten by another ref */ 1927 ret = lookup_dir_item_inode(sctx->send_root, dir, name, name_len, 1928 &ow_inode, &other_type); 1929 if (ret < 0 && ret != -ENOENT) 1930 goto out; 1931 if (ret) { 1932 /* was never and will never be overwritten */ 1933 ret = 0; 1934 goto out; 1935 } 1936 1937 ret = get_inode_info(sctx->send_root, ow_inode, NULL, &gen, NULL, NULL, 1938 NULL, NULL); 1939 if (ret < 0) 1940 goto out; 1941 1942 if (ow_inode == ino && gen == ino_gen) { 1943 ret = 0; 1944 goto out; 1945 } 1946 1947 /* 1948 * We know that it is or will be overwritten. Check this now. 1949 * The current inode being processed might have been the one that caused 1950 * inode 'ino' to be orphanized, therefore check if ow_inode matches 1951 * the current inode being processed. 1952 */ 1953 if ((ow_inode < sctx->send_progress) || 1954 (ino != sctx->cur_ino && ow_inode == sctx->cur_ino && 1955 gen == sctx->cur_inode_gen)) 1956 ret = 1; 1957 else 1958 ret = 0; 1959 1960 out: 1961 return ret; 1962 } 1963 1964 /* 1965 * Same as did_overwrite_ref, but also checks if it is the first ref of an inode 1966 * that got overwritten. This is used by process_recorded_refs to determine 1967 * if it has to use the path as returned by get_cur_path or the orphan name. 1968 */ 1969 static int did_overwrite_first_ref(struct send_ctx *sctx, u64 ino, u64 gen) 1970 { 1971 int ret = 0; 1972 struct fs_path *name = NULL; 1973 u64 dir; 1974 u64 dir_gen; 1975 1976 if (!sctx->parent_root) 1977 goto out; 1978 1979 name = fs_path_alloc(); 1980 if (!name) 1981 return -ENOMEM; 1982 1983 ret = get_first_ref(sctx->parent_root, ino, &dir, &dir_gen, name); 1984 if (ret < 0) 1985 goto out; 1986 1987 ret = did_overwrite_ref(sctx, dir, dir_gen, ino, gen, 1988 name->start, fs_path_len(name)); 1989 1990 out: 1991 fs_path_free(name); 1992 return ret; 1993 } 1994 1995 /* 1996 * Insert a name cache entry. On 32bit kernels the radix tree index is 32bit, 1997 * so we need to do some special handling in case we have clashes. This function 1998 * takes care of this with the help of name_cache_entry::radix_list. 1999 * In case of error, nce is kfreed. 2000 */ 2001 static int name_cache_insert(struct send_ctx *sctx, 2002 struct name_cache_entry *nce) 2003 { 2004 int ret = 0; 2005 struct list_head *nce_head; 2006 2007 nce_head = radix_tree_lookup(&sctx->name_cache, 2008 (unsigned long)nce->ino); 2009 if (!nce_head) { 2010 nce_head = kmalloc(sizeof(*nce_head), GFP_KERNEL); 2011 if (!nce_head) { 2012 kfree(nce); 2013 return -ENOMEM; 2014 } 2015 INIT_LIST_HEAD(nce_head); 2016 2017 ret = radix_tree_insert(&sctx->name_cache, nce->ino, nce_head); 2018 if (ret < 0) { 2019 kfree(nce_head); 2020 kfree(nce); 2021 return ret; 2022 } 2023 } 2024 list_add_tail(&nce->radix_list, nce_head); 2025 list_add_tail(&nce->list, &sctx->name_cache_list); 2026 sctx->name_cache_size++; 2027 2028 return ret; 2029 } 2030 2031 static void name_cache_delete(struct send_ctx *sctx, 2032 struct name_cache_entry *nce) 2033 { 2034 struct list_head *nce_head; 2035 2036 nce_head = radix_tree_lookup(&sctx->name_cache, 2037 (unsigned long)nce->ino); 2038 if (!nce_head) { 2039 btrfs_err(sctx->send_root->fs_info, 2040 "name_cache_delete lookup failed ino %llu cache size %d, leaking memory", 2041 nce->ino, sctx->name_cache_size); 2042 } 2043 2044 list_del(&nce->radix_list); 2045 list_del(&nce->list); 2046 sctx->name_cache_size--; 2047 2048 /* 2049 * We may not get to the final release of nce_head if the lookup fails 2050 */ 2051 if (nce_head && list_empty(nce_head)) { 2052 radix_tree_delete(&sctx->name_cache, (unsigned long)nce->ino); 2053 kfree(nce_head); 2054 } 2055 } 2056 2057 static struct name_cache_entry *name_cache_search(struct send_ctx *sctx, 2058 u64 ino, u64 gen) 2059 { 2060 struct list_head *nce_head; 2061 struct name_cache_entry *cur; 2062 2063 nce_head = radix_tree_lookup(&sctx->name_cache, (unsigned long)ino); 2064 if (!nce_head) 2065 return NULL; 2066 2067 list_for_each_entry(cur, nce_head, radix_list) { 2068 if (cur->ino == ino && cur->gen == gen) 2069 return cur; 2070 } 2071 return NULL; 2072 } 2073 2074 /* 2075 * Removes the entry from the list and adds it back to the end. This marks the 2076 * entry as recently used so that name_cache_clean_unused does not remove it. 2077 */ 2078 static void name_cache_used(struct send_ctx *sctx, struct name_cache_entry *nce) 2079 { 2080 list_del(&nce->list); 2081 list_add_tail(&nce->list, &sctx->name_cache_list); 2082 } 2083 2084 /* 2085 * Remove some entries from the beginning of name_cache_list. 2086 */ 2087 static void name_cache_clean_unused(struct send_ctx *sctx) 2088 { 2089 struct name_cache_entry *nce; 2090 2091 if (sctx->name_cache_size < SEND_CTX_NAME_CACHE_CLEAN_SIZE) 2092 return; 2093 2094 while (sctx->name_cache_size > SEND_CTX_MAX_NAME_CACHE_SIZE) { 2095 nce = list_entry(sctx->name_cache_list.next, 2096 struct name_cache_entry, list); 2097 name_cache_delete(sctx, nce); 2098 kfree(nce); 2099 } 2100 } 2101 2102 static void name_cache_free(struct send_ctx *sctx) 2103 { 2104 struct name_cache_entry *nce; 2105 2106 while (!list_empty(&sctx->name_cache_list)) { 2107 nce = list_entry(sctx->name_cache_list.next, 2108 struct name_cache_entry, list); 2109 name_cache_delete(sctx, nce); 2110 kfree(nce); 2111 } 2112 } 2113 2114 /* 2115 * Used by get_cur_path for each ref up to the root. 2116 * Returns 0 if it succeeded. 2117 * Returns 1 if the inode is not existent or got overwritten. In that case, the 2118 * name is an orphan name. This instructs get_cur_path to stop iterating. If 1 2119 * is returned, parent_ino/parent_gen are not guaranteed to be valid. 2120 * Returns <0 in case of error. 2121 */ 2122 static int __get_cur_name_and_parent(struct send_ctx *sctx, 2123 u64 ino, u64 gen, 2124 u64 *parent_ino, 2125 u64 *parent_gen, 2126 struct fs_path *dest) 2127 { 2128 int ret; 2129 int nce_ret; 2130 struct name_cache_entry *nce = NULL; 2131 2132 /* 2133 * First check if we already did a call to this function with the same 2134 * ino/gen. If yes, check if the cache entry is still up-to-date. If yes 2135 * return the cached result. 2136 */ 2137 nce = name_cache_search(sctx, ino, gen); 2138 if (nce) { 2139 if (ino < sctx->send_progress && nce->need_later_update) { 2140 name_cache_delete(sctx, nce); 2141 kfree(nce); 2142 nce = NULL; 2143 } else { 2144 name_cache_used(sctx, nce); 2145 *parent_ino = nce->parent_ino; 2146 *parent_gen = nce->parent_gen; 2147 ret = fs_path_add(dest, nce->name, nce->name_len); 2148 if (ret < 0) 2149 goto out; 2150 ret = nce->ret; 2151 goto out; 2152 } 2153 } 2154 2155 /* 2156 * If the inode is not existent yet, add the orphan name and return 1. 2157 * This should only happen for the parent dir that we determine in 2158 * __record_new_ref 2159 */ 2160 ret = is_inode_existent(sctx, ino, gen); 2161 if (ret < 0) 2162 goto out; 2163 2164 if (!ret) { 2165 ret = gen_unique_name(sctx, ino, gen, dest); 2166 if (ret < 0) 2167 goto out; 2168 ret = 1; 2169 goto out_cache; 2170 } 2171 2172 /* 2173 * Depending on whether the inode was already processed or not, use 2174 * send_root or parent_root for ref lookup. 2175 */ 2176 if (ino < sctx->send_progress) 2177 ret = get_first_ref(sctx->send_root, ino, 2178 parent_ino, parent_gen, dest); 2179 else 2180 ret = get_first_ref(sctx->parent_root, ino, 2181 parent_ino, parent_gen, dest); 2182 if (ret < 0) 2183 goto out; 2184 2185 /* 2186 * Check if the ref was overwritten by an inode's ref that was processed 2187 * earlier. If yes, treat as orphan and return 1. 2188 */ 2189 ret = did_overwrite_ref(sctx, *parent_ino, *parent_gen, ino, gen, 2190 dest->start, dest->end - dest->start); 2191 if (ret < 0) 2192 goto out; 2193 if (ret) { 2194 fs_path_reset(dest); 2195 ret = gen_unique_name(sctx, ino, gen, dest); 2196 if (ret < 0) 2197 goto out; 2198 ret = 1; 2199 } 2200 2201 out_cache: 2202 /* 2203 * Store the result of the lookup in the name cache. 2204 */ 2205 nce = kmalloc(sizeof(*nce) + fs_path_len(dest) + 1, GFP_KERNEL); 2206 if (!nce) { 2207 ret = -ENOMEM; 2208 goto out; 2209 } 2210 2211 nce->ino = ino; 2212 nce->gen = gen; 2213 nce->parent_ino = *parent_ino; 2214 nce->parent_gen = *parent_gen; 2215 nce->name_len = fs_path_len(dest); 2216 nce->ret = ret; 2217 strcpy(nce->name, dest->start); 2218 2219 if (ino < sctx->send_progress) 2220 nce->need_later_update = 0; 2221 else 2222 nce->need_later_update = 1; 2223 2224 nce_ret = name_cache_insert(sctx, nce); 2225 if (nce_ret < 0) 2226 ret = nce_ret; 2227 name_cache_clean_unused(sctx); 2228 2229 out: 2230 return ret; 2231 } 2232 2233 /* 2234 * Magic happens here. This function returns the first ref to an inode as it 2235 * would look like while receiving the stream at this point in time. 2236 * We walk the path up to the root. For every inode in between, we check if it 2237 * was already processed/sent. If yes, we continue with the parent as found 2238 * in send_root. If not, we continue with the parent as found in parent_root. 2239 * If we encounter an inode that was deleted at this point in time, we use the 2240 * inodes "orphan" name instead of the real name and stop. Same with new inodes 2241 * that were not created yet and overwritten inodes/refs. 2242 * 2243 * When do we have have orphan inodes: 2244 * 1. When an inode is freshly created and thus no valid refs are available yet 2245 * 2. When a directory lost all it's refs (deleted) but still has dir items 2246 * inside which were not processed yet (pending for move/delete). If anyone 2247 * tried to get the path to the dir items, it would get a path inside that 2248 * orphan directory. 2249 * 3. When an inode is moved around or gets new links, it may overwrite the ref 2250 * of an unprocessed inode. If in that case the first ref would be 2251 * overwritten, the overwritten inode gets "orphanized". Later when we 2252 * process this overwritten inode, it is restored at a new place by moving 2253 * the orphan inode. 2254 * 2255 * sctx->send_progress tells this function at which point in time receiving 2256 * would be. 2257 */ 2258 static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen, 2259 struct fs_path *dest) 2260 { 2261 int ret = 0; 2262 struct fs_path *name = NULL; 2263 u64 parent_inode = 0; 2264 u64 parent_gen = 0; 2265 int stop = 0; 2266 2267 name = fs_path_alloc(); 2268 if (!name) { 2269 ret = -ENOMEM; 2270 goto out; 2271 } 2272 2273 dest->reversed = 1; 2274 fs_path_reset(dest); 2275 2276 while (!stop && ino != BTRFS_FIRST_FREE_OBJECTID) { 2277 struct waiting_dir_move *wdm; 2278 2279 fs_path_reset(name); 2280 2281 if (is_waiting_for_rm(sctx, ino)) { 2282 ret = gen_unique_name(sctx, ino, gen, name); 2283 if (ret < 0) 2284 goto out; 2285 ret = fs_path_add_path(dest, name); 2286 break; 2287 } 2288 2289 wdm = get_waiting_dir_move(sctx, ino); 2290 if (wdm && wdm->orphanized) { 2291 ret = gen_unique_name(sctx, ino, gen, name); 2292 stop = 1; 2293 } else if (wdm) { 2294 ret = get_first_ref(sctx->parent_root, ino, 2295 &parent_inode, &parent_gen, name); 2296 } else { 2297 ret = __get_cur_name_and_parent(sctx, ino, gen, 2298 &parent_inode, 2299 &parent_gen, name); 2300 if (ret) 2301 stop = 1; 2302 } 2303 2304 if (ret < 0) 2305 goto out; 2306 2307 ret = fs_path_add_path(dest, name); 2308 if (ret < 0) 2309 goto out; 2310 2311 ino = parent_inode; 2312 gen = parent_gen; 2313 } 2314 2315 out: 2316 fs_path_free(name); 2317 if (!ret) 2318 fs_path_unreverse(dest); 2319 return ret; 2320 } 2321 2322 /* 2323 * Sends a BTRFS_SEND_C_SUBVOL command/item to userspace 2324 */ 2325 static int send_subvol_begin(struct send_ctx *sctx) 2326 { 2327 int ret; 2328 struct btrfs_root *send_root = sctx->send_root; 2329 struct btrfs_root *parent_root = sctx->parent_root; 2330 struct btrfs_path *path; 2331 struct btrfs_key key; 2332 struct btrfs_root_ref *ref; 2333 struct extent_buffer *leaf; 2334 char *name = NULL; 2335 int namelen; 2336 2337 path = btrfs_alloc_path(); 2338 if (!path) 2339 return -ENOMEM; 2340 2341 name = kmalloc(BTRFS_PATH_NAME_MAX, GFP_KERNEL); 2342 if (!name) { 2343 btrfs_free_path(path); 2344 return -ENOMEM; 2345 } 2346 2347 key.objectid = send_root->objectid; 2348 key.type = BTRFS_ROOT_BACKREF_KEY; 2349 key.offset = 0; 2350 2351 ret = btrfs_search_slot_for_read(send_root->fs_info->tree_root, 2352 &key, path, 1, 0); 2353 if (ret < 0) 2354 goto out; 2355 if (ret) { 2356 ret = -ENOENT; 2357 goto out; 2358 } 2359 2360 leaf = path->nodes[0]; 2361 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 2362 if (key.type != BTRFS_ROOT_BACKREF_KEY || 2363 key.objectid != send_root->objectid) { 2364 ret = -ENOENT; 2365 goto out; 2366 } 2367 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); 2368 namelen = btrfs_root_ref_name_len(leaf, ref); 2369 read_extent_buffer(leaf, name, (unsigned long)(ref + 1), namelen); 2370 btrfs_release_path(path); 2371 2372 if (parent_root) { 2373 ret = begin_cmd(sctx, BTRFS_SEND_C_SNAPSHOT); 2374 if (ret < 0) 2375 goto out; 2376 } else { 2377 ret = begin_cmd(sctx, BTRFS_SEND_C_SUBVOL); 2378 if (ret < 0) 2379 goto out; 2380 } 2381 2382 TLV_PUT_STRING(sctx, BTRFS_SEND_A_PATH, name, namelen); 2383 2384 if (!btrfs_is_empty_uuid(sctx->send_root->root_item.received_uuid)) 2385 TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID, 2386 sctx->send_root->root_item.received_uuid); 2387 else 2388 TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID, 2389 sctx->send_root->root_item.uuid); 2390 2391 TLV_PUT_U64(sctx, BTRFS_SEND_A_CTRANSID, 2392 le64_to_cpu(sctx->send_root->root_item.ctransid)); 2393 if (parent_root) { 2394 if (!btrfs_is_empty_uuid(parent_root->root_item.received_uuid)) 2395 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID, 2396 parent_root->root_item.received_uuid); 2397 else 2398 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID, 2399 parent_root->root_item.uuid); 2400 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID, 2401 le64_to_cpu(sctx->parent_root->root_item.ctransid)); 2402 } 2403 2404 ret = send_cmd(sctx); 2405 2406 tlv_put_failure: 2407 out: 2408 btrfs_free_path(path); 2409 kfree(name); 2410 return ret; 2411 } 2412 2413 static int send_truncate(struct send_ctx *sctx, u64 ino, u64 gen, u64 size) 2414 { 2415 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; 2416 int ret = 0; 2417 struct fs_path *p; 2418 2419 btrfs_debug(fs_info, "send_truncate %llu size=%llu", ino, size); 2420 2421 p = fs_path_alloc(); 2422 if (!p) 2423 return -ENOMEM; 2424 2425 ret = begin_cmd(sctx, BTRFS_SEND_C_TRUNCATE); 2426 if (ret < 0) 2427 goto out; 2428 2429 ret = get_cur_path(sctx, ino, gen, p); 2430 if (ret < 0) 2431 goto out; 2432 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); 2433 TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, size); 2434 2435 ret = send_cmd(sctx); 2436 2437 tlv_put_failure: 2438 out: 2439 fs_path_free(p); 2440 return ret; 2441 } 2442 2443 static int send_chmod(struct send_ctx *sctx, u64 ino, u64 gen, u64 mode) 2444 { 2445 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; 2446 int ret = 0; 2447 struct fs_path *p; 2448 2449 btrfs_debug(fs_info, "send_chmod %llu mode=%llu", ino, mode); 2450 2451 p = fs_path_alloc(); 2452 if (!p) 2453 return -ENOMEM; 2454 2455 ret = begin_cmd(sctx, BTRFS_SEND_C_CHMOD); 2456 if (ret < 0) 2457 goto out; 2458 2459 ret = get_cur_path(sctx, ino, gen, p); 2460 if (ret < 0) 2461 goto out; 2462 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); 2463 TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode & 07777); 2464 2465 ret = send_cmd(sctx); 2466 2467 tlv_put_failure: 2468 out: 2469 fs_path_free(p); 2470 return ret; 2471 } 2472 2473 static int send_chown(struct send_ctx *sctx, u64 ino, u64 gen, u64 uid, u64 gid) 2474 { 2475 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; 2476 int ret = 0; 2477 struct fs_path *p; 2478 2479 btrfs_debug(fs_info, "send_chown %llu uid=%llu, gid=%llu", 2480 ino, uid, gid); 2481 2482 p = fs_path_alloc(); 2483 if (!p) 2484 return -ENOMEM; 2485 2486 ret = begin_cmd(sctx, BTRFS_SEND_C_CHOWN); 2487 if (ret < 0) 2488 goto out; 2489 2490 ret = get_cur_path(sctx, ino, gen, p); 2491 if (ret < 0) 2492 goto out; 2493 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); 2494 TLV_PUT_U64(sctx, BTRFS_SEND_A_UID, uid); 2495 TLV_PUT_U64(sctx, BTRFS_SEND_A_GID, gid); 2496 2497 ret = send_cmd(sctx); 2498 2499 tlv_put_failure: 2500 out: 2501 fs_path_free(p); 2502 return ret; 2503 } 2504 2505 static int send_utimes(struct send_ctx *sctx, u64 ino, u64 gen) 2506 { 2507 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; 2508 int ret = 0; 2509 struct fs_path *p = NULL; 2510 struct btrfs_inode_item *ii; 2511 struct btrfs_path *path = NULL; 2512 struct extent_buffer *eb; 2513 struct btrfs_key key; 2514 int slot; 2515 2516 btrfs_debug(fs_info, "send_utimes %llu", ino); 2517 2518 p = fs_path_alloc(); 2519 if (!p) 2520 return -ENOMEM; 2521 2522 path = alloc_path_for_send(); 2523 if (!path) { 2524 ret = -ENOMEM; 2525 goto out; 2526 } 2527 2528 key.objectid = ino; 2529 key.type = BTRFS_INODE_ITEM_KEY; 2530 key.offset = 0; 2531 ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0); 2532 if (ret > 0) 2533 ret = -ENOENT; 2534 if (ret < 0) 2535 goto out; 2536 2537 eb = path->nodes[0]; 2538 slot = path->slots[0]; 2539 ii = btrfs_item_ptr(eb, slot, struct btrfs_inode_item); 2540 2541 ret = begin_cmd(sctx, BTRFS_SEND_C_UTIMES); 2542 if (ret < 0) 2543 goto out; 2544 2545 ret = get_cur_path(sctx, ino, gen, p); 2546 if (ret < 0) 2547 goto out; 2548 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); 2549 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_ATIME, eb, &ii->atime); 2550 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_MTIME, eb, &ii->mtime); 2551 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_CTIME, eb, &ii->ctime); 2552 /* TODO Add otime support when the otime patches get into upstream */ 2553 2554 ret = send_cmd(sctx); 2555 2556 tlv_put_failure: 2557 out: 2558 fs_path_free(p); 2559 btrfs_free_path(path); 2560 return ret; 2561 } 2562 2563 /* 2564 * Sends a BTRFS_SEND_C_MKXXX or SYMLINK command to user space. We don't have 2565 * a valid path yet because we did not process the refs yet. So, the inode 2566 * is created as orphan. 2567 */ 2568 static int send_create_inode(struct send_ctx *sctx, u64 ino) 2569 { 2570 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; 2571 int ret = 0; 2572 struct fs_path *p; 2573 int cmd; 2574 u64 gen; 2575 u64 mode; 2576 u64 rdev; 2577 2578 btrfs_debug(fs_info, "send_create_inode %llu", ino); 2579 2580 p = fs_path_alloc(); 2581 if (!p) 2582 return -ENOMEM; 2583 2584 if (ino != sctx->cur_ino) { 2585 ret = get_inode_info(sctx->send_root, ino, NULL, &gen, &mode, 2586 NULL, NULL, &rdev); 2587 if (ret < 0) 2588 goto out; 2589 } else { 2590 gen = sctx->cur_inode_gen; 2591 mode = sctx->cur_inode_mode; 2592 rdev = sctx->cur_inode_rdev; 2593 } 2594 2595 if (S_ISREG(mode)) { 2596 cmd = BTRFS_SEND_C_MKFILE; 2597 } else if (S_ISDIR(mode)) { 2598 cmd = BTRFS_SEND_C_MKDIR; 2599 } else if (S_ISLNK(mode)) { 2600 cmd = BTRFS_SEND_C_SYMLINK; 2601 } else if (S_ISCHR(mode) || S_ISBLK(mode)) { 2602 cmd = BTRFS_SEND_C_MKNOD; 2603 } else if (S_ISFIFO(mode)) { 2604 cmd = BTRFS_SEND_C_MKFIFO; 2605 } else if (S_ISSOCK(mode)) { 2606 cmd = BTRFS_SEND_C_MKSOCK; 2607 } else { 2608 btrfs_warn(sctx->send_root->fs_info, "unexpected inode type %o", 2609 (int)(mode & S_IFMT)); 2610 ret = -EOPNOTSUPP; 2611 goto out; 2612 } 2613 2614 ret = begin_cmd(sctx, cmd); 2615 if (ret < 0) 2616 goto out; 2617 2618 ret = gen_unique_name(sctx, ino, gen, p); 2619 if (ret < 0) 2620 goto out; 2621 2622 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); 2623 TLV_PUT_U64(sctx, BTRFS_SEND_A_INO, ino); 2624 2625 if (S_ISLNK(mode)) { 2626 fs_path_reset(p); 2627 ret = read_symlink(sctx->send_root, ino, p); 2628 if (ret < 0) 2629 goto out; 2630 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, p); 2631 } else if (S_ISCHR(mode) || S_ISBLK(mode) || 2632 S_ISFIFO(mode) || S_ISSOCK(mode)) { 2633 TLV_PUT_U64(sctx, BTRFS_SEND_A_RDEV, new_encode_dev(rdev)); 2634 TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode); 2635 } 2636 2637 ret = send_cmd(sctx); 2638 if (ret < 0) 2639 goto out; 2640 2641 2642 tlv_put_failure: 2643 out: 2644 fs_path_free(p); 2645 return ret; 2646 } 2647 2648 /* 2649 * We need some special handling for inodes that get processed before the parent 2650 * directory got created. See process_recorded_refs for details. 2651 * This function does the check if we already created the dir out of order. 2652 */ 2653 static int did_create_dir(struct send_ctx *sctx, u64 dir) 2654 { 2655 int ret = 0; 2656 struct btrfs_path *path = NULL; 2657 struct btrfs_key key; 2658 struct btrfs_key found_key; 2659 struct btrfs_key di_key; 2660 struct extent_buffer *eb; 2661 struct btrfs_dir_item *di; 2662 int slot; 2663 2664 path = alloc_path_for_send(); 2665 if (!path) { 2666 ret = -ENOMEM; 2667 goto out; 2668 } 2669 2670 key.objectid = dir; 2671 key.type = BTRFS_DIR_INDEX_KEY; 2672 key.offset = 0; 2673 ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0); 2674 if (ret < 0) 2675 goto out; 2676 2677 while (1) { 2678 eb = path->nodes[0]; 2679 slot = path->slots[0]; 2680 if (slot >= btrfs_header_nritems(eb)) { 2681 ret = btrfs_next_leaf(sctx->send_root, path); 2682 if (ret < 0) { 2683 goto out; 2684 } else if (ret > 0) { 2685 ret = 0; 2686 break; 2687 } 2688 continue; 2689 } 2690 2691 btrfs_item_key_to_cpu(eb, &found_key, slot); 2692 if (found_key.objectid != key.objectid || 2693 found_key.type != key.type) { 2694 ret = 0; 2695 goto out; 2696 } 2697 2698 di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item); 2699 btrfs_dir_item_key_to_cpu(eb, di, &di_key); 2700 2701 if (di_key.type != BTRFS_ROOT_ITEM_KEY && 2702 di_key.objectid < sctx->send_progress) { 2703 ret = 1; 2704 goto out; 2705 } 2706 2707 path->slots[0]++; 2708 } 2709 2710 out: 2711 btrfs_free_path(path); 2712 return ret; 2713 } 2714 2715 /* 2716 * Only creates the inode if it is: 2717 * 1. Not a directory 2718 * 2. Or a directory which was not created already due to out of order 2719 * directories. See did_create_dir and process_recorded_refs for details. 2720 */ 2721 static int send_create_inode_if_needed(struct send_ctx *sctx) 2722 { 2723 int ret; 2724 2725 if (S_ISDIR(sctx->cur_inode_mode)) { 2726 ret = did_create_dir(sctx, sctx->cur_ino); 2727 if (ret < 0) 2728 goto out; 2729 if (ret) { 2730 ret = 0; 2731 goto out; 2732 } 2733 } 2734 2735 ret = send_create_inode(sctx, sctx->cur_ino); 2736 if (ret < 0) 2737 goto out; 2738 2739 out: 2740 return ret; 2741 } 2742 2743 struct recorded_ref { 2744 struct list_head list; 2745 char *name; 2746 struct fs_path *full_path; 2747 u64 dir; 2748 u64 dir_gen; 2749 int name_len; 2750 }; 2751 2752 static void set_ref_path(struct recorded_ref *ref, struct fs_path *path) 2753 { 2754 ref->full_path = path; 2755 ref->name = (char *)kbasename(ref->full_path->start); 2756 ref->name_len = ref->full_path->end - ref->name; 2757 } 2758 2759 /* 2760 * We need to process new refs before deleted refs, but compare_tree gives us 2761 * everything mixed. So we first record all refs and later process them. 2762 * This function is a helper to record one ref. 2763 */ 2764 static int __record_ref(struct list_head *head, u64 dir, 2765 u64 dir_gen, struct fs_path *path) 2766 { 2767 struct recorded_ref *ref; 2768 2769 ref = kmalloc(sizeof(*ref), GFP_KERNEL); 2770 if (!ref) 2771 return -ENOMEM; 2772 2773 ref->dir = dir; 2774 ref->dir_gen = dir_gen; 2775 set_ref_path(ref, path); 2776 list_add_tail(&ref->list, head); 2777 return 0; 2778 } 2779 2780 static int dup_ref(struct recorded_ref *ref, struct list_head *list) 2781 { 2782 struct recorded_ref *new; 2783 2784 new = kmalloc(sizeof(*ref), GFP_KERNEL); 2785 if (!new) 2786 return -ENOMEM; 2787 2788 new->dir = ref->dir; 2789 new->dir_gen = ref->dir_gen; 2790 new->full_path = NULL; 2791 INIT_LIST_HEAD(&new->list); 2792 list_add_tail(&new->list, list); 2793 return 0; 2794 } 2795 2796 static void __free_recorded_refs(struct list_head *head) 2797 { 2798 struct recorded_ref *cur; 2799 2800 while (!list_empty(head)) { 2801 cur = list_entry(head->next, struct recorded_ref, list); 2802 fs_path_free(cur->full_path); 2803 list_del(&cur->list); 2804 kfree(cur); 2805 } 2806 } 2807 2808 static void free_recorded_refs(struct send_ctx *sctx) 2809 { 2810 __free_recorded_refs(&sctx->new_refs); 2811 __free_recorded_refs(&sctx->deleted_refs); 2812 } 2813 2814 /* 2815 * Renames/moves a file/dir to its orphan name. Used when the first 2816 * ref of an unprocessed inode gets overwritten and for all non empty 2817 * directories. 2818 */ 2819 static int orphanize_inode(struct send_ctx *sctx, u64 ino, u64 gen, 2820 struct fs_path *path) 2821 { 2822 int ret; 2823 struct fs_path *orphan; 2824 2825 orphan = fs_path_alloc(); 2826 if (!orphan) 2827 return -ENOMEM; 2828 2829 ret = gen_unique_name(sctx, ino, gen, orphan); 2830 if (ret < 0) 2831 goto out; 2832 2833 ret = send_rename(sctx, path, orphan); 2834 2835 out: 2836 fs_path_free(orphan); 2837 return ret; 2838 } 2839 2840 static struct orphan_dir_info * 2841 add_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino) 2842 { 2843 struct rb_node **p = &sctx->orphan_dirs.rb_node; 2844 struct rb_node *parent = NULL; 2845 struct orphan_dir_info *entry, *odi; 2846 2847 odi = kmalloc(sizeof(*odi), GFP_KERNEL); 2848 if (!odi) 2849 return ERR_PTR(-ENOMEM); 2850 odi->ino = dir_ino; 2851 odi->gen = 0; 2852 2853 while (*p) { 2854 parent = *p; 2855 entry = rb_entry(parent, struct orphan_dir_info, node); 2856 if (dir_ino < entry->ino) { 2857 p = &(*p)->rb_left; 2858 } else if (dir_ino > entry->ino) { 2859 p = &(*p)->rb_right; 2860 } else { 2861 kfree(odi); 2862 return entry; 2863 } 2864 } 2865 2866 rb_link_node(&odi->node, parent, p); 2867 rb_insert_color(&odi->node, &sctx->orphan_dirs); 2868 return odi; 2869 } 2870 2871 static struct orphan_dir_info * 2872 get_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino) 2873 { 2874 struct rb_node *n = sctx->orphan_dirs.rb_node; 2875 struct orphan_dir_info *entry; 2876 2877 while (n) { 2878 entry = rb_entry(n, struct orphan_dir_info, node); 2879 if (dir_ino < entry->ino) 2880 n = n->rb_left; 2881 else if (dir_ino > entry->ino) 2882 n = n->rb_right; 2883 else 2884 return entry; 2885 } 2886 return NULL; 2887 } 2888 2889 static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino) 2890 { 2891 struct orphan_dir_info *odi = get_orphan_dir_info(sctx, dir_ino); 2892 2893 return odi != NULL; 2894 } 2895 2896 static void free_orphan_dir_info(struct send_ctx *sctx, 2897 struct orphan_dir_info *odi) 2898 { 2899 if (!odi) 2900 return; 2901 rb_erase(&odi->node, &sctx->orphan_dirs); 2902 kfree(odi); 2903 } 2904 2905 /* 2906 * Returns 1 if a directory can be removed at this point in time. 2907 * We check this by iterating all dir items and checking if the inode behind 2908 * the dir item was already processed. 2909 */ 2910 static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen, 2911 u64 send_progress) 2912 { 2913 int ret = 0; 2914 struct btrfs_root *root = sctx->parent_root; 2915 struct btrfs_path *path; 2916 struct btrfs_key key; 2917 struct btrfs_key found_key; 2918 struct btrfs_key loc; 2919 struct btrfs_dir_item *di; 2920 2921 /* 2922 * Don't try to rmdir the top/root subvolume dir. 2923 */ 2924 if (dir == BTRFS_FIRST_FREE_OBJECTID) 2925 return 0; 2926 2927 path = alloc_path_for_send(); 2928 if (!path) 2929 return -ENOMEM; 2930 2931 key.objectid = dir; 2932 key.type = BTRFS_DIR_INDEX_KEY; 2933 key.offset = 0; 2934 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 2935 if (ret < 0) 2936 goto out; 2937 2938 while (1) { 2939 struct waiting_dir_move *dm; 2940 2941 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) { 2942 ret = btrfs_next_leaf(root, path); 2943 if (ret < 0) 2944 goto out; 2945 else if (ret > 0) 2946 break; 2947 continue; 2948 } 2949 btrfs_item_key_to_cpu(path->nodes[0], &found_key, 2950 path->slots[0]); 2951 if (found_key.objectid != key.objectid || 2952 found_key.type != key.type) 2953 break; 2954 2955 di = btrfs_item_ptr(path->nodes[0], path->slots[0], 2956 struct btrfs_dir_item); 2957 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &loc); 2958 2959 dm = get_waiting_dir_move(sctx, loc.objectid); 2960 if (dm) { 2961 struct orphan_dir_info *odi; 2962 2963 odi = add_orphan_dir_info(sctx, dir); 2964 if (IS_ERR(odi)) { 2965 ret = PTR_ERR(odi); 2966 goto out; 2967 } 2968 odi->gen = dir_gen; 2969 dm->rmdir_ino = dir; 2970 ret = 0; 2971 goto out; 2972 } 2973 2974 if (loc.objectid > send_progress) { 2975 struct orphan_dir_info *odi; 2976 2977 odi = get_orphan_dir_info(sctx, dir); 2978 free_orphan_dir_info(sctx, odi); 2979 ret = 0; 2980 goto out; 2981 } 2982 2983 path->slots[0]++; 2984 } 2985 2986 ret = 1; 2987 2988 out: 2989 btrfs_free_path(path); 2990 return ret; 2991 } 2992 2993 static int is_waiting_for_move(struct send_ctx *sctx, u64 ino) 2994 { 2995 struct waiting_dir_move *entry = get_waiting_dir_move(sctx, ino); 2996 2997 return entry != NULL; 2998 } 2999 3000 static int add_waiting_dir_move(struct send_ctx *sctx, u64 ino, bool orphanized) 3001 { 3002 struct rb_node **p = &sctx->waiting_dir_moves.rb_node; 3003 struct rb_node *parent = NULL; 3004 struct waiting_dir_move *entry, *dm; 3005 3006 dm = kmalloc(sizeof(*dm), GFP_KERNEL); 3007 if (!dm) 3008 return -ENOMEM; 3009 dm->ino = ino; 3010 dm->rmdir_ino = 0; 3011 dm->orphanized = orphanized; 3012 3013 while (*p) { 3014 parent = *p; 3015 entry = rb_entry(parent, struct waiting_dir_move, node); 3016 if (ino < entry->ino) { 3017 p = &(*p)->rb_left; 3018 } else if (ino > entry->ino) { 3019 p = &(*p)->rb_right; 3020 } else { 3021 kfree(dm); 3022 return -EEXIST; 3023 } 3024 } 3025 3026 rb_link_node(&dm->node, parent, p); 3027 rb_insert_color(&dm->node, &sctx->waiting_dir_moves); 3028 return 0; 3029 } 3030 3031 static struct waiting_dir_move * 3032 get_waiting_dir_move(struct send_ctx *sctx, u64 ino) 3033 { 3034 struct rb_node *n = sctx->waiting_dir_moves.rb_node; 3035 struct waiting_dir_move *entry; 3036 3037 while (n) { 3038 entry = rb_entry(n, struct waiting_dir_move, node); 3039 if (ino < entry->ino) 3040 n = n->rb_left; 3041 else if (ino > entry->ino) 3042 n = n->rb_right; 3043 else 3044 return entry; 3045 } 3046 return NULL; 3047 } 3048 3049 static void free_waiting_dir_move(struct send_ctx *sctx, 3050 struct waiting_dir_move *dm) 3051 { 3052 if (!dm) 3053 return; 3054 rb_erase(&dm->node, &sctx->waiting_dir_moves); 3055 kfree(dm); 3056 } 3057 3058 static int add_pending_dir_move(struct send_ctx *sctx, 3059 u64 ino, 3060 u64 ino_gen, 3061 u64 parent_ino, 3062 struct list_head *new_refs, 3063 struct list_head *deleted_refs, 3064 const bool is_orphan) 3065 { 3066 struct rb_node **p = &sctx->pending_dir_moves.rb_node; 3067 struct rb_node *parent = NULL; 3068 struct pending_dir_move *entry = NULL, *pm; 3069 struct recorded_ref *cur; 3070 int exists = 0; 3071 int ret; 3072 3073 pm = kmalloc(sizeof(*pm), GFP_KERNEL); 3074 if (!pm) 3075 return -ENOMEM; 3076 pm->parent_ino = parent_ino; 3077 pm->ino = ino; 3078 pm->gen = ino_gen; 3079 INIT_LIST_HEAD(&pm->list); 3080 INIT_LIST_HEAD(&pm->update_refs); 3081 RB_CLEAR_NODE(&pm->node); 3082 3083 while (*p) { 3084 parent = *p; 3085 entry = rb_entry(parent, struct pending_dir_move, node); 3086 if (parent_ino < entry->parent_ino) { 3087 p = &(*p)->rb_left; 3088 } else if (parent_ino > entry->parent_ino) { 3089 p = &(*p)->rb_right; 3090 } else { 3091 exists = 1; 3092 break; 3093 } 3094 } 3095 3096 list_for_each_entry(cur, deleted_refs, list) { 3097 ret = dup_ref(cur, &pm->update_refs); 3098 if (ret < 0) 3099 goto out; 3100 } 3101 list_for_each_entry(cur, new_refs, list) { 3102 ret = dup_ref(cur, &pm->update_refs); 3103 if (ret < 0) 3104 goto out; 3105 } 3106 3107 ret = add_waiting_dir_move(sctx, pm->ino, is_orphan); 3108 if (ret) 3109 goto out; 3110 3111 if (exists) { 3112 list_add_tail(&pm->list, &entry->list); 3113 } else { 3114 rb_link_node(&pm->node, parent, p); 3115 rb_insert_color(&pm->node, &sctx->pending_dir_moves); 3116 } 3117 ret = 0; 3118 out: 3119 if (ret) { 3120 __free_recorded_refs(&pm->update_refs); 3121 kfree(pm); 3122 } 3123 return ret; 3124 } 3125 3126 static struct pending_dir_move *get_pending_dir_moves(struct send_ctx *sctx, 3127 u64 parent_ino) 3128 { 3129 struct rb_node *n = sctx->pending_dir_moves.rb_node; 3130 struct pending_dir_move *entry; 3131 3132 while (n) { 3133 entry = rb_entry(n, struct pending_dir_move, node); 3134 if (parent_ino < entry->parent_ino) 3135 n = n->rb_left; 3136 else if (parent_ino > entry->parent_ino) 3137 n = n->rb_right; 3138 else 3139 return entry; 3140 } 3141 return NULL; 3142 } 3143 3144 static int path_loop(struct send_ctx *sctx, struct fs_path *name, 3145 u64 ino, u64 gen, u64 *ancestor_ino) 3146 { 3147 int ret = 0; 3148 u64 parent_inode = 0; 3149 u64 parent_gen = 0; 3150 u64 start_ino = ino; 3151 3152 *ancestor_ino = 0; 3153 while (ino != BTRFS_FIRST_FREE_OBJECTID) { 3154 fs_path_reset(name); 3155 3156 if (is_waiting_for_rm(sctx, ino)) 3157 break; 3158 if (is_waiting_for_move(sctx, ino)) { 3159 if (*ancestor_ino == 0) 3160 *ancestor_ino = ino; 3161 ret = get_first_ref(sctx->parent_root, ino, 3162 &parent_inode, &parent_gen, name); 3163 } else { 3164 ret = __get_cur_name_and_parent(sctx, ino, gen, 3165 &parent_inode, 3166 &parent_gen, name); 3167 if (ret > 0) { 3168 ret = 0; 3169 break; 3170 } 3171 } 3172 if (ret < 0) 3173 break; 3174 if (parent_inode == start_ino) { 3175 ret = 1; 3176 if (*ancestor_ino == 0) 3177 *ancestor_ino = ino; 3178 break; 3179 } 3180 ino = parent_inode; 3181 gen = parent_gen; 3182 } 3183 return ret; 3184 } 3185 3186 static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm) 3187 { 3188 struct fs_path *from_path = NULL; 3189 struct fs_path *to_path = NULL; 3190 struct fs_path *name = NULL; 3191 u64 orig_progress = sctx->send_progress; 3192 struct recorded_ref *cur; 3193 u64 parent_ino, parent_gen; 3194 struct waiting_dir_move *dm = NULL; 3195 u64 rmdir_ino = 0; 3196 u64 ancestor; 3197 bool is_orphan; 3198 int ret; 3199 3200 name = fs_path_alloc(); 3201 from_path = fs_path_alloc(); 3202 if (!name || !from_path) { 3203 ret = -ENOMEM; 3204 goto out; 3205 } 3206 3207 dm = get_waiting_dir_move(sctx, pm->ino); 3208 ASSERT(dm); 3209 rmdir_ino = dm->rmdir_ino; 3210 is_orphan = dm->orphanized; 3211 free_waiting_dir_move(sctx, dm); 3212 3213 if (is_orphan) { 3214 ret = gen_unique_name(sctx, pm->ino, 3215 pm->gen, from_path); 3216 } else { 3217 ret = get_first_ref(sctx->parent_root, pm->ino, 3218 &parent_ino, &parent_gen, name); 3219 if (ret < 0) 3220 goto out; 3221 ret = get_cur_path(sctx, parent_ino, parent_gen, 3222 from_path); 3223 if (ret < 0) 3224 goto out; 3225 ret = fs_path_add_path(from_path, name); 3226 } 3227 if (ret < 0) 3228 goto out; 3229 3230 sctx->send_progress = sctx->cur_ino + 1; 3231 ret = path_loop(sctx, name, pm->ino, pm->gen, &ancestor); 3232 if (ret < 0) 3233 goto out; 3234 if (ret) { 3235 LIST_HEAD(deleted_refs); 3236 ASSERT(ancestor > BTRFS_FIRST_FREE_OBJECTID); 3237 ret = add_pending_dir_move(sctx, pm->ino, pm->gen, ancestor, 3238 &pm->update_refs, &deleted_refs, 3239 is_orphan); 3240 if (ret < 0) 3241 goto out; 3242 if (rmdir_ino) { 3243 dm = get_waiting_dir_move(sctx, pm->ino); 3244 ASSERT(dm); 3245 dm->rmdir_ino = rmdir_ino; 3246 } 3247 goto out; 3248 } 3249 fs_path_reset(name); 3250 to_path = name; 3251 name = NULL; 3252 ret = get_cur_path(sctx, pm->ino, pm->gen, to_path); 3253 if (ret < 0) 3254 goto out; 3255 3256 ret = send_rename(sctx, from_path, to_path); 3257 if (ret < 0) 3258 goto out; 3259 3260 if (rmdir_ino) { 3261 struct orphan_dir_info *odi; 3262 3263 odi = get_orphan_dir_info(sctx, rmdir_ino); 3264 if (!odi) { 3265 /* already deleted */ 3266 goto finish; 3267 } 3268 ret = can_rmdir(sctx, rmdir_ino, odi->gen, sctx->cur_ino); 3269 if (ret < 0) 3270 goto out; 3271 if (!ret) 3272 goto finish; 3273 3274 name = fs_path_alloc(); 3275 if (!name) { 3276 ret = -ENOMEM; 3277 goto out; 3278 } 3279 ret = get_cur_path(sctx, rmdir_ino, odi->gen, name); 3280 if (ret < 0) 3281 goto out; 3282 ret = send_rmdir(sctx, name); 3283 if (ret < 0) 3284 goto out; 3285 free_orphan_dir_info(sctx, odi); 3286 } 3287 3288 finish: 3289 ret = send_utimes(sctx, pm->ino, pm->gen); 3290 if (ret < 0) 3291 goto out; 3292 3293 /* 3294 * After rename/move, need to update the utimes of both new parent(s) 3295 * and old parent(s). 3296 */ 3297 list_for_each_entry(cur, &pm->update_refs, list) { 3298 /* 3299 * The parent inode might have been deleted in the send snapshot 3300 */ 3301 ret = get_inode_info(sctx->send_root, cur->dir, NULL, 3302 NULL, NULL, NULL, NULL, NULL); 3303 if (ret == -ENOENT) { 3304 ret = 0; 3305 continue; 3306 } 3307 if (ret < 0) 3308 goto out; 3309 3310 ret = send_utimes(sctx, cur->dir, cur->dir_gen); 3311 if (ret < 0) 3312 goto out; 3313 } 3314 3315 out: 3316 fs_path_free(name); 3317 fs_path_free(from_path); 3318 fs_path_free(to_path); 3319 sctx->send_progress = orig_progress; 3320 3321 return ret; 3322 } 3323 3324 static void free_pending_move(struct send_ctx *sctx, struct pending_dir_move *m) 3325 { 3326 if (!list_empty(&m->list)) 3327 list_del(&m->list); 3328 if (!RB_EMPTY_NODE(&m->node)) 3329 rb_erase(&m->node, &sctx->pending_dir_moves); 3330 __free_recorded_refs(&m->update_refs); 3331 kfree(m); 3332 } 3333 3334 static void tail_append_pending_moves(struct pending_dir_move *moves, 3335 struct list_head *stack) 3336 { 3337 if (list_empty(&moves->list)) { 3338 list_add_tail(&moves->list, stack); 3339 } else { 3340 LIST_HEAD(list); 3341 list_splice_init(&moves->list, &list); 3342 list_add_tail(&moves->list, stack); 3343 list_splice_tail(&list, stack); 3344 } 3345 } 3346 3347 static int apply_children_dir_moves(struct send_ctx *sctx) 3348 { 3349 struct pending_dir_move *pm; 3350 struct list_head stack; 3351 u64 parent_ino = sctx->cur_ino; 3352 int ret = 0; 3353 3354 pm = get_pending_dir_moves(sctx, parent_ino); 3355 if (!pm) 3356 return 0; 3357 3358 INIT_LIST_HEAD(&stack); 3359 tail_append_pending_moves(pm, &stack); 3360 3361 while (!list_empty(&stack)) { 3362 pm = list_first_entry(&stack, struct pending_dir_move, list); 3363 parent_ino = pm->ino; 3364 ret = apply_dir_move(sctx, pm); 3365 free_pending_move(sctx, pm); 3366 if (ret) 3367 goto out; 3368 pm = get_pending_dir_moves(sctx, parent_ino); 3369 if (pm) 3370 tail_append_pending_moves(pm, &stack); 3371 } 3372 return 0; 3373 3374 out: 3375 while (!list_empty(&stack)) { 3376 pm = list_first_entry(&stack, struct pending_dir_move, list); 3377 free_pending_move(sctx, pm); 3378 } 3379 return ret; 3380 } 3381 3382 /* 3383 * We might need to delay a directory rename even when no ancestor directory 3384 * (in the send root) with a higher inode number than ours (sctx->cur_ino) was 3385 * renamed. This happens when we rename a directory to the old name (the name 3386 * in the parent root) of some other unrelated directory that got its rename 3387 * delayed due to some ancestor with higher number that got renamed. 3388 * 3389 * Example: 3390 * 3391 * Parent snapshot: 3392 * . (ino 256) 3393 * |---- a/ (ino 257) 3394 * | |---- file (ino 260) 3395 * | 3396 * |---- b/ (ino 258) 3397 * |---- c/ (ino 259) 3398 * 3399 * Send snapshot: 3400 * . (ino 256) 3401 * |---- a/ (ino 258) 3402 * |---- x/ (ino 259) 3403 * |---- y/ (ino 257) 3404 * |----- file (ino 260) 3405 * 3406 * Here we can not rename 258 from 'b' to 'a' without the rename of inode 257 3407 * from 'a' to 'x/y' happening first, which in turn depends on the rename of 3408 * inode 259 from 'c' to 'x'. So the order of rename commands the send stream 3409 * must issue is: 3410 * 3411 * 1 - rename 259 from 'c' to 'x' 3412 * 2 - rename 257 from 'a' to 'x/y' 3413 * 3 - rename 258 from 'b' to 'a' 3414 * 3415 * Returns 1 if the rename of sctx->cur_ino needs to be delayed, 0 if it can 3416 * be done right away and < 0 on error. 3417 */ 3418 static int wait_for_dest_dir_move(struct send_ctx *sctx, 3419 struct recorded_ref *parent_ref, 3420 const bool is_orphan) 3421 { 3422 struct btrfs_fs_info *fs_info = sctx->parent_root->fs_info; 3423 struct btrfs_path *path; 3424 struct btrfs_key key; 3425 struct btrfs_key di_key; 3426 struct btrfs_dir_item *di; 3427 u64 left_gen; 3428 u64 right_gen; 3429 int ret = 0; 3430 struct waiting_dir_move *wdm; 3431 3432 if (RB_EMPTY_ROOT(&sctx->waiting_dir_moves)) 3433 return 0; 3434 3435 path = alloc_path_for_send(); 3436 if (!path) 3437 return -ENOMEM; 3438 3439 key.objectid = parent_ref->dir; 3440 key.type = BTRFS_DIR_ITEM_KEY; 3441 key.offset = btrfs_name_hash(parent_ref->name, parent_ref->name_len); 3442 3443 ret = btrfs_search_slot(NULL, sctx->parent_root, &key, path, 0, 0); 3444 if (ret < 0) { 3445 goto out; 3446 } else if (ret > 0) { 3447 ret = 0; 3448 goto out; 3449 } 3450 3451 di = btrfs_match_dir_item_name(fs_info, path, parent_ref->name, 3452 parent_ref->name_len); 3453 if (!di) { 3454 ret = 0; 3455 goto out; 3456 } 3457 /* 3458 * di_key.objectid has the number of the inode that has a dentry in the 3459 * parent directory with the same name that sctx->cur_ino is being 3460 * renamed to. We need to check if that inode is in the send root as 3461 * well and if it is currently marked as an inode with a pending rename, 3462 * if it is, we need to delay the rename of sctx->cur_ino as well, so 3463 * that it happens after that other inode is renamed. 3464 */ 3465 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &di_key); 3466 if (di_key.type != BTRFS_INODE_ITEM_KEY) { 3467 ret = 0; 3468 goto out; 3469 } 3470 3471 ret = get_inode_info(sctx->parent_root, di_key.objectid, NULL, 3472 &left_gen, NULL, NULL, NULL, NULL); 3473 if (ret < 0) 3474 goto out; 3475 ret = get_inode_info(sctx->send_root, di_key.objectid, NULL, 3476 &right_gen, NULL, NULL, NULL, NULL); 3477 if (ret < 0) { 3478 if (ret == -ENOENT) 3479 ret = 0; 3480 goto out; 3481 } 3482 3483 /* Different inode, no need to delay the rename of sctx->cur_ino */ 3484 if (right_gen != left_gen) { 3485 ret = 0; 3486 goto out; 3487 } 3488 3489 wdm = get_waiting_dir_move(sctx, di_key.objectid); 3490 if (wdm && !wdm->orphanized) { 3491 ret = add_pending_dir_move(sctx, 3492 sctx->cur_ino, 3493 sctx->cur_inode_gen, 3494 di_key.objectid, 3495 &sctx->new_refs, 3496 &sctx->deleted_refs, 3497 is_orphan); 3498 if (!ret) 3499 ret = 1; 3500 } 3501 out: 3502 btrfs_free_path(path); 3503 return ret; 3504 } 3505 3506 /* 3507 * Check if inode ino2, or any of its ancestors, is inode ino1. 3508 * Return 1 if true, 0 if false and < 0 on error. 3509 */ 3510 static int check_ino_in_path(struct btrfs_root *root, 3511 const u64 ino1, 3512 const u64 ino1_gen, 3513 const u64 ino2, 3514 const u64 ino2_gen, 3515 struct fs_path *fs_path) 3516 { 3517 u64 ino = ino2; 3518 3519 if (ino1 == ino2) 3520 return ino1_gen == ino2_gen; 3521 3522 while (ino > BTRFS_FIRST_FREE_OBJECTID) { 3523 u64 parent; 3524 u64 parent_gen; 3525 int ret; 3526 3527 fs_path_reset(fs_path); 3528 ret = get_first_ref(root, ino, &parent, &parent_gen, fs_path); 3529 if (ret < 0) 3530 return ret; 3531 if (parent == ino1) 3532 return parent_gen == ino1_gen; 3533 ino = parent; 3534 } 3535 return 0; 3536 } 3537 3538 /* 3539 * Check if ino ino1 is an ancestor of inode ino2 in the given root for any 3540 * possible path (in case ino2 is not a directory and has multiple hard links). 3541 * Return 1 if true, 0 if false and < 0 on error. 3542 */ 3543 static int is_ancestor(struct btrfs_root *root, 3544 const u64 ino1, 3545 const u64 ino1_gen, 3546 const u64 ino2, 3547 struct fs_path *fs_path) 3548 { 3549 bool free_fs_path = false; 3550 int ret = 0; 3551 struct btrfs_path *path = NULL; 3552 struct btrfs_key key; 3553 3554 if (!fs_path) { 3555 fs_path = fs_path_alloc(); 3556 if (!fs_path) 3557 return -ENOMEM; 3558 free_fs_path = true; 3559 } 3560 3561 path = alloc_path_for_send(); 3562 if (!path) { 3563 ret = -ENOMEM; 3564 goto out; 3565 } 3566 3567 key.objectid = ino2; 3568 key.type = BTRFS_INODE_REF_KEY; 3569 key.offset = 0; 3570 3571 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 3572 if (ret < 0) 3573 goto out; 3574 3575 while (true) { 3576 struct extent_buffer *leaf = path->nodes[0]; 3577 int slot = path->slots[0]; 3578 u32 cur_offset = 0; 3579 u32 item_size; 3580 3581 if (slot >= btrfs_header_nritems(leaf)) { 3582 ret = btrfs_next_leaf(root, path); 3583 if (ret < 0) 3584 goto out; 3585 if (ret > 0) 3586 break; 3587 continue; 3588 } 3589 3590 btrfs_item_key_to_cpu(leaf, &key, slot); 3591 if (key.objectid != ino2) 3592 break; 3593 if (key.type != BTRFS_INODE_REF_KEY && 3594 key.type != BTRFS_INODE_EXTREF_KEY) 3595 break; 3596 3597 item_size = btrfs_item_size_nr(leaf, slot); 3598 while (cur_offset < item_size) { 3599 u64 parent; 3600 u64 parent_gen; 3601 3602 if (key.type == BTRFS_INODE_EXTREF_KEY) { 3603 unsigned long ptr; 3604 struct btrfs_inode_extref *extref; 3605 3606 ptr = btrfs_item_ptr_offset(leaf, slot); 3607 extref = (struct btrfs_inode_extref *) 3608 (ptr + cur_offset); 3609 parent = btrfs_inode_extref_parent(leaf, 3610 extref); 3611 cur_offset += sizeof(*extref); 3612 cur_offset += btrfs_inode_extref_name_len(leaf, 3613 extref); 3614 } else { 3615 parent = key.offset; 3616 cur_offset = item_size; 3617 } 3618 3619 ret = get_inode_info(root, parent, NULL, &parent_gen, 3620 NULL, NULL, NULL, NULL); 3621 if (ret < 0) 3622 goto out; 3623 ret = check_ino_in_path(root, ino1, ino1_gen, 3624 parent, parent_gen, fs_path); 3625 if (ret) 3626 goto out; 3627 } 3628 path->slots[0]++; 3629 } 3630 ret = 0; 3631 out: 3632 btrfs_free_path(path); 3633 if (free_fs_path) 3634 fs_path_free(fs_path); 3635 return ret; 3636 } 3637 3638 static int wait_for_parent_move(struct send_ctx *sctx, 3639 struct recorded_ref *parent_ref, 3640 const bool is_orphan) 3641 { 3642 int ret = 0; 3643 u64 ino = parent_ref->dir; 3644 u64 ino_gen = parent_ref->dir_gen; 3645 u64 parent_ino_before, parent_ino_after; 3646 struct fs_path *path_before = NULL; 3647 struct fs_path *path_after = NULL; 3648 int len1, len2; 3649 3650 path_after = fs_path_alloc(); 3651 path_before = fs_path_alloc(); 3652 if (!path_after || !path_before) { 3653 ret = -ENOMEM; 3654 goto out; 3655 } 3656 3657 /* 3658 * Our current directory inode may not yet be renamed/moved because some 3659 * ancestor (immediate or not) has to be renamed/moved first. So find if 3660 * such ancestor exists and make sure our own rename/move happens after 3661 * that ancestor is processed to avoid path build infinite loops (done 3662 * at get_cur_path()). 3663 */ 3664 while (ino > BTRFS_FIRST_FREE_OBJECTID) { 3665 u64 parent_ino_after_gen; 3666 3667 if (is_waiting_for_move(sctx, ino)) { 3668 /* 3669 * If the current inode is an ancestor of ino in the 3670 * parent root, we need to delay the rename of the 3671 * current inode, otherwise don't delayed the rename 3672 * because we can end up with a circular dependency 3673 * of renames, resulting in some directories never 3674 * getting the respective rename operations issued in 3675 * the send stream or getting into infinite path build 3676 * loops. 3677 */ 3678 ret = is_ancestor(sctx->parent_root, 3679 sctx->cur_ino, sctx->cur_inode_gen, 3680 ino, path_before); 3681 if (ret) 3682 break; 3683 } 3684 3685 fs_path_reset(path_before); 3686 fs_path_reset(path_after); 3687 3688 ret = get_first_ref(sctx->send_root, ino, &parent_ino_after, 3689 &parent_ino_after_gen, path_after); 3690 if (ret < 0) 3691 goto out; 3692 ret = get_first_ref(sctx->parent_root, ino, &parent_ino_before, 3693 NULL, path_before); 3694 if (ret < 0 && ret != -ENOENT) { 3695 goto out; 3696 } else if (ret == -ENOENT) { 3697 ret = 0; 3698 break; 3699 } 3700 3701 len1 = fs_path_len(path_before); 3702 len2 = fs_path_len(path_after); 3703 if (ino > sctx->cur_ino && 3704 (parent_ino_before != parent_ino_after || len1 != len2 || 3705 memcmp(path_before->start, path_after->start, len1))) { 3706 u64 parent_ino_gen; 3707 3708 ret = get_inode_info(sctx->parent_root, ino, NULL, 3709 &parent_ino_gen, NULL, NULL, NULL, 3710 NULL); 3711 if (ret < 0) 3712 goto out; 3713 if (ino_gen == parent_ino_gen) { 3714 ret = 1; 3715 break; 3716 } 3717 } 3718 ino = parent_ino_after; 3719 ino_gen = parent_ino_after_gen; 3720 } 3721 3722 out: 3723 fs_path_free(path_before); 3724 fs_path_free(path_after); 3725 3726 if (ret == 1) { 3727 ret = add_pending_dir_move(sctx, 3728 sctx->cur_ino, 3729 sctx->cur_inode_gen, 3730 ino, 3731 &sctx->new_refs, 3732 &sctx->deleted_refs, 3733 is_orphan); 3734 if (!ret) 3735 ret = 1; 3736 } 3737 3738 return ret; 3739 } 3740 3741 static int update_ref_path(struct send_ctx *sctx, struct recorded_ref *ref) 3742 { 3743 int ret; 3744 struct fs_path *new_path; 3745 3746 /* 3747 * Our reference's name member points to its full_path member string, so 3748 * we use here a new path. 3749 */ 3750 new_path = fs_path_alloc(); 3751 if (!new_path) 3752 return -ENOMEM; 3753 3754 ret = get_cur_path(sctx, ref->dir, ref->dir_gen, new_path); 3755 if (ret < 0) { 3756 fs_path_free(new_path); 3757 return ret; 3758 } 3759 ret = fs_path_add(new_path, ref->name, ref->name_len); 3760 if (ret < 0) { 3761 fs_path_free(new_path); 3762 return ret; 3763 } 3764 3765 fs_path_free(ref->full_path); 3766 set_ref_path(ref, new_path); 3767 3768 return 0; 3769 } 3770 3771 /* 3772 * This does all the move/link/unlink/rmdir magic. 3773 */ 3774 static int process_recorded_refs(struct send_ctx *sctx, int *pending_move) 3775 { 3776 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; 3777 int ret = 0; 3778 struct recorded_ref *cur; 3779 struct recorded_ref *cur2; 3780 struct list_head check_dirs; 3781 struct fs_path *valid_path = NULL; 3782 u64 ow_inode = 0; 3783 u64 ow_gen; 3784 u64 ow_mode; 3785 int did_overwrite = 0; 3786 int is_orphan = 0; 3787 u64 last_dir_ino_rm = 0; 3788 bool can_rename = true; 3789 bool orphanized_dir = false; 3790 bool orphanized_ancestor = false; 3791 3792 btrfs_debug(fs_info, "process_recorded_refs %llu", sctx->cur_ino); 3793 3794 /* 3795 * This should never happen as the root dir always has the same ref 3796 * which is always '..' 3797 */ 3798 BUG_ON(sctx->cur_ino <= BTRFS_FIRST_FREE_OBJECTID); 3799 INIT_LIST_HEAD(&check_dirs); 3800 3801 valid_path = fs_path_alloc(); 3802 if (!valid_path) { 3803 ret = -ENOMEM; 3804 goto out; 3805 } 3806 3807 /* 3808 * First, check if the first ref of the current inode was overwritten 3809 * before. If yes, we know that the current inode was already orphanized 3810 * and thus use the orphan name. If not, we can use get_cur_path to 3811 * get the path of the first ref as it would like while receiving at 3812 * this point in time. 3813 * New inodes are always orphan at the beginning, so force to use the 3814 * orphan name in this case. 3815 * The first ref is stored in valid_path and will be updated if it 3816 * gets moved around. 3817 */ 3818 if (!sctx->cur_inode_new) { 3819 ret = did_overwrite_first_ref(sctx, sctx->cur_ino, 3820 sctx->cur_inode_gen); 3821 if (ret < 0) 3822 goto out; 3823 if (ret) 3824 did_overwrite = 1; 3825 } 3826 if (sctx->cur_inode_new || did_overwrite) { 3827 ret = gen_unique_name(sctx, sctx->cur_ino, 3828 sctx->cur_inode_gen, valid_path); 3829 if (ret < 0) 3830 goto out; 3831 is_orphan = 1; 3832 } else { 3833 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, 3834 valid_path); 3835 if (ret < 0) 3836 goto out; 3837 } 3838 3839 list_for_each_entry(cur, &sctx->new_refs, list) { 3840 /* 3841 * We may have refs where the parent directory does not exist 3842 * yet. This happens if the parent directories inum is higher 3843 * the the current inum. To handle this case, we create the 3844 * parent directory out of order. But we need to check if this 3845 * did already happen before due to other refs in the same dir. 3846 */ 3847 ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen); 3848 if (ret < 0) 3849 goto out; 3850 if (ret == inode_state_will_create) { 3851 ret = 0; 3852 /* 3853 * First check if any of the current inodes refs did 3854 * already create the dir. 3855 */ 3856 list_for_each_entry(cur2, &sctx->new_refs, list) { 3857 if (cur == cur2) 3858 break; 3859 if (cur2->dir == cur->dir) { 3860 ret = 1; 3861 break; 3862 } 3863 } 3864 3865 /* 3866 * If that did not happen, check if a previous inode 3867 * did already create the dir. 3868 */ 3869 if (!ret) 3870 ret = did_create_dir(sctx, cur->dir); 3871 if (ret < 0) 3872 goto out; 3873 if (!ret) { 3874 ret = send_create_inode(sctx, cur->dir); 3875 if (ret < 0) 3876 goto out; 3877 } 3878 } 3879 3880 /* 3881 * Check if this new ref would overwrite the first ref of 3882 * another unprocessed inode. If yes, orphanize the 3883 * overwritten inode. If we find an overwritten ref that is 3884 * not the first ref, simply unlink it. 3885 */ 3886 ret = will_overwrite_ref(sctx, cur->dir, cur->dir_gen, 3887 cur->name, cur->name_len, 3888 &ow_inode, &ow_gen, &ow_mode); 3889 if (ret < 0) 3890 goto out; 3891 if (ret) { 3892 ret = is_first_ref(sctx->parent_root, 3893 ow_inode, cur->dir, cur->name, 3894 cur->name_len); 3895 if (ret < 0) 3896 goto out; 3897 if (ret) { 3898 struct name_cache_entry *nce; 3899 struct waiting_dir_move *wdm; 3900 3901 ret = orphanize_inode(sctx, ow_inode, ow_gen, 3902 cur->full_path); 3903 if (ret < 0) 3904 goto out; 3905 if (S_ISDIR(ow_mode)) 3906 orphanized_dir = true; 3907 3908 /* 3909 * If ow_inode has its rename operation delayed 3910 * make sure that its orphanized name is used in 3911 * the source path when performing its rename 3912 * operation. 3913 */ 3914 if (is_waiting_for_move(sctx, ow_inode)) { 3915 wdm = get_waiting_dir_move(sctx, 3916 ow_inode); 3917 ASSERT(wdm); 3918 wdm->orphanized = true; 3919 } 3920 3921 /* 3922 * Make sure we clear our orphanized inode's 3923 * name from the name cache. This is because the 3924 * inode ow_inode might be an ancestor of some 3925 * other inode that will be orphanized as well 3926 * later and has an inode number greater than 3927 * sctx->send_progress. We need to prevent 3928 * future name lookups from using the old name 3929 * and get instead the orphan name. 3930 */ 3931 nce = name_cache_search(sctx, ow_inode, ow_gen); 3932 if (nce) { 3933 name_cache_delete(sctx, nce); 3934 kfree(nce); 3935 } 3936 3937 /* 3938 * ow_inode might currently be an ancestor of 3939 * cur_ino, therefore compute valid_path (the 3940 * current path of cur_ino) again because it 3941 * might contain the pre-orphanization name of 3942 * ow_inode, which is no longer valid. 3943 */ 3944 ret = is_ancestor(sctx->parent_root, 3945 ow_inode, ow_gen, 3946 sctx->cur_ino, NULL); 3947 if (ret > 0) { 3948 orphanized_ancestor = true; 3949 fs_path_reset(valid_path); 3950 ret = get_cur_path(sctx, sctx->cur_ino, 3951 sctx->cur_inode_gen, 3952 valid_path); 3953 } 3954 if (ret < 0) 3955 goto out; 3956 } else { 3957 ret = send_unlink(sctx, cur->full_path); 3958 if (ret < 0) 3959 goto out; 3960 } 3961 } 3962 3963 if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root) { 3964 ret = wait_for_dest_dir_move(sctx, cur, is_orphan); 3965 if (ret < 0) 3966 goto out; 3967 if (ret == 1) { 3968 can_rename = false; 3969 *pending_move = 1; 3970 } 3971 } 3972 3973 if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root && 3974 can_rename) { 3975 ret = wait_for_parent_move(sctx, cur, is_orphan); 3976 if (ret < 0) 3977 goto out; 3978 if (ret == 1) { 3979 can_rename = false; 3980 *pending_move = 1; 3981 } 3982 } 3983 3984 /* 3985 * link/move the ref to the new place. If we have an orphan 3986 * inode, move it and update valid_path. If not, link or move 3987 * it depending on the inode mode. 3988 */ 3989 if (is_orphan && can_rename) { 3990 ret = send_rename(sctx, valid_path, cur->full_path); 3991 if (ret < 0) 3992 goto out; 3993 is_orphan = 0; 3994 ret = fs_path_copy(valid_path, cur->full_path); 3995 if (ret < 0) 3996 goto out; 3997 } else if (can_rename) { 3998 if (S_ISDIR(sctx->cur_inode_mode)) { 3999 /* 4000 * Dirs can't be linked, so move it. For moved 4001 * dirs, we always have one new and one deleted 4002 * ref. The deleted ref is ignored later. 4003 */ 4004 ret = send_rename(sctx, valid_path, 4005 cur->full_path); 4006 if (!ret) 4007 ret = fs_path_copy(valid_path, 4008 cur->full_path); 4009 if (ret < 0) 4010 goto out; 4011 } else { 4012 /* 4013 * We might have previously orphanized an inode 4014 * which is an ancestor of our current inode, 4015 * so our reference's full path, which was 4016 * computed before any such orphanizations, must 4017 * be updated. 4018 */ 4019 if (orphanized_dir) { 4020 ret = update_ref_path(sctx, cur); 4021 if (ret < 0) 4022 goto out; 4023 } 4024 ret = send_link(sctx, cur->full_path, 4025 valid_path); 4026 if (ret < 0) 4027 goto out; 4028 } 4029 } 4030 ret = dup_ref(cur, &check_dirs); 4031 if (ret < 0) 4032 goto out; 4033 } 4034 4035 if (S_ISDIR(sctx->cur_inode_mode) && sctx->cur_inode_deleted) { 4036 /* 4037 * Check if we can already rmdir the directory. If not, 4038 * orphanize it. For every dir item inside that gets deleted 4039 * later, we do this check again and rmdir it then if possible. 4040 * See the use of check_dirs for more details. 4041 */ 4042 ret = can_rmdir(sctx, sctx->cur_ino, sctx->cur_inode_gen, 4043 sctx->cur_ino); 4044 if (ret < 0) 4045 goto out; 4046 if (ret) { 4047 ret = send_rmdir(sctx, valid_path); 4048 if (ret < 0) 4049 goto out; 4050 } else if (!is_orphan) { 4051 ret = orphanize_inode(sctx, sctx->cur_ino, 4052 sctx->cur_inode_gen, valid_path); 4053 if (ret < 0) 4054 goto out; 4055 is_orphan = 1; 4056 } 4057 4058 list_for_each_entry(cur, &sctx->deleted_refs, list) { 4059 ret = dup_ref(cur, &check_dirs); 4060 if (ret < 0) 4061 goto out; 4062 } 4063 } else if (S_ISDIR(sctx->cur_inode_mode) && 4064 !list_empty(&sctx->deleted_refs)) { 4065 /* 4066 * We have a moved dir. Add the old parent to check_dirs 4067 */ 4068 cur = list_entry(sctx->deleted_refs.next, struct recorded_ref, 4069 list); 4070 ret = dup_ref(cur, &check_dirs); 4071 if (ret < 0) 4072 goto out; 4073 } else if (!S_ISDIR(sctx->cur_inode_mode)) { 4074 /* 4075 * We have a non dir inode. Go through all deleted refs and 4076 * unlink them if they were not already overwritten by other 4077 * inodes. 4078 */ 4079 list_for_each_entry(cur, &sctx->deleted_refs, list) { 4080 ret = did_overwrite_ref(sctx, cur->dir, cur->dir_gen, 4081 sctx->cur_ino, sctx->cur_inode_gen, 4082 cur->name, cur->name_len); 4083 if (ret < 0) 4084 goto out; 4085 if (!ret) { 4086 /* 4087 * If we orphanized any ancestor before, we need 4088 * to recompute the full path for deleted names, 4089 * since any such path was computed before we 4090 * processed any references and orphanized any 4091 * ancestor inode. 4092 */ 4093 if (orphanized_ancestor) { 4094 ret = update_ref_path(sctx, cur); 4095 if (ret < 0) 4096 goto out; 4097 } 4098 ret = send_unlink(sctx, cur->full_path); 4099 if (ret < 0) 4100 goto out; 4101 } 4102 ret = dup_ref(cur, &check_dirs); 4103 if (ret < 0) 4104 goto out; 4105 } 4106 /* 4107 * If the inode is still orphan, unlink the orphan. This may 4108 * happen when a previous inode did overwrite the first ref 4109 * of this inode and no new refs were added for the current 4110 * inode. Unlinking does not mean that the inode is deleted in 4111 * all cases. There may still be links to this inode in other 4112 * places. 4113 */ 4114 if (is_orphan) { 4115 ret = send_unlink(sctx, valid_path); 4116 if (ret < 0) 4117 goto out; 4118 } 4119 } 4120 4121 /* 4122 * We did collect all parent dirs where cur_inode was once located. We 4123 * now go through all these dirs and check if they are pending for 4124 * deletion and if it's finally possible to perform the rmdir now. 4125 * We also update the inode stats of the parent dirs here. 4126 */ 4127 list_for_each_entry(cur, &check_dirs, list) { 4128 /* 4129 * In case we had refs into dirs that were not processed yet, 4130 * we don't need to do the utime and rmdir logic for these dirs. 4131 * The dir will be processed later. 4132 */ 4133 if (cur->dir > sctx->cur_ino) 4134 continue; 4135 4136 ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen); 4137 if (ret < 0) 4138 goto out; 4139 4140 if (ret == inode_state_did_create || 4141 ret == inode_state_no_change) { 4142 /* TODO delayed utimes */ 4143 ret = send_utimes(sctx, cur->dir, cur->dir_gen); 4144 if (ret < 0) 4145 goto out; 4146 } else if (ret == inode_state_did_delete && 4147 cur->dir != last_dir_ino_rm) { 4148 ret = can_rmdir(sctx, cur->dir, cur->dir_gen, 4149 sctx->cur_ino); 4150 if (ret < 0) 4151 goto out; 4152 if (ret) { 4153 ret = get_cur_path(sctx, cur->dir, 4154 cur->dir_gen, valid_path); 4155 if (ret < 0) 4156 goto out; 4157 ret = send_rmdir(sctx, valid_path); 4158 if (ret < 0) 4159 goto out; 4160 last_dir_ino_rm = cur->dir; 4161 } 4162 } 4163 } 4164 4165 ret = 0; 4166 4167 out: 4168 __free_recorded_refs(&check_dirs); 4169 free_recorded_refs(sctx); 4170 fs_path_free(valid_path); 4171 return ret; 4172 } 4173 4174 static int record_ref(struct btrfs_root *root, u64 dir, struct fs_path *name, 4175 void *ctx, struct list_head *refs) 4176 { 4177 int ret = 0; 4178 struct send_ctx *sctx = ctx; 4179 struct fs_path *p; 4180 u64 gen; 4181 4182 p = fs_path_alloc(); 4183 if (!p) 4184 return -ENOMEM; 4185 4186 ret = get_inode_info(root, dir, NULL, &gen, NULL, NULL, 4187 NULL, NULL); 4188 if (ret < 0) 4189 goto out; 4190 4191 ret = get_cur_path(sctx, dir, gen, p); 4192 if (ret < 0) 4193 goto out; 4194 ret = fs_path_add_path(p, name); 4195 if (ret < 0) 4196 goto out; 4197 4198 ret = __record_ref(refs, dir, gen, p); 4199 4200 out: 4201 if (ret) 4202 fs_path_free(p); 4203 return ret; 4204 } 4205 4206 static int __record_new_ref(int num, u64 dir, int index, 4207 struct fs_path *name, 4208 void *ctx) 4209 { 4210 struct send_ctx *sctx = ctx; 4211 return record_ref(sctx->send_root, dir, name, ctx, &sctx->new_refs); 4212 } 4213 4214 4215 static int __record_deleted_ref(int num, u64 dir, int index, 4216 struct fs_path *name, 4217 void *ctx) 4218 { 4219 struct send_ctx *sctx = ctx; 4220 return record_ref(sctx->parent_root, dir, name, ctx, 4221 &sctx->deleted_refs); 4222 } 4223 4224 static int record_new_ref(struct send_ctx *sctx) 4225 { 4226 int ret; 4227 4228 ret = iterate_inode_ref(sctx->send_root, sctx->left_path, 4229 sctx->cmp_key, 0, __record_new_ref, sctx); 4230 if (ret < 0) 4231 goto out; 4232 ret = 0; 4233 4234 out: 4235 return ret; 4236 } 4237 4238 static int record_deleted_ref(struct send_ctx *sctx) 4239 { 4240 int ret; 4241 4242 ret = iterate_inode_ref(sctx->parent_root, sctx->right_path, 4243 sctx->cmp_key, 0, __record_deleted_ref, sctx); 4244 if (ret < 0) 4245 goto out; 4246 ret = 0; 4247 4248 out: 4249 return ret; 4250 } 4251 4252 struct find_ref_ctx { 4253 u64 dir; 4254 u64 dir_gen; 4255 struct btrfs_root *root; 4256 struct fs_path *name; 4257 int found_idx; 4258 }; 4259 4260 static int __find_iref(int num, u64 dir, int index, 4261 struct fs_path *name, 4262 void *ctx_) 4263 { 4264 struct find_ref_ctx *ctx = ctx_; 4265 u64 dir_gen; 4266 int ret; 4267 4268 if (dir == ctx->dir && fs_path_len(name) == fs_path_len(ctx->name) && 4269 strncmp(name->start, ctx->name->start, fs_path_len(name)) == 0) { 4270 /* 4271 * To avoid doing extra lookups we'll only do this if everything 4272 * else matches. 4273 */ 4274 ret = get_inode_info(ctx->root, dir, NULL, &dir_gen, NULL, 4275 NULL, NULL, NULL); 4276 if (ret) 4277 return ret; 4278 if (dir_gen != ctx->dir_gen) 4279 return 0; 4280 ctx->found_idx = num; 4281 return 1; 4282 } 4283 return 0; 4284 } 4285 4286 static int find_iref(struct btrfs_root *root, 4287 struct btrfs_path *path, 4288 struct btrfs_key *key, 4289 u64 dir, u64 dir_gen, struct fs_path *name) 4290 { 4291 int ret; 4292 struct find_ref_ctx ctx; 4293 4294 ctx.dir = dir; 4295 ctx.name = name; 4296 ctx.dir_gen = dir_gen; 4297 ctx.found_idx = -1; 4298 ctx.root = root; 4299 4300 ret = iterate_inode_ref(root, path, key, 0, __find_iref, &ctx); 4301 if (ret < 0) 4302 return ret; 4303 4304 if (ctx.found_idx == -1) 4305 return -ENOENT; 4306 4307 return ctx.found_idx; 4308 } 4309 4310 static int __record_changed_new_ref(int num, u64 dir, int index, 4311 struct fs_path *name, 4312 void *ctx) 4313 { 4314 u64 dir_gen; 4315 int ret; 4316 struct send_ctx *sctx = ctx; 4317 4318 ret = get_inode_info(sctx->send_root, dir, NULL, &dir_gen, NULL, 4319 NULL, NULL, NULL); 4320 if (ret) 4321 return ret; 4322 4323 ret = find_iref(sctx->parent_root, sctx->right_path, 4324 sctx->cmp_key, dir, dir_gen, name); 4325 if (ret == -ENOENT) 4326 ret = __record_new_ref(num, dir, index, name, sctx); 4327 else if (ret > 0) 4328 ret = 0; 4329 4330 return ret; 4331 } 4332 4333 static int __record_changed_deleted_ref(int num, u64 dir, int index, 4334 struct fs_path *name, 4335 void *ctx) 4336 { 4337 u64 dir_gen; 4338 int ret; 4339 struct send_ctx *sctx = ctx; 4340 4341 ret = get_inode_info(sctx->parent_root, dir, NULL, &dir_gen, NULL, 4342 NULL, NULL, NULL); 4343 if (ret) 4344 return ret; 4345 4346 ret = find_iref(sctx->send_root, sctx->left_path, sctx->cmp_key, 4347 dir, dir_gen, name); 4348 if (ret == -ENOENT) 4349 ret = __record_deleted_ref(num, dir, index, name, sctx); 4350 else if (ret > 0) 4351 ret = 0; 4352 4353 return ret; 4354 } 4355 4356 static int record_changed_ref(struct send_ctx *sctx) 4357 { 4358 int ret = 0; 4359 4360 ret = iterate_inode_ref(sctx->send_root, sctx->left_path, 4361 sctx->cmp_key, 0, __record_changed_new_ref, sctx); 4362 if (ret < 0) 4363 goto out; 4364 ret = iterate_inode_ref(sctx->parent_root, sctx->right_path, 4365 sctx->cmp_key, 0, __record_changed_deleted_ref, sctx); 4366 if (ret < 0) 4367 goto out; 4368 ret = 0; 4369 4370 out: 4371 return ret; 4372 } 4373 4374 /* 4375 * Record and process all refs at once. Needed when an inode changes the 4376 * generation number, which means that it was deleted and recreated. 4377 */ 4378 static int process_all_refs(struct send_ctx *sctx, 4379 enum btrfs_compare_tree_result cmd) 4380 { 4381 int ret; 4382 struct btrfs_root *root; 4383 struct btrfs_path *path; 4384 struct btrfs_key key; 4385 struct btrfs_key found_key; 4386 struct extent_buffer *eb; 4387 int slot; 4388 iterate_inode_ref_t cb; 4389 int pending_move = 0; 4390 4391 path = alloc_path_for_send(); 4392 if (!path) 4393 return -ENOMEM; 4394 4395 if (cmd == BTRFS_COMPARE_TREE_NEW) { 4396 root = sctx->send_root; 4397 cb = __record_new_ref; 4398 } else if (cmd == BTRFS_COMPARE_TREE_DELETED) { 4399 root = sctx->parent_root; 4400 cb = __record_deleted_ref; 4401 } else { 4402 btrfs_err(sctx->send_root->fs_info, 4403 "Wrong command %d in process_all_refs", cmd); 4404 ret = -EINVAL; 4405 goto out; 4406 } 4407 4408 key.objectid = sctx->cmp_key->objectid; 4409 key.type = BTRFS_INODE_REF_KEY; 4410 key.offset = 0; 4411 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 4412 if (ret < 0) 4413 goto out; 4414 4415 while (1) { 4416 eb = path->nodes[0]; 4417 slot = path->slots[0]; 4418 if (slot >= btrfs_header_nritems(eb)) { 4419 ret = btrfs_next_leaf(root, path); 4420 if (ret < 0) 4421 goto out; 4422 else if (ret > 0) 4423 break; 4424 continue; 4425 } 4426 4427 btrfs_item_key_to_cpu(eb, &found_key, slot); 4428 4429 if (found_key.objectid != key.objectid || 4430 (found_key.type != BTRFS_INODE_REF_KEY && 4431 found_key.type != BTRFS_INODE_EXTREF_KEY)) 4432 break; 4433 4434 ret = iterate_inode_ref(root, path, &found_key, 0, cb, sctx); 4435 if (ret < 0) 4436 goto out; 4437 4438 path->slots[0]++; 4439 } 4440 btrfs_release_path(path); 4441 4442 /* 4443 * We don't actually care about pending_move as we are simply 4444 * re-creating this inode and will be rename'ing it into place once we 4445 * rename the parent directory. 4446 */ 4447 ret = process_recorded_refs(sctx, &pending_move); 4448 out: 4449 btrfs_free_path(path); 4450 return ret; 4451 } 4452 4453 static int send_set_xattr(struct send_ctx *sctx, 4454 struct fs_path *path, 4455 const char *name, int name_len, 4456 const char *data, int data_len) 4457 { 4458 int ret = 0; 4459 4460 ret = begin_cmd(sctx, BTRFS_SEND_C_SET_XATTR); 4461 if (ret < 0) 4462 goto out; 4463 4464 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path); 4465 TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len); 4466 TLV_PUT(sctx, BTRFS_SEND_A_XATTR_DATA, data, data_len); 4467 4468 ret = send_cmd(sctx); 4469 4470 tlv_put_failure: 4471 out: 4472 return ret; 4473 } 4474 4475 static int send_remove_xattr(struct send_ctx *sctx, 4476 struct fs_path *path, 4477 const char *name, int name_len) 4478 { 4479 int ret = 0; 4480 4481 ret = begin_cmd(sctx, BTRFS_SEND_C_REMOVE_XATTR); 4482 if (ret < 0) 4483 goto out; 4484 4485 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path); 4486 TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len); 4487 4488 ret = send_cmd(sctx); 4489 4490 tlv_put_failure: 4491 out: 4492 return ret; 4493 } 4494 4495 static int __process_new_xattr(int num, struct btrfs_key *di_key, 4496 const char *name, int name_len, 4497 const char *data, int data_len, 4498 u8 type, void *ctx) 4499 { 4500 int ret; 4501 struct send_ctx *sctx = ctx; 4502 struct fs_path *p; 4503 struct posix_acl_xattr_header dummy_acl; 4504 4505 p = fs_path_alloc(); 4506 if (!p) 4507 return -ENOMEM; 4508 4509 /* 4510 * This hack is needed because empty acls are stored as zero byte 4511 * data in xattrs. Problem with that is, that receiving these zero byte 4512 * acls will fail later. To fix this, we send a dummy acl list that 4513 * only contains the version number and no entries. 4514 */ 4515 if (!strncmp(name, XATTR_NAME_POSIX_ACL_ACCESS, name_len) || 4516 !strncmp(name, XATTR_NAME_POSIX_ACL_DEFAULT, name_len)) { 4517 if (data_len == 0) { 4518 dummy_acl.a_version = 4519 cpu_to_le32(POSIX_ACL_XATTR_VERSION); 4520 data = (char *)&dummy_acl; 4521 data_len = sizeof(dummy_acl); 4522 } 4523 } 4524 4525 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); 4526 if (ret < 0) 4527 goto out; 4528 4529 ret = send_set_xattr(sctx, p, name, name_len, data, data_len); 4530 4531 out: 4532 fs_path_free(p); 4533 return ret; 4534 } 4535 4536 static int __process_deleted_xattr(int num, struct btrfs_key *di_key, 4537 const char *name, int name_len, 4538 const char *data, int data_len, 4539 u8 type, void *ctx) 4540 { 4541 int ret; 4542 struct send_ctx *sctx = ctx; 4543 struct fs_path *p; 4544 4545 p = fs_path_alloc(); 4546 if (!p) 4547 return -ENOMEM; 4548 4549 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); 4550 if (ret < 0) 4551 goto out; 4552 4553 ret = send_remove_xattr(sctx, p, name, name_len); 4554 4555 out: 4556 fs_path_free(p); 4557 return ret; 4558 } 4559 4560 static int process_new_xattr(struct send_ctx *sctx) 4561 { 4562 int ret = 0; 4563 4564 ret = iterate_dir_item(sctx->send_root, sctx->left_path, 4565 __process_new_xattr, sctx); 4566 4567 return ret; 4568 } 4569 4570 static int process_deleted_xattr(struct send_ctx *sctx) 4571 { 4572 return iterate_dir_item(sctx->parent_root, sctx->right_path, 4573 __process_deleted_xattr, sctx); 4574 } 4575 4576 struct find_xattr_ctx { 4577 const char *name; 4578 int name_len; 4579 int found_idx; 4580 char *found_data; 4581 int found_data_len; 4582 }; 4583 4584 static int __find_xattr(int num, struct btrfs_key *di_key, 4585 const char *name, int name_len, 4586 const char *data, int data_len, 4587 u8 type, void *vctx) 4588 { 4589 struct find_xattr_ctx *ctx = vctx; 4590 4591 if (name_len == ctx->name_len && 4592 strncmp(name, ctx->name, name_len) == 0) { 4593 ctx->found_idx = num; 4594 ctx->found_data_len = data_len; 4595 ctx->found_data = kmemdup(data, data_len, GFP_KERNEL); 4596 if (!ctx->found_data) 4597 return -ENOMEM; 4598 return 1; 4599 } 4600 return 0; 4601 } 4602 4603 static int find_xattr(struct btrfs_root *root, 4604 struct btrfs_path *path, 4605 struct btrfs_key *key, 4606 const char *name, int name_len, 4607 char **data, int *data_len) 4608 { 4609 int ret; 4610 struct find_xattr_ctx ctx; 4611 4612 ctx.name = name; 4613 ctx.name_len = name_len; 4614 ctx.found_idx = -1; 4615 ctx.found_data = NULL; 4616 ctx.found_data_len = 0; 4617 4618 ret = iterate_dir_item(root, path, __find_xattr, &ctx); 4619 if (ret < 0) 4620 return ret; 4621 4622 if (ctx.found_idx == -1) 4623 return -ENOENT; 4624 if (data) { 4625 *data = ctx.found_data; 4626 *data_len = ctx.found_data_len; 4627 } else { 4628 kfree(ctx.found_data); 4629 } 4630 return ctx.found_idx; 4631 } 4632 4633 4634 static int __process_changed_new_xattr(int num, struct btrfs_key *di_key, 4635 const char *name, int name_len, 4636 const char *data, int data_len, 4637 u8 type, void *ctx) 4638 { 4639 int ret; 4640 struct send_ctx *sctx = ctx; 4641 char *found_data = NULL; 4642 int found_data_len = 0; 4643 4644 ret = find_xattr(sctx->parent_root, sctx->right_path, 4645 sctx->cmp_key, name, name_len, &found_data, 4646 &found_data_len); 4647 if (ret == -ENOENT) { 4648 ret = __process_new_xattr(num, di_key, name, name_len, data, 4649 data_len, type, ctx); 4650 } else if (ret >= 0) { 4651 if (data_len != found_data_len || 4652 memcmp(data, found_data, data_len)) { 4653 ret = __process_new_xattr(num, di_key, name, name_len, 4654 data, data_len, type, ctx); 4655 } else { 4656 ret = 0; 4657 } 4658 } 4659 4660 kfree(found_data); 4661 return ret; 4662 } 4663 4664 static int __process_changed_deleted_xattr(int num, struct btrfs_key *di_key, 4665 const char *name, int name_len, 4666 const char *data, int data_len, 4667 u8 type, void *ctx) 4668 { 4669 int ret; 4670 struct send_ctx *sctx = ctx; 4671 4672 ret = find_xattr(sctx->send_root, sctx->left_path, sctx->cmp_key, 4673 name, name_len, NULL, NULL); 4674 if (ret == -ENOENT) 4675 ret = __process_deleted_xattr(num, di_key, name, name_len, data, 4676 data_len, type, ctx); 4677 else if (ret >= 0) 4678 ret = 0; 4679 4680 return ret; 4681 } 4682 4683 static int process_changed_xattr(struct send_ctx *sctx) 4684 { 4685 int ret = 0; 4686 4687 ret = iterate_dir_item(sctx->send_root, sctx->left_path, 4688 __process_changed_new_xattr, sctx); 4689 if (ret < 0) 4690 goto out; 4691 ret = iterate_dir_item(sctx->parent_root, sctx->right_path, 4692 __process_changed_deleted_xattr, sctx); 4693 4694 out: 4695 return ret; 4696 } 4697 4698 static int process_all_new_xattrs(struct send_ctx *sctx) 4699 { 4700 int ret; 4701 struct btrfs_root *root; 4702 struct btrfs_path *path; 4703 struct btrfs_key key; 4704 struct btrfs_key found_key; 4705 struct extent_buffer *eb; 4706 int slot; 4707 4708 path = alloc_path_for_send(); 4709 if (!path) 4710 return -ENOMEM; 4711 4712 root = sctx->send_root; 4713 4714 key.objectid = sctx->cmp_key->objectid; 4715 key.type = BTRFS_XATTR_ITEM_KEY; 4716 key.offset = 0; 4717 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 4718 if (ret < 0) 4719 goto out; 4720 4721 while (1) { 4722 eb = path->nodes[0]; 4723 slot = path->slots[0]; 4724 if (slot >= btrfs_header_nritems(eb)) { 4725 ret = btrfs_next_leaf(root, path); 4726 if (ret < 0) { 4727 goto out; 4728 } else if (ret > 0) { 4729 ret = 0; 4730 break; 4731 } 4732 continue; 4733 } 4734 4735 btrfs_item_key_to_cpu(eb, &found_key, slot); 4736 if (found_key.objectid != key.objectid || 4737 found_key.type != key.type) { 4738 ret = 0; 4739 goto out; 4740 } 4741 4742 ret = iterate_dir_item(root, path, __process_new_xattr, sctx); 4743 if (ret < 0) 4744 goto out; 4745 4746 path->slots[0]++; 4747 } 4748 4749 out: 4750 btrfs_free_path(path); 4751 return ret; 4752 } 4753 4754 static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len) 4755 { 4756 struct btrfs_root *root = sctx->send_root; 4757 struct btrfs_fs_info *fs_info = root->fs_info; 4758 struct inode *inode; 4759 struct page *page; 4760 char *addr; 4761 struct btrfs_key key; 4762 pgoff_t index = offset >> PAGE_SHIFT; 4763 pgoff_t last_index; 4764 unsigned pg_offset = offset & ~PAGE_MASK; 4765 ssize_t ret = 0; 4766 4767 key.objectid = sctx->cur_ino; 4768 key.type = BTRFS_INODE_ITEM_KEY; 4769 key.offset = 0; 4770 4771 inode = btrfs_iget(fs_info->sb, &key, root, NULL); 4772 if (IS_ERR(inode)) 4773 return PTR_ERR(inode); 4774 4775 if (offset + len > i_size_read(inode)) { 4776 if (offset > i_size_read(inode)) 4777 len = 0; 4778 else 4779 len = offset - i_size_read(inode); 4780 } 4781 if (len == 0) 4782 goto out; 4783 4784 last_index = (offset + len - 1) >> PAGE_SHIFT; 4785 4786 /* initial readahead */ 4787 memset(&sctx->ra, 0, sizeof(struct file_ra_state)); 4788 file_ra_state_init(&sctx->ra, inode->i_mapping); 4789 4790 while (index <= last_index) { 4791 unsigned cur_len = min_t(unsigned, len, 4792 PAGE_SIZE - pg_offset); 4793 4794 page = find_lock_page(inode->i_mapping, index); 4795 if (!page) { 4796 page_cache_sync_readahead(inode->i_mapping, &sctx->ra, 4797 NULL, index, last_index + 1 - index); 4798 4799 page = find_or_create_page(inode->i_mapping, index, 4800 GFP_KERNEL); 4801 if (!page) { 4802 ret = -ENOMEM; 4803 break; 4804 } 4805 } 4806 4807 if (PageReadahead(page)) { 4808 page_cache_async_readahead(inode->i_mapping, &sctx->ra, 4809 NULL, page, index, last_index + 1 - index); 4810 } 4811 4812 if (!PageUptodate(page)) { 4813 btrfs_readpage(NULL, page); 4814 lock_page(page); 4815 if (!PageUptodate(page)) { 4816 unlock_page(page); 4817 put_page(page); 4818 ret = -EIO; 4819 break; 4820 } 4821 } 4822 4823 addr = kmap(page); 4824 memcpy(sctx->read_buf + ret, addr + pg_offset, cur_len); 4825 kunmap(page); 4826 unlock_page(page); 4827 put_page(page); 4828 index++; 4829 pg_offset = 0; 4830 len -= cur_len; 4831 ret += cur_len; 4832 } 4833 out: 4834 iput(inode); 4835 return ret; 4836 } 4837 4838 /* 4839 * Read some bytes from the current inode/file and send a write command to 4840 * user space. 4841 */ 4842 static int send_write(struct send_ctx *sctx, u64 offset, u32 len) 4843 { 4844 struct btrfs_fs_info *fs_info = sctx->send_root->fs_info; 4845 int ret = 0; 4846 struct fs_path *p; 4847 ssize_t num_read = 0; 4848 4849 p = fs_path_alloc(); 4850 if (!p) 4851 return -ENOMEM; 4852 4853 btrfs_debug(fs_info, "send_write offset=%llu, len=%d", offset, len); 4854 4855 num_read = fill_read_buf(sctx, offset, len); 4856 if (num_read <= 0) { 4857 if (num_read < 0) 4858 ret = num_read; 4859 goto out; 4860 } 4861 4862 ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE); 4863 if (ret < 0) 4864 goto out; 4865 4866 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); 4867 if (ret < 0) 4868 goto out; 4869 4870 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); 4871 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset); 4872 TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, num_read); 4873 4874 ret = send_cmd(sctx); 4875 4876 tlv_put_failure: 4877 out: 4878 fs_path_free(p); 4879 if (ret < 0) 4880 return ret; 4881 return num_read; 4882 } 4883 4884 /* 4885 * Send a clone command to user space. 4886 */ 4887 static int send_clone(struct send_ctx *sctx, 4888 u64 offset, u32 len, 4889 struct clone_root *clone_root) 4890 { 4891 int ret = 0; 4892 struct fs_path *p; 4893 u64 gen; 4894 4895 btrfs_debug(sctx->send_root->fs_info, 4896 "send_clone offset=%llu, len=%d, clone_root=%llu, clone_inode=%llu, clone_offset=%llu", 4897 offset, len, clone_root->root->objectid, clone_root->ino, 4898 clone_root->offset); 4899 4900 p = fs_path_alloc(); 4901 if (!p) 4902 return -ENOMEM; 4903 4904 ret = begin_cmd(sctx, BTRFS_SEND_C_CLONE); 4905 if (ret < 0) 4906 goto out; 4907 4908 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); 4909 if (ret < 0) 4910 goto out; 4911 4912 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset); 4913 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_LEN, len); 4914 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); 4915 4916 if (clone_root->root == sctx->send_root) { 4917 ret = get_inode_info(sctx->send_root, clone_root->ino, NULL, 4918 &gen, NULL, NULL, NULL, NULL); 4919 if (ret < 0) 4920 goto out; 4921 ret = get_cur_path(sctx, clone_root->ino, gen, p); 4922 } else { 4923 ret = get_inode_path(clone_root->root, clone_root->ino, p); 4924 } 4925 if (ret < 0) 4926 goto out; 4927 4928 /* 4929 * If the parent we're using has a received_uuid set then use that as 4930 * our clone source as that is what we will look for when doing a 4931 * receive. 4932 * 4933 * This covers the case that we create a snapshot off of a received 4934 * subvolume and then use that as the parent and try to receive on a 4935 * different host. 4936 */ 4937 if (!btrfs_is_empty_uuid(clone_root->root->root_item.received_uuid)) 4938 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID, 4939 clone_root->root->root_item.received_uuid); 4940 else 4941 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID, 4942 clone_root->root->root_item.uuid); 4943 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID, 4944 le64_to_cpu(clone_root->root->root_item.ctransid)); 4945 TLV_PUT_PATH(sctx, BTRFS_SEND_A_CLONE_PATH, p); 4946 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_OFFSET, 4947 clone_root->offset); 4948 4949 ret = send_cmd(sctx); 4950 4951 tlv_put_failure: 4952 out: 4953 fs_path_free(p); 4954 return ret; 4955 } 4956 4957 /* 4958 * Send an update extent command to user space. 4959 */ 4960 static int send_update_extent(struct send_ctx *sctx, 4961 u64 offset, u32 len) 4962 { 4963 int ret = 0; 4964 struct fs_path *p; 4965 4966 p = fs_path_alloc(); 4967 if (!p) 4968 return -ENOMEM; 4969 4970 ret = begin_cmd(sctx, BTRFS_SEND_C_UPDATE_EXTENT); 4971 if (ret < 0) 4972 goto out; 4973 4974 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); 4975 if (ret < 0) 4976 goto out; 4977 4978 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); 4979 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset); 4980 TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, len); 4981 4982 ret = send_cmd(sctx); 4983 4984 tlv_put_failure: 4985 out: 4986 fs_path_free(p); 4987 return ret; 4988 } 4989 4990 static int send_hole(struct send_ctx *sctx, u64 end) 4991 { 4992 struct fs_path *p = NULL; 4993 u64 offset = sctx->cur_inode_last_extent; 4994 u64 len; 4995 int ret = 0; 4996 4997 if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA) 4998 return send_update_extent(sctx, offset, end - offset); 4999 5000 p = fs_path_alloc(); 5001 if (!p) 5002 return -ENOMEM; 5003 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); 5004 if (ret < 0) 5005 goto tlv_put_failure; 5006 memset(sctx->read_buf, 0, BTRFS_SEND_READ_SIZE); 5007 while (offset < end) { 5008 len = min_t(u64, end - offset, BTRFS_SEND_READ_SIZE); 5009 5010 ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE); 5011 if (ret < 0) 5012 break; 5013 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); 5014 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset); 5015 TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, len); 5016 ret = send_cmd(sctx); 5017 if (ret < 0) 5018 break; 5019 offset += len; 5020 } 5021 sctx->cur_inode_next_write_offset = offset; 5022 tlv_put_failure: 5023 fs_path_free(p); 5024 return ret; 5025 } 5026 5027 static int send_extent_data(struct send_ctx *sctx, 5028 const u64 offset, 5029 const u64 len) 5030 { 5031 u64 sent = 0; 5032 5033 if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA) 5034 return send_update_extent(sctx, offset, len); 5035 5036 while (sent < len) { 5037 u64 size = len - sent; 5038 int ret; 5039 5040 if (size > BTRFS_SEND_READ_SIZE) 5041 size = BTRFS_SEND_READ_SIZE; 5042 ret = send_write(sctx, offset + sent, size); 5043 if (ret < 0) 5044 return ret; 5045 if (!ret) 5046 break; 5047 sent += ret; 5048 } 5049 return 0; 5050 } 5051 5052 static int clone_range(struct send_ctx *sctx, 5053 struct clone_root *clone_root, 5054 const u64 disk_byte, 5055 u64 data_offset, 5056 u64 offset, 5057 u64 len) 5058 { 5059 struct btrfs_path *path; 5060 struct btrfs_key key; 5061 int ret; 5062 5063 /* 5064 * Prevent cloning from a zero offset with a length matching the sector 5065 * size because in some scenarios this will make the receiver fail. 5066 * 5067 * For example, if in the source filesystem the extent at offset 0 5068 * has a length of sectorsize and it was written using direct IO, then 5069 * it can never be an inline extent (even if compression is enabled). 5070 * Then this extent can be cloned in the original filesystem to a non 5071 * zero file offset, but it may not be possible to clone in the 5072 * destination filesystem because it can be inlined due to compression 5073 * on the destination filesystem (as the receiver's write operations are 5074 * always done using buffered IO). The same happens when the original 5075 * filesystem does not have compression enabled but the destination 5076 * filesystem has. 5077 */ 5078 if (clone_root->offset == 0 && 5079 len == sctx->send_root->fs_info->sectorsize) 5080 return send_extent_data(sctx, offset, len); 5081 5082 path = alloc_path_for_send(); 5083 if (!path) 5084 return -ENOMEM; 5085 5086 /* 5087 * We can't send a clone operation for the entire range if we find 5088 * extent items in the respective range in the source file that 5089 * refer to different extents or if we find holes. 5090 * So check for that and do a mix of clone and regular write/copy 5091 * operations if needed. 5092 * 5093 * Example: 5094 * 5095 * mkfs.btrfs -f /dev/sda 5096 * mount /dev/sda /mnt 5097 * xfs_io -f -c "pwrite -S 0xaa 0K 100K" /mnt/foo 5098 * cp --reflink=always /mnt/foo /mnt/bar 5099 * xfs_io -c "pwrite -S 0xbb 50K 50K" /mnt/foo 5100 * btrfs subvolume snapshot -r /mnt /mnt/snap 5101 * 5102 * If when we send the snapshot and we are processing file bar (which 5103 * has a higher inode number than foo) we blindly send a clone operation 5104 * for the [0, 100K[ range from foo to bar, the receiver ends up getting 5105 * a file bar that matches the content of file foo - iow, doesn't match 5106 * the content from bar in the original filesystem. 5107 */ 5108 key.objectid = clone_root->ino; 5109 key.type = BTRFS_EXTENT_DATA_KEY; 5110 key.offset = clone_root->offset; 5111 ret = btrfs_search_slot(NULL, clone_root->root, &key, path, 0, 0); 5112 if (ret < 0) 5113 goto out; 5114 if (ret > 0 && path->slots[0] > 0) { 5115 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0] - 1); 5116 if (key.objectid == clone_root->ino && 5117 key.type == BTRFS_EXTENT_DATA_KEY) 5118 path->slots[0]--; 5119 } 5120 5121 while (true) { 5122 struct extent_buffer *leaf = path->nodes[0]; 5123 int slot = path->slots[0]; 5124 struct btrfs_file_extent_item *ei; 5125 u8 type; 5126 u64 ext_len; 5127 u64 clone_len; 5128 5129 if (slot >= btrfs_header_nritems(leaf)) { 5130 ret = btrfs_next_leaf(clone_root->root, path); 5131 if (ret < 0) 5132 goto out; 5133 else if (ret > 0) 5134 break; 5135 continue; 5136 } 5137 5138 btrfs_item_key_to_cpu(leaf, &key, slot); 5139 5140 /* 5141 * We might have an implicit trailing hole (NO_HOLES feature 5142 * enabled). We deal with it after leaving this loop. 5143 */ 5144 if (key.objectid != clone_root->ino || 5145 key.type != BTRFS_EXTENT_DATA_KEY) 5146 break; 5147 5148 ei = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); 5149 type = btrfs_file_extent_type(leaf, ei); 5150 if (type == BTRFS_FILE_EXTENT_INLINE) { 5151 ext_len = btrfs_file_extent_inline_len(leaf, slot, ei); 5152 ext_len = PAGE_ALIGN(ext_len); 5153 } else { 5154 ext_len = btrfs_file_extent_num_bytes(leaf, ei); 5155 } 5156 5157 if (key.offset + ext_len <= clone_root->offset) 5158 goto next; 5159 5160 if (key.offset > clone_root->offset) { 5161 /* Implicit hole, NO_HOLES feature enabled. */ 5162 u64 hole_len = key.offset - clone_root->offset; 5163 5164 if (hole_len > len) 5165 hole_len = len; 5166 ret = send_extent_data(sctx, offset, hole_len); 5167 if (ret < 0) 5168 goto out; 5169 5170 len -= hole_len; 5171 if (len == 0) 5172 break; 5173 offset += hole_len; 5174 clone_root->offset += hole_len; 5175 data_offset += hole_len; 5176 } 5177 5178 if (key.offset >= clone_root->offset + len) 5179 break; 5180 5181 clone_len = min_t(u64, ext_len, len); 5182 5183 if (btrfs_file_extent_disk_bytenr(leaf, ei) == disk_byte && 5184 btrfs_file_extent_offset(leaf, ei) == data_offset) 5185 ret = send_clone(sctx, offset, clone_len, clone_root); 5186 else 5187 ret = send_extent_data(sctx, offset, clone_len); 5188 5189 if (ret < 0) 5190 goto out; 5191 5192 len -= clone_len; 5193 if (len == 0) 5194 break; 5195 offset += clone_len; 5196 clone_root->offset += clone_len; 5197 data_offset += clone_len; 5198 next: 5199 path->slots[0]++; 5200 } 5201 5202 if (len > 0) 5203 ret = send_extent_data(sctx, offset, len); 5204 else 5205 ret = 0; 5206 out: 5207 btrfs_free_path(path); 5208 return ret; 5209 } 5210 5211 static int send_write_or_clone(struct send_ctx *sctx, 5212 struct btrfs_path *path, 5213 struct btrfs_key *key, 5214 struct clone_root *clone_root) 5215 { 5216 int ret = 0; 5217 struct btrfs_file_extent_item *ei; 5218 u64 offset = key->offset; 5219 u64 len; 5220 u8 type; 5221 u64 bs = sctx->send_root->fs_info->sb->s_blocksize; 5222 5223 ei = btrfs_item_ptr(path->nodes[0], path->slots[0], 5224 struct btrfs_file_extent_item); 5225 type = btrfs_file_extent_type(path->nodes[0], ei); 5226 if (type == BTRFS_FILE_EXTENT_INLINE) { 5227 len = btrfs_file_extent_inline_len(path->nodes[0], 5228 path->slots[0], ei); 5229 /* 5230 * it is possible the inline item won't cover the whole page, 5231 * but there may be items after this page. Make 5232 * sure to send the whole thing 5233 */ 5234 len = PAGE_ALIGN(len); 5235 } else { 5236 len = btrfs_file_extent_num_bytes(path->nodes[0], ei); 5237 } 5238 5239 if (offset + len > sctx->cur_inode_size) 5240 len = sctx->cur_inode_size - offset; 5241 if (len == 0) { 5242 ret = 0; 5243 goto out; 5244 } 5245 5246 if (clone_root && IS_ALIGNED(offset + len, bs)) { 5247 u64 disk_byte; 5248 u64 data_offset; 5249 5250 disk_byte = btrfs_file_extent_disk_bytenr(path->nodes[0], ei); 5251 data_offset = btrfs_file_extent_offset(path->nodes[0], ei); 5252 ret = clone_range(sctx, clone_root, disk_byte, data_offset, 5253 offset, len); 5254 } else { 5255 ret = send_extent_data(sctx, offset, len); 5256 } 5257 sctx->cur_inode_next_write_offset = offset + len; 5258 out: 5259 return ret; 5260 } 5261 5262 static int is_extent_unchanged(struct send_ctx *sctx, 5263 struct btrfs_path *left_path, 5264 struct btrfs_key *ekey) 5265 { 5266 int ret = 0; 5267 struct btrfs_key key; 5268 struct btrfs_path *path = NULL; 5269 struct extent_buffer *eb; 5270 int slot; 5271 struct btrfs_key found_key; 5272 struct btrfs_file_extent_item *ei; 5273 u64 left_disknr; 5274 u64 right_disknr; 5275 u64 left_offset; 5276 u64 right_offset; 5277 u64 left_offset_fixed; 5278 u64 left_len; 5279 u64 right_len; 5280 u64 left_gen; 5281 u64 right_gen; 5282 u8 left_type; 5283 u8 right_type; 5284 5285 path = alloc_path_for_send(); 5286 if (!path) 5287 return -ENOMEM; 5288 5289 eb = left_path->nodes[0]; 5290 slot = left_path->slots[0]; 5291 ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item); 5292 left_type = btrfs_file_extent_type(eb, ei); 5293 5294 if (left_type != BTRFS_FILE_EXTENT_REG) { 5295 ret = 0; 5296 goto out; 5297 } 5298 left_disknr = btrfs_file_extent_disk_bytenr(eb, ei); 5299 left_len = btrfs_file_extent_num_bytes(eb, ei); 5300 left_offset = btrfs_file_extent_offset(eb, ei); 5301 left_gen = btrfs_file_extent_generation(eb, ei); 5302 5303 /* 5304 * Following comments will refer to these graphics. L is the left 5305 * extents which we are checking at the moment. 1-8 are the right 5306 * extents that we iterate. 5307 * 5308 * |-----L-----| 5309 * |-1-|-2a-|-3-|-4-|-5-|-6-| 5310 * 5311 * |-----L-----| 5312 * |--1--|-2b-|...(same as above) 5313 * 5314 * Alternative situation. Happens on files where extents got split. 5315 * |-----L-----| 5316 * |-----------7-----------|-6-| 5317 * 5318 * Alternative situation. Happens on files which got larger. 5319 * |-----L-----| 5320 * |-8-| 5321 * Nothing follows after 8. 5322 */ 5323 5324 key.objectid = ekey->objectid; 5325 key.type = BTRFS_EXTENT_DATA_KEY; 5326 key.offset = ekey->offset; 5327 ret = btrfs_search_slot_for_read(sctx->parent_root, &key, path, 0, 0); 5328 if (ret < 0) 5329 goto out; 5330 if (ret) { 5331 ret = 0; 5332 goto out; 5333 } 5334 5335 /* 5336 * Handle special case where the right side has no extents at all. 5337 */ 5338 eb = path->nodes[0]; 5339 slot = path->slots[0]; 5340 btrfs_item_key_to_cpu(eb, &found_key, slot); 5341 if (found_key.objectid != key.objectid || 5342 found_key.type != key.type) { 5343 /* If we're a hole then just pretend nothing changed */ 5344 ret = (left_disknr) ? 0 : 1; 5345 goto out; 5346 } 5347 5348 /* 5349 * We're now on 2a, 2b or 7. 5350 */ 5351 key = found_key; 5352 while (key.offset < ekey->offset + left_len) { 5353 ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item); 5354 right_type = btrfs_file_extent_type(eb, ei); 5355 if (right_type != BTRFS_FILE_EXTENT_REG && 5356 right_type != BTRFS_FILE_EXTENT_INLINE) { 5357 ret = 0; 5358 goto out; 5359 } 5360 5361 if (right_type == BTRFS_FILE_EXTENT_INLINE) { 5362 right_len = btrfs_file_extent_inline_len(eb, slot, ei); 5363 right_len = PAGE_ALIGN(right_len); 5364 } else { 5365 right_len = btrfs_file_extent_num_bytes(eb, ei); 5366 } 5367 5368 /* 5369 * Are we at extent 8? If yes, we know the extent is changed. 5370 * This may only happen on the first iteration. 5371 */ 5372 if (found_key.offset + right_len <= ekey->offset) { 5373 /* If we're a hole just pretend nothing changed */ 5374 ret = (left_disknr) ? 0 : 1; 5375 goto out; 5376 } 5377 5378 /* 5379 * We just wanted to see if when we have an inline extent, what 5380 * follows it is a regular extent (wanted to check the above 5381 * condition for inline extents too). This should normally not 5382 * happen but it's possible for example when we have an inline 5383 * compressed extent representing data with a size matching 5384 * the page size (currently the same as sector size). 5385 */ 5386 if (right_type == BTRFS_FILE_EXTENT_INLINE) { 5387 ret = 0; 5388 goto out; 5389 } 5390 5391 right_disknr = btrfs_file_extent_disk_bytenr(eb, ei); 5392 right_offset = btrfs_file_extent_offset(eb, ei); 5393 right_gen = btrfs_file_extent_generation(eb, ei); 5394 5395 left_offset_fixed = left_offset; 5396 if (key.offset < ekey->offset) { 5397 /* Fix the right offset for 2a and 7. */ 5398 right_offset += ekey->offset - key.offset; 5399 } else { 5400 /* Fix the left offset for all behind 2a and 2b */ 5401 left_offset_fixed += key.offset - ekey->offset; 5402 } 5403 5404 /* 5405 * Check if we have the same extent. 5406 */ 5407 if (left_disknr != right_disknr || 5408 left_offset_fixed != right_offset || 5409 left_gen != right_gen) { 5410 ret = 0; 5411 goto out; 5412 } 5413 5414 /* 5415 * Go to the next extent. 5416 */ 5417 ret = btrfs_next_item(sctx->parent_root, path); 5418 if (ret < 0) 5419 goto out; 5420 if (!ret) { 5421 eb = path->nodes[0]; 5422 slot = path->slots[0]; 5423 btrfs_item_key_to_cpu(eb, &found_key, slot); 5424 } 5425 if (ret || found_key.objectid != key.objectid || 5426 found_key.type != key.type) { 5427 key.offset += right_len; 5428 break; 5429 } 5430 if (found_key.offset != key.offset + right_len) { 5431 ret = 0; 5432 goto out; 5433 } 5434 key = found_key; 5435 } 5436 5437 /* 5438 * We're now behind the left extent (treat as unchanged) or at the end 5439 * of the right side (treat as changed). 5440 */ 5441 if (key.offset >= ekey->offset + left_len) 5442 ret = 1; 5443 else 5444 ret = 0; 5445 5446 5447 out: 5448 btrfs_free_path(path); 5449 return ret; 5450 } 5451 5452 static int get_last_extent(struct send_ctx *sctx, u64 offset) 5453 { 5454 struct btrfs_path *path; 5455 struct btrfs_root *root = sctx->send_root; 5456 struct btrfs_file_extent_item *fi; 5457 struct btrfs_key key; 5458 u64 extent_end; 5459 u8 type; 5460 int ret; 5461 5462 path = alloc_path_for_send(); 5463 if (!path) 5464 return -ENOMEM; 5465 5466 sctx->cur_inode_last_extent = 0; 5467 5468 key.objectid = sctx->cur_ino; 5469 key.type = BTRFS_EXTENT_DATA_KEY; 5470 key.offset = offset; 5471 ret = btrfs_search_slot_for_read(root, &key, path, 0, 1); 5472 if (ret < 0) 5473 goto out; 5474 ret = 0; 5475 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); 5476 if (key.objectid != sctx->cur_ino || key.type != BTRFS_EXTENT_DATA_KEY) 5477 goto out; 5478 5479 fi = btrfs_item_ptr(path->nodes[0], path->slots[0], 5480 struct btrfs_file_extent_item); 5481 type = btrfs_file_extent_type(path->nodes[0], fi); 5482 if (type == BTRFS_FILE_EXTENT_INLINE) { 5483 u64 size = btrfs_file_extent_inline_len(path->nodes[0], 5484 path->slots[0], fi); 5485 extent_end = ALIGN(key.offset + size, 5486 sctx->send_root->fs_info->sectorsize); 5487 } else { 5488 extent_end = key.offset + 5489 btrfs_file_extent_num_bytes(path->nodes[0], fi); 5490 } 5491 sctx->cur_inode_last_extent = extent_end; 5492 out: 5493 btrfs_free_path(path); 5494 return ret; 5495 } 5496 5497 static int range_is_hole_in_parent(struct send_ctx *sctx, 5498 const u64 start, 5499 const u64 end) 5500 { 5501 struct btrfs_path *path; 5502 struct btrfs_key key; 5503 struct btrfs_root *root = sctx->parent_root; 5504 u64 search_start = start; 5505 int ret; 5506 5507 path = alloc_path_for_send(); 5508 if (!path) 5509 return -ENOMEM; 5510 5511 key.objectid = sctx->cur_ino; 5512 key.type = BTRFS_EXTENT_DATA_KEY; 5513 key.offset = search_start; 5514 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 5515 if (ret < 0) 5516 goto out; 5517 if (ret > 0 && path->slots[0] > 0) 5518 path->slots[0]--; 5519 5520 while (search_start < end) { 5521 struct extent_buffer *leaf = path->nodes[0]; 5522 int slot = path->slots[0]; 5523 struct btrfs_file_extent_item *fi; 5524 u64 extent_end; 5525 5526 if (slot >= btrfs_header_nritems(leaf)) { 5527 ret = btrfs_next_leaf(root, path); 5528 if (ret < 0) 5529 goto out; 5530 else if (ret > 0) 5531 break; 5532 continue; 5533 } 5534 5535 btrfs_item_key_to_cpu(leaf, &key, slot); 5536 if (key.objectid < sctx->cur_ino || 5537 key.type < BTRFS_EXTENT_DATA_KEY) 5538 goto next; 5539 if (key.objectid > sctx->cur_ino || 5540 key.type > BTRFS_EXTENT_DATA_KEY || 5541 key.offset >= end) 5542 break; 5543 5544 fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item); 5545 if (btrfs_file_extent_type(leaf, fi) == 5546 BTRFS_FILE_EXTENT_INLINE) { 5547 u64 size = btrfs_file_extent_inline_len(leaf, slot, fi); 5548 5549 extent_end = ALIGN(key.offset + size, 5550 root->fs_info->sectorsize); 5551 } else { 5552 extent_end = key.offset + 5553 btrfs_file_extent_num_bytes(leaf, fi); 5554 } 5555 if (extent_end <= start) 5556 goto next; 5557 if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0) { 5558 search_start = extent_end; 5559 goto next; 5560 } 5561 ret = 0; 5562 goto out; 5563 next: 5564 path->slots[0]++; 5565 } 5566 ret = 1; 5567 out: 5568 btrfs_free_path(path); 5569 return ret; 5570 } 5571 5572 static int maybe_send_hole(struct send_ctx *sctx, struct btrfs_path *path, 5573 struct btrfs_key *key) 5574 { 5575 struct btrfs_file_extent_item *fi; 5576 u64 extent_end; 5577 u8 type; 5578 int ret = 0; 5579 5580 if (sctx->cur_ino != key->objectid || !need_send_hole(sctx)) 5581 return 0; 5582 5583 if (sctx->cur_inode_last_extent == (u64)-1) { 5584 ret = get_last_extent(sctx, key->offset - 1); 5585 if (ret) 5586 return ret; 5587 } 5588 5589 fi = btrfs_item_ptr(path->nodes[0], path->slots[0], 5590 struct btrfs_file_extent_item); 5591 type = btrfs_file_extent_type(path->nodes[0], fi); 5592 if (type == BTRFS_FILE_EXTENT_INLINE) { 5593 u64 size = btrfs_file_extent_inline_len(path->nodes[0], 5594 path->slots[0], fi); 5595 extent_end = ALIGN(key->offset + size, 5596 sctx->send_root->fs_info->sectorsize); 5597 } else { 5598 extent_end = key->offset + 5599 btrfs_file_extent_num_bytes(path->nodes[0], fi); 5600 } 5601 5602 if (path->slots[0] == 0 && 5603 sctx->cur_inode_last_extent < key->offset) { 5604 /* 5605 * We might have skipped entire leafs that contained only 5606 * file extent items for our current inode. These leafs have 5607 * a generation number smaller (older) than the one in the 5608 * current leaf and the leaf our last extent came from, and 5609 * are located between these 2 leafs. 5610 */ 5611 ret = get_last_extent(sctx, key->offset - 1); 5612 if (ret) 5613 return ret; 5614 } 5615 5616 if (sctx->cur_inode_last_extent < key->offset) { 5617 ret = range_is_hole_in_parent(sctx, 5618 sctx->cur_inode_last_extent, 5619 key->offset); 5620 if (ret < 0) 5621 return ret; 5622 else if (ret == 0) 5623 ret = send_hole(sctx, key->offset); 5624 else 5625 ret = 0; 5626 } 5627 sctx->cur_inode_last_extent = extent_end; 5628 return ret; 5629 } 5630 5631 static int process_extent(struct send_ctx *sctx, 5632 struct btrfs_path *path, 5633 struct btrfs_key *key) 5634 { 5635 struct clone_root *found_clone = NULL; 5636 int ret = 0; 5637 5638 if (S_ISLNK(sctx->cur_inode_mode)) 5639 return 0; 5640 5641 if (sctx->parent_root && !sctx->cur_inode_new) { 5642 ret = is_extent_unchanged(sctx, path, key); 5643 if (ret < 0) 5644 goto out; 5645 if (ret) { 5646 ret = 0; 5647 goto out_hole; 5648 } 5649 } else { 5650 struct btrfs_file_extent_item *ei; 5651 u8 type; 5652 5653 ei = btrfs_item_ptr(path->nodes[0], path->slots[0], 5654 struct btrfs_file_extent_item); 5655 type = btrfs_file_extent_type(path->nodes[0], ei); 5656 if (type == BTRFS_FILE_EXTENT_PREALLOC || 5657 type == BTRFS_FILE_EXTENT_REG) { 5658 /* 5659 * The send spec does not have a prealloc command yet, 5660 * so just leave a hole for prealloc'ed extents until 5661 * we have enough commands queued up to justify rev'ing 5662 * the send spec. 5663 */ 5664 if (type == BTRFS_FILE_EXTENT_PREALLOC) { 5665 ret = 0; 5666 goto out; 5667 } 5668 5669 /* Have a hole, just skip it. */ 5670 if (btrfs_file_extent_disk_bytenr(path->nodes[0], ei) == 0) { 5671 ret = 0; 5672 goto out; 5673 } 5674 } 5675 } 5676 5677 ret = find_extent_clone(sctx, path, key->objectid, key->offset, 5678 sctx->cur_inode_size, &found_clone); 5679 if (ret != -ENOENT && ret < 0) 5680 goto out; 5681 5682 ret = send_write_or_clone(sctx, path, key, found_clone); 5683 if (ret) 5684 goto out; 5685 out_hole: 5686 ret = maybe_send_hole(sctx, path, key); 5687 out: 5688 return ret; 5689 } 5690 5691 static int process_all_extents(struct send_ctx *sctx) 5692 { 5693 int ret; 5694 struct btrfs_root *root; 5695 struct btrfs_path *path; 5696 struct btrfs_key key; 5697 struct btrfs_key found_key; 5698 struct extent_buffer *eb; 5699 int slot; 5700 5701 root = sctx->send_root; 5702 path = alloc_path_for_send(); 5703 if (!path) 5704 return -ENOMEM; 5705 5706 key.objectid = sctx->cmp_key->objectid; 5707 key.type = BTRFS_EXTENT_DATA_KEY; 5708 key.offset = 0; 5709 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 5710 if (ret < 0) 5711 goto out; 5712 5713 while (1) { 5714 eb = path->nodes[0]; 5715 slot = path->slots[0]; 5716 5717 if (slot >= btrfs_header_nritems(eb)) { 5718 ret = btrfs_next_leaf(root, path); 5719 if (ret < 0) { 5720 goto out; 5721 } else if (ret > 0) { 5722 ret = 0; 5723 break; 5724 } 5725 continue; 5726 } 5727 5728 btrfs_item_key_to_cpu(eb, &found_key, slot); 5729 5730 if (found_key.objectid != key.objectid || 5731 found_key.type != key.type) { 5732 ret = 0; 5733 goto out; 5734 } 5735 5736 ret = process_extent(sctx, path, &found_key); 5737 if (ret < 0) 5738 goto out; 5739 5740 path->slots[0]++; 5741 } 5742 5743 out: 5744 btrfs_free_path(path); 5745 return ret; 5746 } 5747 5748 static int process_recorded_refs_if_needed(struct send_ctx *sctx, int at_end, 5749 int *pending_move, 5750 int *refs_processed) 5751 { 5752 int ret = 0; 5753 5754 if (sctx->cur_ino == 0) 5755 goto out; 5756 if (!at_end && sctx->cur_ino == sctx->cmp_key->objectid && 5757 sctx->cmp_key->type <= BTRFS_INODE_EXTREF_KEY) 5758 goto out; 5759 if (list_empty(&sctx->new_refs) && list_empty(&sctx->deleted_refs)) 5760 goto out; 5761 5762 ret = process_recorded_refs(sctx, pending_move); 5763 if (ret < 0) 5764 goto out; 5765 5766 *refs_processed = 1; 5767 out: 5768 return ret; 5769 } 5770 5771 static int finish_inode_if_needed(struct send_ctx *sctx, int at_end) 5772 { 5773 int ret = 0; 5774 u64 left_mode; 5775 u64 left_uid; 5776 u64 left_gid; 5777 u64 right_mode; 5778 u64 right_uid; 5779 u64 right_gid; 5780 int need_chmod = 0; 5781 int need_chown = 0; 5782 int need_truncate = 1; 5783 int pending_move = 0; 5784 int refs_processed = 0; 5785 5786 ret = process_recorded_refs_if_needed(sctx, at_end, &pending_move, 5787 &refs_processed); 5788 if (ret < 0) 5789 goto out; 5790 5791 /* 5792 * We have processed the refs and thus need to advance send_progress. 5793 * Now, calls to get_cur_xxx will take the updated refs of the current 5794 * inode into account. 5795 * 5796 * On the other hand, if our current inode is a directory and couldn't 5797 * be moved/renamed because its parent was renamed/moved too and it has 5798 * a higher inode number, we can only move/rename our current inode 5799 * after we moved/renamed its parent. Therefore in this case operate on 5800 * the old path (pre move/rename) of our current inode, and the 5801 * move/rename will be performed later. 5802 */ 5803 if (refs_processed && !pending_move) 5804 sctx->send_progress = sctx->cur_ino + 1; 5805 5806 if (sctx->cur_ino == 0 || sctx->cur_inode_deleted) 5807 goto out; 5808 if (!at_end && sctx->cmp_key->objectid == sctx->cur_ino) 5809 goto out; 5810 5811 ret = get_inode_info(sctx->send_root, sctx->cur_ino, NULL, NULL, 5812 &left_mode, &left_uid, &left_gid, NULL); 5813 if (ret < 0) 5814 goto out; 5815 5816 if (!sctx->parent_root || sctx->cur_inode_new) { 5817 need_chown = 1; 5818 if (!S_ISLNK(sctx->cur_inode_mode)) 5819 need_chmod = 1; 5820 if (sctx->cur_inode_next_write_offset == sctx->cur_inode_size) 5821 need_truncate = 0; 5822 } else { 5823 u64 old_size; 5824 5825 ret = get_inode_info(sctx->parent_root, sctx->cur_ino, 5826 &old_size, NULL, &right_mode, &right_uid, 5827 &right_gid, NULL); 5828 if (ret < 0) 5829 goto out; 5830 5831 if (left_uid != right_uid || left_gid != right_gid) 5832 need_chown = 1; 5833 if (!S_ISLNK(sctx->cur_inode_mode) && left_mode != right_mode) 5834 need_chmod = 1; 5835 if ((old_size == sctx->cur_inode_size) || 5836 (sctx->cur_inode_size > old_size && 5837 sctx->cur_inode_next_write_offset == sctx->cur_inode_size)) 5838 need_truncate = 0; 5839 } 5840 5841 if (S_ISREG(sctx->cur_inode_mode)) { 5842 if (need_send_hole(sctx)) { 5843 if (sctx->cur_inode_last_extent == (u64)-1 || 5844 sctx->cur_inode_last_extent < 5845 sctx->cur_inode_size) { 5846 ret = get_last_extent(sctx, (u64)-1); 5847 if (ret) 5848 goto out; 5849 } 5850 if (sctx->cur_inode_last_extent < 5851 sctx->cur_inode_size) { 5852 ret = send_hole(sctx, sctx->cur_inode_size); 5853 if (ret) 5854 goto out; 5855 } 5856 } 5857 if (need_truncate) { 5858 ret = send_truncate(sctx, sctx->cur_ino, 5859 sctx->cur_inode_gen, 5860 sctx->cur_inode_size); 5861 if (ret < 0) 5862 goto out; 5863 } 5864 } 5865 5866 if (need_chown) { 5867 ret = send_chown(sctx, sctx->cur_ino, sctx->cur_inode_gen, 5868 left_uid, left_gid); 5869 if (ret < 0) 5870 goto out; 5871 } 5872 if (need_chmod) { 5873 ret = send_chmod(sctx, sctx->cur_ino, sctx->cur_inode_gen, 5874 left_mode); 5875 if (ret < 0) 5876 goto out; 5877 } 5878 5879 /* 5880 * If other directory inodes depended on our current directory 5881 * inode's move/rename, now do their move/rename operations. 5882 */ 5883 if (!is_waiting_for_move(sctx, sctx->cur_ino)) { 5884 ret = apply_children_dir_moves(sctx); 5885 if (ret) 5886 goto out; 5887 /* 5888 * Need to send that every time, no matter if it actually 5889 * changed between the two trees as we have done changes to 5890 * the inode before. If our inode is a directory and it's 5891 * waiting to be moved/renamed, we will send its utimes when 5892 * it's moved/renamed, therefore we don't need to do it here. 5893 */ 5894 sctx->send_progress = sctx->cur_ino + 1; 5895 ret = send_utimes(sctx, sctx->cur_ino, sctx->cur_inode_gen); 5896 if (ret < 0) 5897 goto out; 5898 } 5899 5900 out: 5901 return ret; 5902 } 5903 5904 static int changed_inode(struct send_ctx *sctx, 5905 enum btrfs_compare_tree_result result) 5906 { 5907 int ret = 0; 5908 struct btrfs_key *key = sctx->cmp_key; 5909 struct btrfs_inode_item *left_ii = NULL; 5910 struct btrfs_inode_item *right_ii = NULL; 5911 u64 left_gen = 0; 5912 u64 right_gen = 0; 5913 5914 sctx->cur_ino = key->objectid; 5915 sctx->cur_inode_new_gen = 0; 5916 sctx->cur_inode_last_extent = (u64)-1; 5917 sctx->cur_inode_next_write_offset = 0; 5918 5919 /* 5920 * Set send_progress to current inode. This will tell all get_cur_xxx 5921 * functions that the current inode's refs are not updated yet. Later, 5922 * when process_recorded_refs is finished, it is set to cur_ino + 1. 5923 */ 5924 sctx->send_progress = sctx->cur_ino; 5925 5926 if (result == BTRFS_COMPARE_TREE_NEW || 5927 result == BTRFS_COMPARE_TREE_CHANGED) { 5928 left_ii = btrfs_item_ptr(sctx->left_path->nodes[0], 5929 sctx->left_path->slots[0], 5930 struct btrfs_inode_item); 5931 left_gen = btrfs_inode_generation(sctx->left_path->nodes[0], 5932 left_ii); 5933 } else { 5934 right_ii = btrfs_item_ptr(sctx->right_path->nodes[0], 5935 sctx->right_path->slots[0], 5936 struct btrfs_inode_item); 5937 right_gen = btrfs_inode_generation(sctx->right_path->nodes[0], 5938 right_ii); 5939 } 5940 if (result == BTRFS_COMPARE_TREE_CHANGED) { 5941 right_ii = btrfs_item_ptr(sctx->right_path->nodes[0], 5942 sctx->right_path->slots[0], 5943 struct btrfs_inode_item); 5944 5945 right_gen = btrfs_inode_generation(sctx->right_path->nodes[0], 5946 right_ii); 5947 5948 /* 5949 * The cur_ino = root dir case is special here. We can't treat 5950 * the inode as deleted+reused because it would generate a 5951 * stream that tries to delete/mkdir the root dir. 5952 */ 5953 if (left_gen != right_gen && 5954 sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) 5955 sctx->cur_inode_new_gen = 1; 5956 } 5957 5958 if (result == BTRFS_COMPARE_TREE_NEW) { 5959 sctx->cur_inode_gen = left_gen; 5960 sctx->cur_inode_new = 1; 5961 sctx->cur_inode_deleted = 0; 5962 sctx->cur_inode_size = btrfs_inode_size( 5963 sctx->left_path->nodes[0], left_ii); 5964 sctx->cur_inode_mode = btrfs_inode_mode( 5965 sctx->left_path->nodes[0], left_ii); 5966 sctx->cur_inode_rdev = btrfs_inode_rdev( 5967 sctx->left_path->nodes[0], left_ii); 5968 if (sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) 5969 ret = send_create_inode_if_needed(sctx); 5970 } else if (result == BTRFS_COMPARE_TREE_DELETED) { 5971 sctx->cur_inode_gen = right_gen; 5972 sctx->cur_inode_new = 0; 5973 sctx->cur_inode_deleted = 1; 5974 sctx->cur_inode_size = btrfs_inode_size( 5975 sctx->right_path->nodes[0], right_ii); 5976 sctx->cur_inode_mode = btrfs_inode_mode( 5977 sctx->right_path->nodes[0], right_ii); 5978 } else if (result == BTRFS_COMPARE_TREE_CHANGED) { 5979 /* 5980 * We need to do some special handling in case the inode was 5981 * reported as changed with a changed generation number. This 5982 * means that the original inode was deleted and new inode 5983 * reused the same inum. So we have to treat the old inode as 5984 * deleted and the new one as new. 5985 */ 5986 if (sctx->cur_inode_new_gen) { 5987 /* 5988 * First, process the inode as if it was deleted. 5989 */ 5990 sctx->cur_inode_gen = right_gen; 5991 sctx->cur_inode_new = 0; 5992 sctx->cur_inode_deleted = 1; 5993 sctx->cur_inode_size = btrfs_inode_size( 5994 sctx->right_path->nodes[0], right_ii); 5995 sctx->cur_inode_mode = btrfs_inode_mode( 5996 sctx->right_path->nodes[0], right_ii); 5997 ret = process_all_refs(sctx, 5998 BTRFS_COMPARE_TREE_DELETED); 5999 if (ret < 0) 6000 goto out; 6001 6002 /* 6003 * Now process the inode as if it was new. 6004 */ 6005 sctx->cur_inode_gen = left_gen; 6006 sctx->cur_inode_new = 1; 6007 sctx->cur_inode_deleted = 0; 6008 sctx->cur_inode_size = btrfs_inode_size( 6009 sctx->left_path->nodes[0], left_ii); 6010 sctx->cur_inode_mode = btrfs_inode_mode( 6011 sctx->left_path->nodes[0], left_ii); 6012 sctx->cur_inode_rdev = btrfs_inode_rdev( 6013 sctx->left_path->nodes[0], left_ii); 6014 ret = send_create_inode_if_needed(sctx); 6015 if (ret < 0) 6016 goto out; 6017 6018 ret = process_all_refs(sctx, BTRFS_COMPARE_TREE_NEW); 6019 if (ret < 0) 6020 goto out; 6021 /* 6022 * Advance send_progress now as we did not get into 6023 * process_recorded_refs_if_needed in the new_gen case. 6024 */ 6025 sctx->send_progress = sctx->cur_ino + 1; 6026 6027 /* 6028 * Now process all extents and xattrs of the inode as if 6029 * they were all new. 6030 */ 6031 ret = process_all_extents(sctx); 6032 if (ret < 0) 6033 goto out; 6034 ret = process_all_new_xattrs(sctx); 6035 if (ret < 0) 6036 goto out; 6037 } else { 6038 sctx->cur_inode_gen = left_gen; 6039 sctx->cur_inode_new = 0; 6040 sctx->cur_inode_new_gen = 0; 6041 sctx->cur_inode_deleted = 0; 6042 sctx->cur_inode_size = btrfs_inode_size( 6043 sctx->left_path->nodes[0], left_ii); 6044 sctx->cur_inode_mode = btrfs_inode_mode( 6045 sctx->left_path->nodes[0], left_ii); 6046 } 6047 } 6048 6049 out: 6050 return ret; 6051 } 6052 6053 /* 6054 * We have to process new refs before deleted refs, but compare_trees gives us 6055 * the new and deleted refs mixed. To fix this, we record the new/deleted refs 6056 * first and later process them in process_recorded_refs. 6057 * For the cur_inode_new_gen case, we skip recording completely because 6058 * changed_inode did already initiate processing of refs. The reason for this is 6059 * that in this case, compare_tree actually compares the refs of 2 different 6060 * inodes. To fix this, process_all_refs is used in changed_inode to handle all 6061 * refs of the right tree as deleted and all refs of the left tree as new. 6062 */ 6063 static int changed_ref(struct send_ctx *sctx, 6064 enum btrfs_compare_tree_result result) 6065 { 6066 int ret = 0; 6067 6068 if (sctx->cur_ino != sctx->cmp_key->objectid) { 6069 inconsistent_snapshot_error(sctx, result, "reference"); 6070 return -EIO; 6071 } 6072 6073 if (!sctx->cur_inode_new_gen && 6074 sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) { 6075 if (result == BTRFS_COMPARE_TREE_NEW) 6076 ret = record_new_ref(sctx); 6077 else if (result == BTRFS_COMPARE_TREE_DELETED) 6078 ret = record_deleted_ref(sctx); 6079 else if (result == BTRFS_COMPARE_TREE_CHANGED) 6080 ret = record_changed_ref(sctx); 6081 } 6082 6083 return ret; 6084 } 6085 6086 /* 6087 * Process new/deleted/changed xattrs. We skip processing in the 6088 * cur_inode_new_gen case because changed_inode did already initiate processing 6089 * of xattrs. The reason is the same as in changed_ref 6090 */ 6091 static int changed_xattr(struct send_ctx *sctx, 6092 enum btrfs_compare_tree_result result) 6093 { 6094 int ret = 0; 6095 6096 if (sctx->cur_ino != sctx->cmp_key->objectid) { 6097 inconsistent_snapshot_error(sctx, result, "xattr"); 6098 return -EIO; 6099 } 6100 6101 if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) { 6102 if (result == BTRFS_COMPARE_TREE_NEW) 6103 ret = process_new_xattr(sctx); 6104 else if (result == BTRFS_COMPARE_TREE_DELETED) 6105 ret = process_deleted_xattr(sctx); 6106 else if (result == BTRFS_COMPARE_TREE_CHANGED) 6107 ret = process_changed_xattr(sctx); 6108 } 6109 6110 return ret; 6111 } 6112 6113 /* 6114 * Process new/deleted/changed extents. We skip processing in the 6115 * cur_inode_new_gen case because changed_inode did already initiate processing 6116 * of extents. The reason is the same as in changed_ref 6117 */ 6118 static int changed_extent(struct send_ctx *sctx, 6119 enum btrfs_compare_tree_result result) 6120 { 6121 int ret = 0; 6122 6123 if (sctx->cur_ino != sctx->cmp_key->objectid) { 6124 6125 if (result == BTRFS_COMPARE_TREE_CHANGED) { 6126 struct extent_buffer *leaf_l; 6127 struct extent_buffer *leaf_r; 6128 struct btrfs_file_extent_item *ei_l; 6129 struct btrfs_file_extent_item *ei_r; 6130 6131 leaf_l = sctx->left_path->nodes[0]; 6132 leaf_r = sctx->right_path->nodes[0]; 6133 ei_l = btrfs_item_ptr(leaf_l, 6134 sctx->left_path->slots[0], 6135 struct btrfs_file_extent_item); 6136 ei_r = btrfs_item_ptr(leaf_r, 6137 sctx->right_path->slots[0], 6138 struct btrfs_file_extent_item); 6139 6140 /* 6141 * We may have found an extent item that has changed 6142 * only its disk_bytenr field and the corresponding 6143 * inode item was not updated. This case happens due to 6144 * very specific timings during relocation when a leaf 6145 * that contains file extent items is COWed while 6146 * relocation is ongoing and its in the stage where it 6147 * updates data pointers. So when this happens we can 6148 * safely ignore it since we know it's the same extent, 6149 * but just at different logical and physical locations 6150 * (when an extent is fully replaced with a new one, we 6151 * know the generation number must have changed too, 6152 * since snapshot creation implies committing the current 6153 * transaction, and the inode item must have been updated 6154 * as well). 6155 * This replacement of the disk_bytenr happens at 6156 * relocation.c:replace_file_extents() through 6157 * relocation.c:btrfs_reloc_cow_block(). 6158 */ 6159 if (btrfs_file_extent_generation(leaf_l, ei_l) == 6160 btrfs_file_extent_generation(leaf_r, ei_r) && 6161 btrfs_file_extent_ram_bytes(leaf_l, ei_l) == 6162 btrfs_file_extent_ram_bytes(leaf_r, ei_r) && 6163 btrfs_file_extent_compression(leaf_l, ei_l) == 6164 btrfs_file_extent_compression(leaf_r, ei_r) && 6165 btrfs_file_extent_encryption(leaf_l, ei_l) == 6166 btrfs_file_extent_encryption(leaf_r, ei_r) && 6167 btrfs_file_extent_other_encoding(leaf_l, ei_l) == 6168 btrfs_file_extent_other_encoding(leaf_r, ei_r) && 6169 btrfs_file_extent_type(leaf_l, ei_l) == 6170 btrfs_file_extent_type(leaf_r, ei_r) && 6171 btrfs_file_extent_disk_bytenr(leaf_l, ei_l) != 6172 btrfs_file_extent_disk_bytenr(leaf_r, ei_r) && 6173 btrfs_file_extent_disk_num_bytes(leaf_l, ei_l) == 6174 btrfs_file_extent_disk_num_bytes(leaf_r, ei_r) && 6175 btrfs_file_extent_offset(leaf_l, ei_l) == 6176 btrfs_file_extent_offset(leaf_r, ei_r) && 6177 btrfs_file_extent_num_bytes(leaf_l, ei_l) == 6178 btrfs_file_extent_num_bytes(leaf_r, ei_r)) 6179 return 0; 6180 } 6181 6182 inconsistent_snapshot_error(sctx, result, "extent"); 6183 return -EIO; 6184 } 6185 6186 if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) { 6187 if (result != BTRFS_COMPARE_TREE_DELETED) 6188 ret = process_extent(sctx, sctx->left_path, 6189 sctx->cmp_key); 6190 } 6191 6192 return ret; 6193 } 6194 6195 static int dir_changed(struct send_ctx *sctx, u64 dir) 6196 { 6197 u64 orig_gen, new_gen; 6198 int ret; 6199 6200 ret = get_inode_info(sctx->send_root, dir, NULL, &new_gen, NULL, NULL, 6201 NULL, NULL); 6202 if (ret) 6203 return ret; 6204 6205 ret = get_inode_info(sctx->parent_root, dir, NULL, &orig_gen, NULL, 6206 NULL, NULL, NULL); 6207 if (ret) 6208 return ret; 6209 6210 return (orig_gen != new_gen) ? 1 : 0; 6211 } 6212 6213 static int compare_refs(struct send_ctx *sctx, struct btrfs_path *path, 6214 struct btrfs_key *key) 6215 { 6216 struct btrfs_inode_extref *extref; 6217 struct extent_buffer *leaf; 6218 u64 dirid = 0, last_dirid = 0; 6219 unsigned long ptr; 6220 u32 item_size; 6221 u32 cur_offset = 0; 6222 int ref_name_len; 6223 int ret = 0; 6224 6225 /* Easy case, just check this one dirid */ 6226 if (key->type == BTRFS_INODE_REF_KEY) { 6227 dirid = key->offset; 6228 6229 ret = dir_changed(sctx, dirid); 6230 goto out; 6231 } 6232 6233 leaf = path->nodes[0]; 6234 item_size = btrfs_item_size_nr(leaf, path->slots[0]); 6235 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); 6236 while (cur_offset < item_size) { 6237 extref = (struct btrfs_inode_extref *)(ptr + 6238 cur_offset); 6239 dirid = btrfs_inode_extref_parent(leaf, extref); 6240 ref_name_len = btrfs_inode_extref_name_len(leaf, extref); 6241 cur_offset += ref_name_len + sizeof(*extref); 6242 if (dirid == last_dirid) 6243 continue; 6244 ret = dir_changed(sctx, dirid); 6245 if (ret) 6246 break; 6247 last_dirid = dirid; 6248 } 6249 out: 6250 return ret; 6251 } 6252 6253 /* 6254 * Updates compare related fields in sctx and simply forwards to the actual 6255 * changed_xxx functions. 6256 */ 6257 static int changed_cb(struct btrfs_path *left_path, 6258 struct btrfs_path *right_path, 6259 struct btrfs_key *key, 6260 enum btrfs_compare_tree_result result, 6261 void *ctx) 6262 { 6263 int ret = 0; 6264 struct send_ctx *sctx = ctx; 6265 6266 if (result == BTRFS_COMPARE_TREE_SAME) { 6267 if (key->type == BTRFS_INODE_REF_KEY || 6268 key->type == BTRFS_INODE_EXTREF_KEY) { 6269 ret = compare_refs(sctx, left_path, key); 6270 if (!ret) 6271 return 0; 6272 if (ret < 0) 6273 return ret; 6274 } else if (key->type == BTRFS_EXTENT_DATA_KEY) { 6275 return maybe_send_hole(sctx, left_path, key); 6276 } else { 6277 return 0; 6278 } 6279 result = BTRFS_COMPARE_TREE_CHANGED; 6280 ret = 0; 6281 } 6282 6283 sctx->left_path = left_path; 6284 sctx->right_path = right_path; 6285 sctx->cmp_key = key; 6286 6287 ret = finish_inode_if_needed(sctx, 0); 6288 if (ret < 0) 6289 goto out; 6290 6291 /* Ignore non-FS objects */ 6292 if (key->objectid == BTRFS_FREE_INO_OBJECTID || 6293 key->objectid == BTRFS_FREE_SPACE_OBJECTID) 6294 goto out; 6295 6296 if (key->type == BTRFS_INODE_ITEM_KEY) 6297 ret = changed_inode(sctx, result); 6298 else if (key->type == BTRFS_INODE_REF_KEY || 6299 key->type == BTRFS_INODE_EXTREF_KEY) 6300 ret = changed_ref(sctx, result); 6301 else if (key->type == BTRFS_XATTR_ITEM_KEY) 6302 ret = changed_xattr(sctx, result); 6303 else if (key->type == BTRFS_EXTENT_DATA_KEY) 6304 ret = changed_extent(sctx, result); 6305 6306 out: 6307 return ret; 6308 } 6309 6310 static int full_send_tree(struct send_ctx *sctx) 6311 { 6312 int ret; 6313 struct btrfs_root *send_root = sctx->send_root; 6314 struct btrfs_key key; 6315 struct btrfs_key found_key; 6316 struct btrfs_path *path; 6317 struct extent_buffer *eb; 6318 int slot; 6319 6320 path = alloc_path_for_send(); 6321 if (!path) 6322 return -ENOMEM; 6323 6324 key.objectid = BTRFS_FIRST_FREE_OBJECTID; 6325 key.type = BTRFS_INODE_ITEM_KEY; 6326 key.offset = 0; 6327 6328 ret = btrfs_search_slot_for_read(send_root, &key, path, 1, 0); 6329 if (ret < 0) 6330 goto out; 6331 if (ret) 6332 goto out_finish; 6333 6334 while (1) { 6335 eb = path->nodes[0]; 6336 slot = path->slots[0]; 6337 btrfs_item_key_to_cpu(eb, &found_key, slot); 6338 6339 ret = changed_cb(path, NULL, &found_key, 6340 BTRFS_COMPARE_TREE_NEW, sctx); 6341 if (ret < 0) 6342 goto out; 6343 6344 key.objectid = found_key.objectid; 6345 key.type = found_key.type; 6346 key.offset = found_key.offset + 1; 6347 6348 ret = btrfs_next_item(send_root, path); 6349 if (ret < 0) 6350 goto out; 6351 if (ret) { 6352 ret = 0; 6353 break; 6354 } 6355 } 6356 6357 out_finish: 6358 ret = finish_inode_if_needed(sctx, 1); 6359 6360 out: 6361 btrfs_free_path(path); 6362 return ret; 6363 } 6364 6365 static int send_subvol(struct send_ctx *sctx) 6366 { 6367 int ret; 6368 6369 if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_STREAM_HEADER)) { 6370 ret = send_header(sctx); 6371 if (ret < 0) 6372 goto out; 6373 } 6374 6375 ret = send_subvol_begin(sctx); 6376 if (ret < 0) 6377 goto out; 6378 6379 if (sctx->parent_root) { 6380 ret = btrfs_compare_trees(sctx->send_root, sctx->parent_root, 6381 changed_cb, sctx); 6382 if (ret < 0) 6383 goto out; 6384 ret = finish_inode_if_needed(sctx, 1); 6385 if (ret < 0) 6386 goto out; 6387 } else { 6388 ret = full_send_tree(sctx); 6389 if (ret < 0) 6390 goto out; 6391 } 6392 6393 out: 6394 free_recorded_refs(sctx); 6395 return ret; 6396 } 6397 6398 /* 6399 * If orphan cleanup did remove any orphans from a root, it means the tree 6400 * was modified and therefore the commit root is not the same as the current 6401 * root anymore. This is a problem, because send uses the commit root and 6402 * therefore can see inode items that don't exist in the current root anymore, 6403 * and for example make calls to btrfs_iget, which will do tree lookups based 6404 * on the current root and not on the commit root. Those lookups will fail, 6405 * returning a -ESTALE error, and making send fail with that error. So make 6406 * sure a send does not see any orphans we have just removed, and that it will 6407 * see the same inodes regardless of whether a transaction commit happened 6408 * before it started (meaning that the commit root will be the same as the 6409 * current root) or not. 6410 */ 6411 static int ensure_commit_roots_uptodate(struct send_ctx *sctx) 6412 { 6413 int i; 6414 struct btrfs_trans_handle *trans = NULL; 6415 6416 again: 6417 if (sctx->parent_root && 6418 sctx->parent_root->node != sctx->parent_root->commit_root) 6419 goto commit_trans; 6420 6421 for (i = 0; i < sctx->clone_roots_cnt; i++) 6422 if (sctx->clone_roots[i].root->node != 6423 sctx->clone_roots[i].root->commit_root) 6424 goto commit_trans; 6425 6426 if (trans) 6427 return btrfs_end_transaction(trans); 6428 6429 return 0; 6430 6431 commit_trans: 6432 /* Use any root, all fs roots will get their commit roots updated. */ 6433 if (!trans) { 6434 trans = btrfs_join_transaction(sctx->send_root); 6435 if (IS_ERR(trans)) 6436 return PTR_ERR(trans); 6437 goto again; 6438 } 6439 6440 return btrfs_commit_transaction(trans); 6441 } 6442 6443 static void btrfs_root_dec_send_in_progress(struct btrfs_root* root) 6444 { 6445 spin_lock(&root->root_item_lock); 6446 root->send_in_progress--; 6447 /* 6448 * Not much left to do, we don't know why it's unbalanced and 6449 * can't blindly reset it to 0. 6450 */ 6451 if (root->send_in_progress < 0) 6452 btrfs_err(root->fs_info, 6453 "send_in_progres unbalanced %d root %llu", 6454 root->send_in_progress, root->root_key.objectid); 6455 spin_unlock(&root->root_item_lock); 6456 } 6457 6458 long btrfs_ioctl_send(struct file *mnt_file, struct btrfs_ioctl_send_args *arg) 6459 { 6460 int ret = 0; 6461 struct btrfs_root *send_root = BTRFS_I(file_inode(mnt_file))->root; 6462 struct btrfs_fs_info *fs_info = send_root->fs_info; 6463 struct btrfs_root *clone_root; 6464 struct btrfs_key key; 6465 struct send_ctx *sctx = NULL; 6466 u32 i; 6467 u64 *clone_sources_tmp = NULL; 6468 int clone_sources_to_rollback = 0; 6469 unsigned alloc_size; 6470 int sort_clone_roots = 0; 6471 int index; 6472 6473 if (!capable(CAP_SYS_ADMIN)) 6474 return -EPERM; 6475 6476 /* 6477 * The subvolume must remain read-only during send, protect against 6478 * making it RW. This also protects against deletion. 6479 */ 6480 spin_lock(&send_root->root_item_lock); 6481 send_root->send_in_progress++; 6482 spin_unlock(&send_root->root_item_lock); 6483 6484 /* 6485 * This is done when we lookup the root, it should already be complete 6486 * by the time we get here. 6487 */ 6488 WARN_ON(send_root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE); 6489 6490 /* 6491 * Userspace tools do the checks and warn the user if it's 6492 * not RO. 6493 */ 6494 if (!btrfs_root_readonly(send_root)) { 6495 ret = -EPERM; 6496 goto out; 6497 } 6498 6499 /* 6500 * Check that we don't overflow at later allocations, we request 6501 * clone_sources_count + 1 items, and compare to unsigned long inside 6502 * access_ok. 6503 */ 6504 if (arg->clone_sources_count > 6505 ULONG_MAX / sizeof(struct clone_root) - 1) { 6506 ret = -EINVAL; 6507 goto out; 6508 } 6509 6510 if (!access_ok(VERIFY_READ, arg->clone_sources, 6511 sizeof(*arg->clone_sources) * 6512 arg->clone_sources_count)) { 6513 ret = -EFAULT; 6514 goto out; 6515 } 6516 6517 if (arg->flags & ~BTRFS_SEND_FLAG_MASK) { 6518 ret = -EINVAL; 6519 goto out; 6520 } 6521 6522 sctx = kzalloc(sizeof(struct send_ctx), GFP_KERNEL); 6523 if (!sctx) { 6524 ret = -ENOMEM; 6525 goto out; 6526 } 6527 6528 INIT_LIST_HEAD(&sctx->new_refs); 6529 INIT_LIST_HEAD(&sctx->deleted_refs); 6530 INIT_RADIX_TREE(&sctx->name_cache, GFP_KERNEL); 6531 INIT_LIST_HEAD(&sctx->name_cache_list); 6532 6533 sctx->flags = arg->flags; 6534 6535 sctx->send_filp = fget(arg->send_fd); 6536 if (!sctx->send_filp) { 6537 ret = -EBADF; 6538 goto out; 6539 } 6540 6541 sctx->send_root = send_root; 6542 /* 6543 * Unlikely but possible, if the subvolume is marked for deletion but 6544 * is slow to remove the directory entry, send can still be started 6545 */ 6546 if (btrfs_root_dead(sctx->send_root)) { 6547 ret = -EPERM; 6548 goto out; 6549 } 6550 6551 sctx->clone_roots_cnt = arg->clone_sources_count; 6552 6553 sctx->send_max_size = BTRFS_SEND_BUF_SIZE; 6554 sctx->send_buf = kvmalloc(sctx->send_max_size, GFP_KERNEL); 6555 if (!sctx->send_buf) { 6556 ret = -ENOMEM; 6557 goto out; 6558 } 6559 6560 sctx->read_buf = kvmalloc(BTRFS_SEND_READ_SIZE, GFP_KERNEL); 6561 if (!sctx->read_buf) { 6562 ret = -ENOMEM; 6563 goto out; 6564 } 6565 6566 sctx->pending_dir_moves = RB_ROOT; 6567 sctx->waiting_dir_moves = RB_ROOT; 6568 sctx->orphan_dirs = RB_ROOT; 6569 6570 alloc_size = sizeof(struct clone_root) * (arg->clone_sources_count + 1); 6571 6572 sctx->clone_roots = kzalloc(alloc_size, GFP_KERNEL); 6573 if (!sctx->clone_roots) { 6574 ret = -ENOMEM; 6575 goto out; 6576 } 6577 6578 alloc_size = arg->clone_sources_count * sizeof(*arg->clone_sources); 6579 6580 if (arg->clone_sources_count) { 6581 clone_sources_tmp = kvmalloc(alloc_size, GFP_KERNEL); 6582 if (!clone_sources_tmp) { 6583 ret = -ENOMEM; 6584 goto out; 6585 } 6586 6587 ret = copy_from_user(clone_sources_tmp, arg->clone_sources, 6588 alloc_size); 6589 if (ret) { 6590 ret = -EFAULT; 6591 goto out; 6592 } 6593 6594 for (i = 0; i < arg->clone_sources_count; i++) { 6595 key.objectid = clone_sources_tmp[i]; 6596 key.type = BTRFS_ROOT_ITEM_KEY; 6597 key.offset = (u64)-1; 6598 6599 index = srcu_read_lock(&fs_info->subvol_srcu); 6600 6601 clone_root = btrfs_read_fs_root_no_name(fs_info, &key); 6602 if (IS_ERR(clone_root)) { 6603 srcu_read_unlock(&fs_info->subvol_srcu, index); 6604 ret = PTR_ERR(clone_root); 6605 goto out; 6606 } 6607 spin_lock(&clone_root->root_item_lock); 6608 if (!btrfs_root_readonly(clone_root) || 6609 btrfs_root_dead(clone_root)) { 6610 spin_unlock(&clone_root->root_item_lock); 6611 srcu_read_unlock(&fs_info->subvol_srcu, index); 6612 ret = -EPERM; 6613 goto out; 6614 } 6615 clone_root->send_in_progress++; 6616 spin_unlock(&clone_root->root_item_lock); 6617 srcu_read_unlock(&fs_info->subvol_srcu, index); 6618 6619 sctx->clone_roots[i].root = clone_root; 6620 clone_sources_to_rollback = i + 1; 6621 } 6622 kvfree(clone_sources_tmp); 6623 clone_sources_tmp = NULL; 6624 } 6625 6626 if (arg->parent_root) { 6627 key.objectid = arg->parent_root; 6628 key.type = BTRFS_ROOT_ITEM_KEY; 6629 key.offset = (u64)-1; 6630 6631 index = srcu_read_lock(&fs_info->subvol_srcu); 6632 6633 sctx->parent_root = btrfs_read_fs_root_no_name(fs_info, &key); 6634 if (IS_ERR(sctx->parent_root)) { 6635 srcu_read_unlock(&fs_info->subvol_srcu, index); 6636 ret = PTR_ERR(sctx->parent_root); 6637 goto out; 6638 } 6639 6640 spin_lock(&sctx->parent_root->root_item_lock); 6641 sctx->parent_root->send_in_progress++; 6642 if (!btrfs_root_readonly(sctx->parent_root) || 6643 btrfs_root_dead(sctx->parent_root)) { 6644 spin_unlock(&sctx->parent_root->root_item_lock); 6645 srcu_read_unlock(&fs_info->subvol_srcu, index); 6646 ret = -EPERM; 6647 goto out; 6648 } 6649 spin_unlock(&sctx->parent_root->root_item_lock); 6650 6651 srcu_read_unlock(&fs_info->subvol_srcu, index); 6652 } 6653 6654 /* 6655 * Clones from send_root are allowed, but only if the clone source 6656 * is behind the current send position. This is checked while searching 6657 * for possible clone sources. 6658 */ 6659 sctx->clone_roots[sctx->clone_roots_cnt++].root = sctx->send_root; 6660 6661 /* We do a bsearch later */ 6662 sort(sctx->clone_roots, sctx->clone_roots_cnt, 6663 sizeof(*sctx->clone_roots), __clone_root_cmp_sort, 6664 NULL); 6665 sort_clone_roots = 1; 6666 6667 ret = ensure_commit_roots_uptodate(sctx); 6668 if (ret) 6669 goto out; 6670 6671 current->journal_info = BTRFS_SEND_TRANS_STUB; 6672 ret = send_subvol(sctx); 6673 current->journal_info = NULL; 6674 if (ret < 0) 6675 goto out; 6676 6677 if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_END_CMD)) { 6678 ret = begin_cmd(sctx, BTRFS_SEND_C_END); 6679 if (ret < 0) 6680 goto out; 6681 ret = send_cmd(sctx); 6682 if (ret < 0) 6683 goto out; 6684 } 6685 6686 out: 6687 WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->pending_dir_moves)); 6688 while (sctx && !RB_EMPTY_ROOT(&sctx->pending_dir_moves)) { 6689 struct rb_node *n; 6690 struct pending_dir_move *pm; 6691 6692 n = rb_first(&sctx->pending_dir_moves); 6693 pm = rb_entry(n, struct pending_dir_move, node); 6694 while (!list_empty(&pm->list)) { 6695 struct pending_dir_move *pm2; 6696 6697 pm2 = list_first_entry(&pm->list, 6698 struct pending_dir_move, list); 6699 free_pending_move(sctx, pm2); 6700 } 6701 free_pending_move(sctx, pm); 6702 } 6703 6704 WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves)); 6705 while (sctx && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves)) { 6706 struct rb_node *n; 6707 struct waiting_dir_move *dm; 6708 6709 n = rb_first(&sctx->waiting_dir_moves); 6710 dm = rb_entry(n, struct waiting_dir_move, node); 6711 rb_erase(&dm->node, &sctx->waiting_dir_moves); 6712 kfree(dm); 6713 } 6714 6715 WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->orphan_dirs)); 6716 while (sctx && !RB_EMPTY_ROOT(&sctx->orphan_dirs)) { 6717 struct rb_node *n; 6718 struct orphan_dir_info *odi; 6719 6720 n = rb_first(&sctx->orphan_dirs); 6721 odi = rb_entry(n, struct orphan_dir_info, node); 6722 free_orphan_dir_info(sctx, odi); 6723 } 6724 6725 if (sort_clone_roots) { 6726 for (i = 0; i < sctx->clone_roots_cnt; i++) 6727 btrfs_root_dec_send_in_progress( 6728 sctx->clone_roots[i].root); 6729 } else { 6730 for (i = 0; sctx && i < clone_sources_to_rollback; i++) 6731 btrfs_root_dec_send_in_progress( 6732 sctx->clone_roots[i].root); 6733 6734 btrfs_root_dec_send_in_progress(send_root); 6735 } 6736 if (sctx && !IS_ERR_OR_NULL(sctx->parent_root)) 6737 btrfs_root_dec_send_in_progress(sctx->parent_root); 6738 6739 kvfree(clone_sources_tmp); 6740 6741 if (sctx) { 6742 if (sctx->send_filp) 6743 fput(sctx->send_filp); 6744 6745 kvfree(sctx->clone_roots); 6746 kvfree(sctx->send_buf); 6747 kvfree(sctx->read_buf); 6748 6749 name_cache_free(sctx); 6750 6751 kfree(sctx); 6752 } 6753 6754 return ret; 6755 } 6756