/*
 * Copyright (C) 2012 Alexander Block. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/bsearch.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/sort.h>
#include <linux/mount.h>
#include <linux/xattr.h>
#include <linux/posix_acl_xattr.h>
#include <linux/radix-tree.h>
#include <linux/crc32c.h>
#include <linux/vmalloc.h>
#include <linux/string.h>

#include "send.h"
#include "backref.h"
#include "locking.h"
#include "disk-io.h"
#include "btrfs_inode.h"
#include "transaction.h"

static int g_verbose = 0;

#define verbose_printk(...) if (g_verbose) printk(__VA_ARGS__)

/*
 * A fs_path is a helper to dynamically build path names with unknown size.
 * It reallocates the internal buffer on demand.
 * It allows fast adding of path elements on the right side (normal path) and
 * fast adding to the left side (reversed path). A reversed path can also be
 * unreversed if needed.
 */
struct fs_path {
	union {
		struct {
			char *start;
			char *end;
			char *prepared;

			char *buf;
			int buf_len;
			unsigned int reversed:1;
			unsigned int virtual_mem:1;
			char inline_buf[];
		};
		char pad[PAGE_SIZE];
	};
};
#define FS_PATH_INLINE_SIZE \
	(sizeof(struct fs_path) - offsetof(struct fs_path, inline_buf))
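
/*
 * Illustrative sketch (not part of the original code) of how the fs_path
 * helpers below compose "a/b". A reversed fs_path grows leftwards, which is
 * what get_cur_path() relies on when walking from an inode up to the root:
 *
 *	struct fs_path *p = fs_path_alloc_reversed();
 *
 *	if (!p)
 *		return -ENOMEM;
 *	fs_path_add(p, "b", 1);		p->start is now "b"
 *	fs_path_add(p, "a", 1);		"a" is prepended: p->start is "a/b"
 *	fs_path_unreverse(p);		move the string to the buffer start
 *	fs_path_free(p);
 */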

/* reused for each extent */
struct clone_root {
	struct btrfs_root *root;
	u64 ino;
	u64 offset;

	u64 found_refs;
};

#define SEND_CTX_MAX_NAME_CACHE_SIZE 128
#define SEND_CTX_NAME_CACHE_CLEAN_SIZE (SEND_CTX_MAX_NAME_CACHE_SIZE * 2)

struct send_ctx {
	struct file *send_filp;
	loff_t send_off;
	char *send_buf;
	u32 send_size;
	u32 send_max_size;
	u64 total_send_size;
	u64 cmd_send_size[BTRFS_SEND_C_MAX + 1];
	u64 flags;	/* 'flags' member of btrfs_ioctl_send_args is u64 */

	struct btrfs_root *send_root;
	struct btrfs_root *parent_root;
	struct clone_root *clone_roots;
	int clone_roots_cnt;

	/* current state of the compare_tree call */
	struct btrfs_path *left_path;
	struct btrfs_path *right_path;
	struct btrfs_key *cmp_key;

	/*
	 * Info about the currently processed inode. In case of deleted
	 * inodes, these are the values from the deleted inode.
	 */
	u64 cur_ino;
	u64 cur_inode_gen;
	int cur_inode_new;
	int cur_inode_new_gen;
	int cur_inode_deleted;
	u64 cur_inode_size;
	u64 cur_inode_mode;
	u64 cur_inode_last_extent;

	u64 send_progress;

	struct list_head new_refs;
	struct list_head deleted_refs;

	struct radix_tree_root name_cache;
	struct list_head name_cache_list;
	int name_cache_size;

	char *read_buf;

	/*
	 * We process inodes in increasing order, so if before an
	 * incremental send we reverse the parent/child relationship of
	 * directories such that a directory with a lower inode number was
	 * the parent of a directory with a higher inode number, and the one
	 * becoming the new parent got renamed too, we can't rename/move the
	 * directory with the lower inode number when we finish processing
	 * it - we must process the directory with the higher inode number
	 * first, then rename/move it and then rename/move the directory with
	 * the lower inode number. Example follows.
	 *
	 * Tree state when the first send was performed:
	 *
	 * .
	 * |-- a                        (ino 257)
	 *     |-- b                    (ino 258)
	 *         |
	 *         |
	 *         |-- c                (ino 259)
	 *         |   |-- d            (ino 260)
	 *         |
	 *         |-- c2               (ino 261)
	 *
	 * Tree state when the second (incremental) send is performed:
	 *
	 * .
	 * |-- a                        (ino 257)
	 *     |-- b                    (ino 258)
	 *         |-- c2               (ino 261)
	 *             |-- d2           (ino 260)
	 *                 |-- cc       (ino 259)
	 *
	 * The sequence of steps that led to the second state was:
	 *
	 * mv /a/b/c/d /a/b/c2/d2
	 * mv /a/b/c /a/b/c2/d2/cc
	 *
	 * "c" has the lower inode number, but we can't move it (2nd mv
	 * operation) before we move "d", which has the higher inode number.
	 *
	 * So we just memorize which move/rename operations must be performed
	 * later when their respective parent is processed and moved/renamed.
	 */

	/* Indexed by parent directory inode number. */
	struct rb_root pending_dir_moves;

	/*
	 * Reverse index, indexed by the inode number of a directory that
	 * is waiting for the move/rename of its immediate parent before its
	 * own move/rename can be performed.
	 */
	struct rb_root waiting_dir_moves;
};

struct pending_dir_move {
	struct rb_node node;
	struct list_head list;
	u64 parent_ino;
	u64 ino;
	u64 gen;
	struct list_head update_refs;
};

struct waiting_dir_move {
	struct rb_node node;
	u64 ino;
};

struct name_cache_entry {
	struct list_head list;
	/*
	 * radix_tree has only 32bit entries but we need to handle 64bit
	 * inums. We use the lower 32bit of the 64bit inum to store it in the
	 * tree. If more than one inum would fall into the same entry, we use
	 * radix_list to store the additional entries. radix_list is also
	 * used to store entries where two entries have the same inum but
	 * different generations.
	 */
	struct list_head radix_list;
	u64 ino;
	u64 gen;
	u64 parent_ino;
	u64 parent_gen;
	int ret;
	int need_later_update;
	int name_len;
	char name[];
};
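
/*
 * A hedged illustration of the cache keying described above: the radix tree
 * index is an unsigned long, so on 32bit kernels two inodes whose numbers
 * differ only in the upper 32 bits collide on the same slot. For example
 * (hypothetical inode numbers):
 *
 *	ino A = 0x0000000100000101
 *	ino B = 0x0000000200000101
 *
 * Both map to index 0x00000101; the second entry is simply chained onto the
 * first one's radix_list and disambiguated by comparing the full 64bit
 * ino/gen, as name_cache_search() does.
 */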

static int is_waiting_for_move(struct send_ctx *sctx, u64 ino);

static int need_send_hole(struct send_ctx *sctx)
{
	return (sctx->parent_root && !sctx->cur_inode_new &&
		!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted &&
		S_ISREG(sctx->cur_inode_mode));
}

static void fs_path_reset(struct fs_path *p)
{
	if (p->reversed) {
		p->start = p->buf + p->buf_len - 1;
		p->end = p->start;
		*p->start = 0;
	} else {
		p->start = p->buf;
		p->end = p->start;
		*p->start = 0;
	}
}

static struct fs_path *fs_path_alloc(void)
{
	struct fs_path *p;

	p = kmalloc(sizeof(*p), GFP_NOFS);
	if (!p)
		return NULL;
	p->reversed = 0;
	p->virtual_mem = 0;
	p->buf = p->inline_buf;
	p->buf_len = FS_PATH_INLINE_SIZE;
	fs_path_reset(p);
	return p;
}

static struct fs_path *fs_path_alloc_reversed(void)
{
	struct fs_path *p;

	p = fs_path_alloc();
	if (!p)
		return NULL;
	p->reversed = 1;
	fs_path_reset(p);
	return p;
}

static void fs_path_free(struct fs_path *p)
{
	if (!p)
		return;
	if (p->buf != p->inline_buf) {
		if (p->virtual_mem)
			vfree(p->buf);
		else
			kfree(p->buf);
	}
	kfree(p);
}

static int fs_path_len(struct fs_path *p)
{
	return p->end - p->start;
}

static int fs_path_ensure_buf(struct fs_path *p, int len)
{
	char *tmp_buf;
	int path_len;
	int old_buf_len;

	len++;

	if (p->buf_len >= len)
		return 0;

	path_len = p->end - p->start;
	old_buf_len = p->buf_len;
	len = PAGE_ALIGN(len);

	if (p->buf == p->inline_buf) {
		tmp_buf = kmalloc(len, GFP_NOFS | __GFP_NOWARN);
		if (!tmp_buf) {
			tmp_buf = vmalloc(len);
			if (!tmp_buf)
				return -ENOMEM;
			p->virtual_mem = 1;
		}
		memcpy(tmp_buf, p->buf, p->buf_len);
		p->buf = tmp_buf;
		p->buf_len = len;
	} else {
		if (p->virtual_mem) {
			tmp_buf = vmalloc(len);
			if (!tmp_buf)
				return -ENOMEM;
			memcpy(tmp_buf, p->buf, p->buf_len);
			vfree(p->buf);
		} else {
			tmp_buf = krealloc(p->buf, len, GFP_NOFS);
			if (!tmp_buf) {
				tmp_buf = vmalloc(len);
				if (!tmp_buf)
					return -ENOMEM;
				memcpy(tmp_buf, p->buf, p->buf_len);
				kfree(p->buf);
				p->virtual_mem = 1;
			}
		}
		p->buf = tmp_buf;
		p->buf_len = len;
	}
	if (p->reversed) {
		tmp_buf = p->buf + old_buf_len - path_len - 1;
		p->end = p->buf + p->buf_len - 1;
		p->start = p->end - path_len;
		memmove(p->start, tmp_buf, path_len + 1);
	} else {
		p->start = p->buf;
		p->end = p->start + path_len;
	}
	return 0;
}

static int fs_path_prepare_for_add(struct fs_path *p, int name_len)
{
	int ret;
	int new_len;

	new_len = p->end - p->start + name_len;
	if (p->start != p->end)
		new_len++;
	ret = fs_path_ensure_buf(p, new_len);
	if (ret < 0)
		goto out;

	if (p->reversed) {
		if (p->start != p->end)
			*--p->start = '/';
		p->start -= name_len;
		p->prepared = p->start;
	} else {
		if (p->start != p->end)
			*p->end++ = '/';
		p->prepared = p->end;
		p->end += name_len;
		*p->end = 0;
	}

out:
	return ret;
}

static int fs_path_add(struct fs_path *p, const char *name, int name_len)
{
	int ret;

	ret = fs_path_prepare_for_add(p, name_len);
	if (ret < 0)
		goto out;
	memcpy(p->prepared, name, name_len);
	p->prepared = NULL;

out:
	return ret;
}

static int fs_path_add_path(struct fs_path *p, struct fs_path *p2)
{
	int ret;

	ret = fs_path_prepare_for_add(p, p2->end - p2->start);
	if (ret < 0)
		goto out;
	memcpy(p->prepared, p2->start, p2->end - p2->start);
	p->prepared = NULL;

out:
	return ret;
}

static int fs_path_add_from_extent_buffer(struct fs_path *p,
					   struct extent_buffer *eb,
					   unsigned long off, int len)
{
	int ret;

	ret = fs_path_prepare_for_add(p, len);
	if (ret < 0)
		goto out;

	read_extent_buffer(eb, p->prepared, off, len);
	p->prepared = NULL;

out:
	return ret;
}

static int fs_path_copy(struct fs_path *p, struct fs_path *from)
{
	int ret;

	p->reversed = from->reversed;
	fs_path_reset(p);

	ret = fs_path_add_path(p, from);

	return ret;
}


static void fs_path_unreverse(struct fs_path *p)
{
	char *tmp;
	int len;

	if (!p->reversed)
		return;

	tmp = p->start;
	len = p->end - p->start;
	p->start = p->buf;
	p->end = p->start + len;
	memmove(p->start, tmp, len + 1);
	p->reversed = 0;
}

static struct btrfs_path *alloc_path_for_send(void)
{
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return NULL;
	path->search_commit_root = 1;
	path->skip_locking = 1;
	return path;
}

static int write_buf(struct file *filp, const void *buf, u32 len, loff_t *off)
{
	int ret;
	mm_segment_t old_fs;
	u32 pos = 0;

	old_fs = get_fs();
	set_fs(KERNEL_DS);

	while (pos < len) {
		ret = vfs_write(filp, (char *)buf + pos, len - pos, off);
		/* TODO handle that correctly */
		/*if (ret == -ERESTARTSYS) {
			continue;
		}*/
		if (ret < 0)
			goto out;
		if (ret == 0) {
			ret = -EIO;
			goto out;
		}
		pos += ret;
	}

	ret = 0;

out:
	set_fs(old_fs);
	return ret;
}

static int tlv_put(struct send_ctx *sctx, u16 attr, const void *data, int len)
{
	struct btrfs_tlv_header *hdr;
	int total_len = sizeof(*hdr) + len;
	int left = sctx->send_max_size - sctx->send_size;

	if (unlikely(left < total_len))
		return -EOVERFLOW;

	hdr = (struct btrfs_tlv_header *) (sctx->send_buf + sctx->send_size);
	hdr->tlv_type = cpu_to_le16(attr);
	hdr->tlv_len = cpu_to_le16(len);
	memcpy(hdr + 1, data, len);
	sctx->send_size += total_len;

	return 0;
}

#define TLV_PUT_DEFINE_INT(bits) \
	static int tlv_put_u##bits(struct send_ctx *sctx,		\
				   u##bits attr, u##bits value)		\
	{								\
		__le##bits __tmp = cpu_to_le##bits(value);		\
		return tlv_put(sctx, attr, &__tmp, sizeof(__tmp));	\
	}

TLV_PUT_DEFINE_INT(64)

static int tlv_put_string(struct send_ctx *sctx, u16 attr,
			  const char *str, int len)
{
	if (len == -1)
		len = strlen(str);
	return tlv_put(sctx, attr, str, len);
}

static int tlv_put_uuid(struct send_ctx *sctx, u16 attr,
			const u8 *uuid)
{
	return tlv_put(sctx, attr, uuid, BTRFS_UUID_SIZE);
}

static int tlv_put_btrfs_timespec(struct send_ctx *sctx, u16 attr,
				  struct extent_buffer *eb,
				  struct btrfs_timespec *ts)
{
	struct btrfs_timespec bts;
	read_extent_buffer(eb, &bts, (unsigned long)ts, sizeof(bts));
	return tlv_put(sctx, attr, &bts, sizeof(bts));
}
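
/*
 * Rough sketch of what tlv_put() appends to send_buf, for orientation only
 * (field widths per struct btrfs_tlv_header: a le16 type and a le16 length,
 * followed by the raw payload):
 *
 *	offset 0: tlv_type (le16)	e.g. BTRFS_SEND_A_SIZE
 *	offset 2: tlv_len  (le16)	e.g. 8
 *	offset 4: data     (tlv_len bytes, here a le64 value)
 *
 * Attributes are simply concatenated after the command header until
 * send_cmd() seals the command with its length and crc32c.
 */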

#define TLV_PUT(sctx, attrtype, attrlen, data) \
	do { \
		ret = tlv_put(sctx, attrtype, attrlen, data); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)

#define TLV_PUT_INT(sctx, attrtype, bits, value) \
	do { \
		ret = tlv_put_u##bits(sctx, attrtype, value); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)

#define TLV_PUT_U8(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 8, data)
#define TLV_PUT_U16(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 16, data)
#define TLV_PUT_U32(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 32, data)
#define TLV_PUT_U64(sctx, attrtype, data) TLV_PUT_INT(sctx, attrtype, 64, data)
#define TLV_PUT_STRING(sctx, attrtype, str, len) \
	do { \
		ret = tlv_put_string(sctx, attrtype, str, len); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
#define TLV_PUT_PATH(sctx, attrtype, p) \
	do { \
		ret = tlv_put_string(sctx, attrtype, p->start, \
				     p->end - p->start); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
#define TLV_PUT_UUID(sctx, attrtype, uuid) \
	do { \
		ret = tlv_put_uuid(sctx, attrtype, uuid); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)
#define TLV_PUT_BTRFS_TIMESPEC(sctx, attrtype, eb, ts) \
	do { \
		ret = tlv_put_btrfs_timespec(sctx, attrtype, eb, ts); \
		if (ret < 0) \
			goto tlv_put_failure; \
	} while (0)

static int send_header(struct send_ctx *sctx)
{
	struct btrfs_stream_header hdr;

	strcpy(hdr.magic, BTRFS_SEND_STREAM_MAGIC);
	hdr.version = cpu_to_le32(BTRFS_SEND_STREAM_VERSION);

	return write_buf(sctx->send_filp, &hdr, sizeof(hdr),
			 &sctx->send_off);
}
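
/*
 * Every command in the stream follows the same pattern, sketched here under
 * the assumption of a prepared send_ctx (this mirrors what send_unlink() and
 * friends below actually do):
 *
 *	ret = begin_cmd(sctx, BTRFS_SEND_C_UNLINK);	reserve cmd header
 *	if (ret < 0)
 *		goto out;
 *	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);	attach attributes
 *	ret = send_cmd(sctx);				fill len+crc, write out
 */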

/*
 * For each command/item we want to send to userspace, we call this function.
 */
static int begin_cmd(struct send_ctx *sctx, int cmd)
{
	struct btrfs_cmd_header *hdr;

	if (WARN_ON(!sctx->send_buf))
		return -EINVAL;

	BUG_ON(sctx->send_size);

	sctx->send_size += sizeof(*hdr);
	hdr = (struct btrfs_cmd_header *)sctx->send_buf;
	hdr->cmd = cpu_to_le16(cmd);

	return 0;
}

static int send_cmd(struct send_ctx *sctx)
{
	int ret;
	struct btrfs_cmd_header *hdr;
	u32 crc;

	hdr = (struct btrfs_cmd_header *)sctx->send_buf;
	hdr->len = cpu_to_le32(sctx->send_size - sizeof(*hdr));
	hdr->crc = 0;

	crc = crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size);
	hdr->crc = cpu_to_le32(crc);

	ret = write_buf(sctx->send_filp, sctx->send_buf, sctx->send_size,
			&sctx->send_off);

	sctx->total_send_size += sctx->send_size;
	sctx->cmd_send_size[le16_to_cpu(hdr->cmd)] += sctx->send_size;
	sctx->send_size = 0;

	return ret;
}

/*
 * Sends a move instruction to user space
 */
static int send_rename(struct send_ctx *sctx,
		       struct fs_path *from, struct fs_path *to)
{
	int ret;

	verbose_printk("btrfs: send_rename %s -> %s\n", from->start, to->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_RENAME);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, from);
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_TO, to);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}

/*
 * Sends a link instruction to user space
 */
static int send_link(struct send_ctx *sctx,
		     struct fs_path *path, struct fs_path *lnk)
{
	int ret;

	verbose_printk("btrfs: send_link %s -> %s\n", path->start, lnk->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_LINK);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);
	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, lnk);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}

/*
 * Sends an unlink instruction to user space
 */
static int send_unlink(struct send_ctx *sctx, struct fs_path *path)
{
	int ret;

	verbose_printk("btrfs: send_unlink %s\n", path->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_UNLINK);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}

/*
 * Sends a rmdir instruction to user space
 */
static int send_rmdir(struct send_ctx *sctx, struct fs_path *path)
{
	int ret;

	verbose_printk("btrfs: send_rmdir %s\n", path->start);

	ret = begin_cmd(sctx, BTRFS_SEND_C_RMDIR);
	if (ret < 0)
		goto out;

	TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path);

	ret = send_cmd(sctx);

tlv_put_failure:
out:
	return ret;
}

/*
 * Helper function to retrieve some fields from an inode item.
 */
static int get_inode_info(struct btrfs_root *root,
			  u64 ino, u64 *size, u64 *gen,
			  u64 *mode, u64 *uid, u64 *gid,
			  u64 *rdev)
{
	int ret;
	struct btrfs_inode_item *ii;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = ino;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret) {
		ret = -ENOENT;
		goto out;
	}

	ii = btrfs_item_ptr(path->nodes[0], path->slots[0],
			struct btrfs_inode_item);
	if (size)
		*size = btrfs_inode_size(path->nodes[0], ii);
	if (gen)
		*gen = btrfs_inode_generation(path->nodes[0], ii);
	if (mode)
		*mode = btrfs_inode_mode(path->nodes[0], ii);
	if (uid)
		*uid = btrfs_inode_uid(path->nodes[0], ii);
	if (gid)
		*gid = btrfs_inode_gid(path->nodes[0], ii);
	if (rdev)
		*rdev = btrfs_inode_rdev(path->nodes[0], ii);

out:
	btrfs_free_path(path);
	return ret;
}

typedef int (*iterate_inode_ref_t)(int num, u64 dir, int index,
				   struct fs_path *p,
				   void *ctx);

/*
 * Helper function to iterate the entries in ONE btrfs_inode_ref or
 * btrfs_inode_extref.
 * The iterate callback may return a non zero value to stop iteration. This can
 * be a negative value for error codes or 1 to simply stop it.
 *
 * path must point to the INODE_REF or INODE_EXTREF when called.
 */
static int iterate_inode_ref(struct btrfs_root *root, struct btrfs_path *path,
			     struct btrfs_key *found_key, int resolve,
			     iterate_inode_ref_t iterate, void *ctx)
{
	struct extent_buffer *eb = path->nodes[0];
	struct btrfs_item *item;
	struct btrfs_inode_ref *iref;
	struct btrfs_inode_extref *extref;
	struct btrfs_path *tmp_path;
	struct fs_path *p;
	u32 cur = 0;
	u32 total;
	int slot = path->slots[0];
	u32 name_len;
	char *start;
	int ret = 0;
	int num = 0;
	int index;
	u64 dir;
	unsigned long name_off;
	unsigned long elem_size;
	unsigned long ptr;

	p = fs_path_alloc_reversed();
	if (!p)
		return -ENOMEM;

	tmp_path = alloc_path_for_send();
	if (!tmp_path) {
		fs_path_free(p);
		return -ENOMEM;
	}


	if (found_key->type == BTRFS_INODE_REF_KEY) {
		ptr = (unsigned long)btrfs_item_ptr(eb, slot,
						    struct btrfs_inode_ref);
		item = btrfs_item_nr(slot);
		total = btrfs_item_size(eb, item);
		elem_size = sizeof(*iref);
	} else {
		ptr = btrfs_item_ptr_offset(eb, slot);
		total = btrfs_item_size_nr(eb, slot);
		elem_size = sizeof(*extref);
	}

	while (cur < total) {
		fs_path_reset(p);

		if (found_key->type == BTRFS_INODE_REF_KEY) {
			iref = (struct btrfs_inode_ref *)(ptr + cur);
			name_len = btrfs_inode_ref_name_len(eb, iref);
			name_off = (unsigned long)(iref + 1);
			index = btrfs_inode_ref_index(eb, iref);
			dir = found_key->offset;
		} else {
			extref = (struct btrfs_inode_extref *)(ptr + cur);
			name_len = btrfs_inode_extref_name_len(eb, extref);
			name_off = (unsigned long)&extref->name;
			index = btrfs_inode_extref_index(eb, extref);
			dir = btrfs_inode_extref_parent(eb, extref);
		}

		if (resolve) {
			start = btrfs_ref_to_path(root, tmp_path, name_len,
						  name_off, eb, dir,
						  p->buf, p->buf_len);
			if (IS_ERR(start)) {
				ret = PTR_ERR(start);
				goto out;
			}
			if (start < p->buf) {
				/* overflow, try again with larger buffer */
				ret = fs_path_ensure_buf(p,
						p->buf_len + p->buf - start);
				if (ret < 0)
					goto out;
				start = btrfs_ref_to_path(root, tmp_path,
							  name_len, name_off,
							  eb, dir,
							  p->buf, p->buf_len);
				if (IS_ERR(start)) {
					ret = PTR_ERR(start);
					goto out;
				}
				BUG_ON(start < p->buf);
			}
			p->start = start;
		} else {
			ret = fs_path_add_from_extent_buffer(p, eb, name_off,
							     name_len);
			if (ret < 0)
				goto out;
		}

		cur += elem_size + name_len;
		ret = iterate(num, dir, index, p, ctx);
		if (ret)
			goto out;
		num++;
	}

out:
	btrfs_free_path(tmp_path);
	fs_path_free(p);
	return ret;
}
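
/*
 * Illustrative callback for iterate_inode_ref(), assuming nothing beyond the
 * typedef above; __copy_first_ref() further down is the real in-tree user.
 * Returning 1 stops the iteration without being treated as an error:
 *
 *	static int __count_refs(int num, u64 dir, int index,
 *				struct fs_path *p, void *ctx)
 *	{
 *		(*(u64 *)ctx)++;	count every ref, never stop early
 *		return 0;
 *	}
 */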

typedef int (*iterate_dir_item_t)(int num, struct btrfs_key *di_key,
				  const char *name, int name_len,
				  const char *data, int data_len,
				  u8 type, void *ctx);

/*
 * Helper function to iterate the entries in ONE btrfs_dir_item.
 * The iterate callback may return a non zero value to stop iteration. This can
 * be a negative value for error codes or 1 to simply stop it.
 *
 * path must point to the dir item when called.
 */
static int iterate_dir_item(struct btrfs_root *root, struct btrfs_path *path,
			    struct btrfs_key *found_key,
			    iterate_dir_item_t iterate, void *ctx)
{
	int ret = 0;
	struct extent_buffer *eb;
	struct btrfs_item *item;
	struct btrfs_dir_item *di;
	struct btrfs_key di_key;
	char *buf = NULL;
	char *buf2 = NULL;
	int buf_len;
	int buf_virtual = 0;
	u32 name_len;
	u32 data_len;
	u32 cur;
	u32 len;
	u32 total;
	int slot;
	int num;
	u8 type;

	buf_len = PAGE_SIZE;
	buf = kmalloc(buf_len, GFP_NOFS);
	if (!buf) {
		ret = -ENOMEM;
		goto out;
	}

	eb = path->nodes[0];
	slot = path->slots[0];
	item = btrfs_item_nr(slot);
	di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item);
	cur = 0;
	len = 0;
	total = btrfs_item_size(eb, item);

	num = 0;
	while (cur < total) {
		name_len = btrfs_dir_name_len(eb, di);
		data_len = btrfs_dir_data_len(eb, di);
		type = btrfs_dir_type(eb, di);
		btrfs_dir_item_key_to_cpu(eb, di, &di_key);

		if (name_len + data_len > buf_len) {
			buf_len = PAGE_ALIGN(name_len + data_len);
			if (buf_virtual) {
				buf2 = vmalloc(buf_len);
				if (!buf2) {
					ret = -ENOMEM;
					goto out;
				}
				vfree(buf);
			} else {
				buf2 = krealloc(buf, buf_len, GFP_NOFS);
				if (!buf2) {
					buf2 = vmalloc(buf_len);
					if (!buf2) {
						ret = -ENOMEM;
						goto out;
					}
					kfree(buf);
					buf_virtual = 1;
				}
			}

			buf = buf2;
			buf2 = NULL;
		}

		read_extent_buffer(eb, buf, (unsigned long)(di + 1),
				   name_len + data_len);

		len = sizeof(*di) + name_len + data_len;
		di = (struct btrfs_dir_item *)((char *)di + len);
		cur += len;

		ret = iterate(num, &di_key, buf, name_len, buf + name_len,
			      data_len, type, ctx);
		if (ret < 0)
			goto out;
		if (ret) {
			ret = 0;
			goto out;
		}

		num++;
	}

out:
	if (buf_virtual)
		vfree(buf);
	else
		kfree(buf);
	return ret;
}

static int __copy_first_ref(int num, u64 dir, int index,
			    struct fs_path *p, void *ctx)
{
	int ret;
	struct fs_path *pt = ctx;

	ret = fs_path_copy(pt, p);
	if (ret < 0)
		return ret;

	/* we want the first only */
	return 1;
}

/*
 * Retrieve the first path of an inode. If an inode has more than one
 * ref/hardlink, this is ignored.
 */
static int get_inode_path(struct btrfs_root *root,
			  u64 ino, struct fs_path *path)
{
	int ret;
	struct btrfs_key key, found_key;
	struct btrfs_path *p;

	p = alloc_path_for_send();
	if (!p)
		return -ENOMEM;

	fs_path_reset(path);

	key.objectid = ino;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = 0;

	ret = btrfs_search_slot_for_read(root, &key, p, 1, 0);
	if (ret < 0)
		goto out;
	if (ret) {
		ret = 1;
		goto out;
	}
	btrfs_item_key_to_cpu(p->nodes[0], &found_key, p->slots[0]);
	if (found_key.objectid != ino ||
	    (found_key.type != BTRFS_INODE_REF_KEY &&
	     found_key.type != BTRFS_INODE_EXTREF_KEY)) {
		ret = -ENOENT;
		goto out;
	}

	ret = iterate_inode_ref(root, p, &found_key, 1,
				__copy_first_ref, path);
	if (ret < 0)
		goto out;
	ret = 0;

out:
	btrfs_free_path(p);
	return ret;
}

struct backref_ctx {
	struct send_ctx *sctx;

	/* number of total found references */
	u64 found;

	/*
	 * used for clones found in send_root. clones found behind cur_objectid
	 * and cur_offset are not considered as allowed clones.
	 */
	u64 cur_objectid;
	u64 cur_offset;

	/* may be truncated in case it's the last extent in a file */
	u64 extent_len;

	/* Just to check for bugs in backref resolving */
	int found_itself;
};

static int __clone_root_cmp_bsearch(const void *key, const void *elt)
{
	u64 root = (u64)(uintptr_t)key;
	struct clone_root *cr = (struct clone_root *)elt;

	if (root < cr->root->objectid)
		return -1;
	if (root > cr->root->objectid)
		return 1;
	return 0;
}

static int __clone_root_cmp_sort(const void *e1, const void *e2)
{
	struct clone_root *cr1 = (struct clone_root *)e1;
	struct clone_root *cr2 = (struct clone_root *)e2;

	if (cr1->root->objectid < cr2->root->objectid)
		return -1;
	if (cr1->root->objectid > cr2->root->objectid)
		return 1;
	return 0;
}

/*
 * Called for every backref that is found for the current extent.
 * Results are collected in sctx->clone_roots->ino/offset/found_refs
 */
static int __iterate_backrefs(u64 ino, u64 offset, u64 root, void *ctx_)
{
	struct backref_ctx *bctx = ctx_;
	struct clone_root *found;
	int ret;
	u64 i_size;

	/* First check if the root is in the list of accepted clone sources */
	found = bsearch((void *)(uintptr_t)root, bctx->sctx->clone_roots,
			bctx->sctx->clone_roots_cnt,
			sizeof(struct clone_root),
			__clone_root_cmp_bsearch);
	if (!found)
		return 0;

	if (found->root == bctx->sctx->send_root &&
	    ino == bctx->cur_objectid &&
	    offset == bctx->cur_offset) {
		bctx->found_itself = 1;
	}

	/*
	 * There are inodes that have extents that lie behind their i_size.
	 * Don't accept clones from these extents.
	 */
	ret = get_inode_info(found->root, ino, &i_size, NULL, NULL, NULL, NULL,
			     NULL);
	if (ret < 0)
		return ret;

	if (offset + bctx->extent_len > i_size)
		return 0;

	/*
	 * Make sure we don't consider clones from send_root that are
	 * behind the current inode/offset.
	 */
	if (found->root == bctx->sctx->send_root) {
		/*
		 * TODO for the moment we don't accept clones from the inode
		 * that is currently being sent. We may change this when
		 * BTRFS_IOC_CLONE_RANGE supports cloning from and to the same
		 * file.
		 */
		if (ino >= bctx->cur_objectid)
			return 0;
#if 0
		if (ino > bctx->cur_objectid)
			return 0;
		if (offset + bctx->extent_len > bctx->cur_offset)
			return 0;
#endif
	}

	bctx->found++;
	found->found_refs++;
	if (ino < found->ino) {
		found->ino = ino;
		found->offset = offset;
	} else if (found->ino == ino) {
		/*
		 * same extent found more than once in the same file.
		 */
		if (found->offset > offset + bctx->extent_len)
			found->offset = offset;
	}

	return 0;
}

/*
 * Given an inode, offset and extent item, it finds a good clone for a clone
 * instruction. Returns -ENOENT when none could be found. The function makes
 * sure that the returned clone is usable at the point where sending is at the
 * moment. This means, that no clones are accepted which lie behind the
 * current inode+offset.
 *
 * path must point to the extent item when called.
 */
static int find_extent_clone(struct send_ctx *sctx,
			     struct btrfs_path *path,
			     u64 ino, u64 data_offset,
			     u64 ino_size,
			     struct clone_root **found)
{
	int ret;
	int extent_type;
	u64 logical;
	u64 disk_byte;
	u64 num_bytes;
	u64 extent_item_pos;
	u64 flags = 0;
	struct btrfs_file_extent_item *fi;
	struct extent_buffer *eb = path->nodes[0];
	struct backref_ctx *backref_ctx = NULL;
	struct clone_root *cur_clone_root;
	struct btrfs_key found_key;
	struct btrfs_path *tmp_path;
	int compressed;
	u32 i;

	tmp_path = alloc_path_for_send();
	if (!tmp_path)
		return -ENOMEM;

	backref_ctx = kmalloc(sizeof(*backref_ctx), GFP_NOFS);
	if (!backref_ctx) {
		ret = -ENOMEM;
		goto out;
	}

	if (data_offset >= ino_size) {
		/*
		 * There may be extents that lie behind the file's size.
		 * I at least had this in combination with snapshotting while
		 * writing large files.
		 */
		ret = 0;
		goto out;
	}

	fi = btrfs_item_ptr(eb, path->slots[0],
			struct btrfs_file_extent_item);
	extent_type = btrfs_file_extent_type(eb, fi);
	if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
		ret = -ENOENT;
		goto out;
	}
	compressed = btrfs_file_extent_compression(eb, fi);

	num_bytes = btrfs_file_extent_num_bytes(eb, fi);
	disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
	if (disk_byte == 0) {
		ret = -ENOENT;
		goto out;
	}
	logical = disk_byte + btrfs_file_extent_offset(eb, fi);

	ret = extent_from_logical(sctx->send_root->fs_info, disk_byte, tmp_path,
				  &found_key, &flags);
	btrfs_release_path(tmp_path);

	if (ret < 0)
		goto out;
	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		ret = -EIO;
		goto out;
	}

	/*
	 * Setup the clone roots.
	 */
	for (i = 0; i < sctx->clone_roots_cnt; i++) {
		cur_clone_root = sctx->clone_roots + i;
		cur_clone_root->ino = (u64)-1;
		cur_clone_root->offset = 0;
		cur_clone_root->found_refs = 0;
	}

	backref_ctx->sctx = sctx;
	backref_ctx->found = 0;
	backref_ctx->cur_objectid = ino;
	backref_ctx->cur_offset = data_offset;
	backref_ctx->found_itself = 0;
	backref_ctx->extent_len = num_bytes;

	/*
	 * The last extent of a file may be too large due to page alignment.
	 * We need to adjust extent_len in this case so that the checks in
	 * __iterate_backrefs work.
	 */
	if (data_offset + num_bytes >= ino_size)
		backref_ctx->extent_len = ino_size - data_offset;

	/*
	 * Now collect all backrefs.
	 */
	if (compressed == BTRFS_COMPRESS_NONE)
		extent_item_pos = logical - found_key.objectid;
	else
		extent_item_pos = 0;

	ret = iterate_extent_inodes(sctx->send_root->fs_info,
					found_key.objectid, extent_item_pos, 1,
					__iterate_backrefs, backref_ctx);

	if (ret < 0)
		goto out;

	if (!backref_ctx->found_itself) {
		/* found a bug in backref code? */
		ret = -EIO;
		btrfs_err(sctx->send_root->fs_info, "did not find backref in "
				"send_root. inode=%llu, offset=%llu, "
				"disk_byte=%llu found extent=%llu\n",
				ino, data_offset, disk_byte, found_key.objectid);
		goto out;
	}

	verbose_printk(KERN_DEBUG "btrfs: find_extent_clone: data_offset=%llu, "
			"ino=%llu, "
			"num_bytes=%llu, logical=%llu\n",
			data_offset, ino, num_bytes, logical);

	if (!backref_ctx->found)
		verbose_printk("btrfs: no clones found\n");

	cur_clone_root = NULL;
	for (i = 0; i < sctx->clone_roots_cnt; i++) {
		if (sctx->clone_roots[i].found_refs) {
			if (!cur_clone_root)
				cur_clone_root = sctx->clone_roots + i;
			else if (sctx->clone_roots[i].root == sctx->send_root)
				/* prefer clones from send_root over others */
				cur_clone_root = sctx->clone_roots + i;
		}

	}

	if (cur_clone_root) {
		*found = cur_clone_root;
		ret = 0;
	} else {
		ret = -ENOENT;
	}

out:
	btrfs_free_path(tmp_path);
	kfree(backref_ctx);
	return ret;
}

static int read_symlink(struct btrfs_root *root,
			u64 ino,
			struct fs_path *dest)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_file_extent_item *ei;
	u8 type;
	u8 compression;
	unsigned long off;
	int len;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	BUG_ON(ret);

	ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
			struct btrfs_file_extent_item);
	type = btrfs_file_extent_type(path->nodes[0], ei);
	compression = btrfs_file_extent_compression(path->nodes[0], ei);
	BUG_ON(type != BTRFS_FILE_EXTENT_INLINE);
	BUG_ON(compression);

	off = btrfs_file_extent_inline_start(ei);
	len = btrfs_file_extent_inline_len(path->nodes[0], path->slots[0], ei);

	ret = fs_path_add_from_extent_buffer(dest, path->nodes[0], off, len);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Helper function to generate a file name that is unique in the root of
 * send_root and parent_root. This is used to generate names for orphan inodes.
 */
static int gen_unique_name(struct send_ctx *sctx,
			   u64 ino, u64 gen,
			   struct fs_path *dest)
{
	int ret = 0;
	struct btrfs_path *path;
	struct btrfs_dir_item *di;
	char tmp[64];
	int len;
	u64 idx = 0;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	while (1) {
		len = snprintf(tmp, sizeof(tmp), "o%llu-%llu-%llu",
				ino, gen, idx);
		if (len >= sizeof(tmp)) {
			/* should really not happen */
			ret = -EOVERFLOW;
			goto out;
		}

		di = btrfs_lookup_dir_item(NULL, sctx->send_root,
				path, BTRFS_FIRST_FREE_OBJECTID,
				tmp, strlen(tmp), 0);
		btrfs_release_path(path);
		if (IS_ERR(di)) {
			ret = PTR_ERR(di);
			goto out;
		}
		if (di) {
			/* not unique, try again */
			idx++;
			continue;
		}

		if (!sctx->parent_root) {
			/* unique */
			ret = 0;
			break;
		}

		di = btrfs_lookup_dir_item(NULL, sctx->parent_root,
				path, BTRFS_FIRST_FREE_OBJECTID,
				tmp, strlen(tmp), 0);
		btrfs_release_path(path);
		if (IS_ERR(di)) {
			ret = PTR_ERR(di);
			goto out;
		}
		if (di) {
			/* not unique, try again */
			idx++;
			continue;
		}
		/* unique */
		break;
	}

	ret = fs_path_add(dest, tmp, strlen(tmp));

out:
	btrfs_free_path(path);
	return ret;
}
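
/*
 * For orientation: with the "o%llu-%llu-%llu" format above, an inode 261 of
 * generation 5 is first tried as "o261-5-0" in the top level of both roots;
 * if a dir item of that name already exists, the trailing index is bumped
 * ("o261-5-1", ...) until an unused name is found.
 */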

enum inode_state {
	inode_state_no_change,
	inode_state_will_create,
	inode_state_did_create,
	inode_state_will_delete,
	inode_state_did_delete,
};

static int get_cur_inode_state(struct send_ctx *sctx, u64 ino, u64 gen)
{
	int ret;
	int left_ret;
	int right_ret;
	u64 left_gen;
	u64 right_gen;

	ret = get_inode_info(sctx->send_root, ino, NULL, &left_gen, NULL, NULL,
			NULL, NULL);
	if (ret < 0 && ret != -ENOENT)
		goto out;
	left_ret = ret;

	if (!sctx->parent_root) {
		right_ret = -ENOENT;
	} else {
		ret = get_inode_info(sctx->parent_root, ino, NULL, &right_gen,
				NULL, NULL, NULL, NULL);
		if (ret < 0 && ret != -ENOENT)
			goto out;
		right_ret = ret;
	}

	if (!left_ret && !right_ret) {
		if (left_gen == gen && right_gen == gen) {
			ret = inode_state_no_change;
		} else if (left_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_create;
			else
				ret = inode_state_will_create;
		} else if (right_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_delete;
			else
				ret = inode_state_will_delete;
		} else {
			ret = -ENOENT;
		}
	} else if (!left_ret) {
		if (left_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_create;
			else
				ret = inode_state_will_create;
		} else {
			ret = -ENOENT;
		}
	} else if (!right_ret) {
		if (right_gen == gen) {
			if (ino < sctx->send_progress)
				ret = inode_state_did_delete;
			else
				ret = inode_state_will_delete;
		} else {
			ret = -ENOENT;
		}
	} else {
		ret = -ENOENT;
	}

out:
	return ret;
}

static int is_inode_existent(struct send_ctx *sctx, u64 ino, u64 gen)
{
	int ret;

	ret = get_cur_inode_state(sctx, ino, gen);
	if (ret < 0)
		goto out;

	if (ret == inode_state_no_change ||
	    ret == inode_state_did_create ||
	    ret == inode_state_will_delete)
		ret = 1;
	else
		ret = 0;

out:
	return ret;
}

/*
 * Helper function to lookup a dir item in a dir.
 */
static int lookup_dir_item_inode(struct btrfs_root *root,
				 u64 dir, const char *name, int name_len,
				 u64 *found_inode,
				 u8 *found_type)
{
	int ret = 0;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	di = btrfs_lookup_dir_item(NULL, root, path,
			dir, name, name_len, 0);
	if (!di) {
		ret = -ENOENT;
		goto out;
	}
	if (IS_ERR(di)) {
		ret = PTR_ERR(di);
		goto out;
	}
	btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
	*found_inode = key.objectid;
	*found_type = btrfs_dir_type(path->nodes[0], di);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Looks up the first btrfs_inode_ref of a given ino. It returns the parent
 * dir, the generation of the parent dir and the name of the dir entry.
 */
static int get_first_ref(struct btrfs_root *root, u64 ino,
			 u64 *dir, u64 *dir_gen, struct fs_path *name)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;
	int len;
	u64 parent_dir;

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = ino;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = 0;

	ret = btrfs_search_slot_for_read(root, &key, path, 1, 0);
	if (ret < 0)
		goto out;
	if (!ret)
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				path->slots[0]);
	if (ret || found_key.objectid != ino ||
	    (found_key.type != BTRFS_INODE_REF_KEY &&
	     found_key.type != BTRFS_INODE_EXTREF_KEY)) {
		ret = -ENOENT;
		goto out;
	}

	if (found_key.type == BTRFS_INODE_REF_KEY) {
		struct btrfs_inode_ref *iref;
		iref = btrfs_item_ptr(path->nodes[0], path->slots[0],
				      struct btrfs_inode_ref);
		len = btrfs_inode_ref_name_len(path->nodes[0], iref);
		ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
						     (unsigned long)(iref + 1),
						     len);
		parent_dir = found_key.offset;
	} else {
		struct btrfs_inode_extref *extref;
		extref = btrfs_item_ptr(path->nodes[0], path->slots[0],
					struct btrfs_inode_extref);
		len = btrfs_inode_extref_name_len(path->nodes[0], extref);
		ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
					(unsigned long)&extref->name, len);
		parent_dir = btrfs_inode_extref_parent(path->nodes[0], extref);
	}
	if (ret < 0)
		goto out;
	btrfs_release_path(path);

	ret = get_inode_info(root, parent_dir, NULL, dir_gen, NULL, NULL,
			NULL, NULL);
	if (ret < 0)
		goto out;

	*dir = parent_dir;

out:
	btrfs_free_path(path);
	return ret;
}

static int is_first_ref(struct btrfs_root *root,
			u64 ino, u64 dir,
			const char *name, int name_len)
{
	int ret;
	struct fs_path *tmp_name;
	u64 tmp_dir;
	u64 tmp_dir_gen;

	tmp_name = fs_path_alloc();
	if (!tmp_name)
		return -ENOMEM;

	ret = get_first_ref(root, ino, &tmp_dir, &tmp_dir_gen, tmp_name);
	if (ret < 0)
		goto out;

	if (dir != tmp_dir || name_len != fs_path_len(tmp_name)) {
		ret = 0;
		goto out;
	}

	ret = !memcmp(tmp_name->start, name, name_len);

out:
	fs_path_free(tmp_name);
	return ret;
}

/*
 * Used by process_recorded_refs to determine if a new ref would overwrite an
 * already existing ref. In case it detects an overwrite, it returns the
 * inode/gen in who_ino/who_gen.
 * When an overwrite is detected, process_recorded_refs does proper orphanizing
 * to make sure later references to the overwritten inode are possible.
 * Orphanizing is however only required for the first ref of an inode.
 * process_recorded_refs does an additional is_first_ref check to see if
 * orphanizing is really required.
 */
static int will_overwrite_ref(struct send_ctx *sctx, u64 dir, u64 dir_gen,
			      const char *name, int name_len,
			      u64 *who_ino, u64 *who_gen)
{
	int ret = 0;
	u64 gen;
	u64 other_inode = 0;
	u8 other_type = 0;

	if (!sctx->parent_root)
		goto out;

	ret = is_inode_existent(sctx, dir, dir_gen);
	if (ret <= 0)
		goto out;

	/*
	 * If we have a parent root we need to verify that the parent dir was
	 * not deleted and then re-created, if it was then we have no overwrite
	 * and we can just unlink this entry.
	 */
	if (sctx->parent_root) {
		ret = get_inode_info(sctx->parent_root, dir, NULL, &gen, NULL,
				     NULL, NULL, NULL);
		if (ret < 0 && ret != -ENOENT)
			goto out;
		if (ret) {
			ret = 0;
			goto out;
		}
		if (gen != dir_gen)
			goto out;
	}

	ret = lookup_dir_item_inode(sctx->parent_root, dir, name, name_len,
			&other_inode, &other_type);
	if (ret < 0 && ret != -ENOENT)
		goto out;
	if (ret) {
		ret = 0;
		goto out;
	}

	/*
	 * Check if the overwritten ref was already processed. If yes, the ref
	 * was already unlinked/moved, so we can safely assume that we will not
	 * overwrite anything at this point in time.
	 */
	if (other_inode > sctx->send_progress) {
		ret = get_inode_info(sctx->parent_root, other_inode, NULL,
				who_gen, NULL, NULL, NULL, NULL);
		if (ret < 0)
			goto out;

		ret = 1;
		*who_ino = other_inode;
	} else {
		ret = 0;
	}

out:
	return ret;
}
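
/*
 * A concrete (hypothetical) case of the overwrite detection above: suppose
 * the parent snapshot has "dir/foo" pointing at inode 300, while in the send
 * snapshot "dir/foo" points at inode 310. As long as inode 300 is still
 * unprocessed (300 > send_progress), emitting the new ref would clobber it,
 * so will_overwrite_ref() reports who_ino=300 and process_recorded_refs()
 * first renames inode 300 to its orphan name (see gen_unique_name()).
 */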

/*
 * Checks if the ref was overwritten by an already processed inode. This is
 * used by __get_cur_name_and_parent to find out if the ref was orphanized and
 * thus the orphan name needs to be used.
 * process_recorded_refs also uses it to avoid unlinking of refs that were
 * overwritten.
 */
static int did_overwrite_ref(struct send_ctx *sctx,
			    u64 dir, u64 dir_gen,
			    u64 ino, u64 ino_gen,
			    const char *name, int name_len)
{
	int ret = 0;
	u64 gen;
	u64 ow_inode;
	u8 other_type;

	if (!sctx->parent_root)
		goto out;

	ret = is_inode_existent(sctx, dir, dir_gen);
	if (ret <= 0)
		goto out;

	/* check if the ref was overwritten by another ref */
	ret = lookup_dir_item_inode(sctx->send_root, dir, name, name_len,
			&ow_inode, &other_type);
	if (ret < 0 && ret != -ENOENT)
		goto out;
	if (ret) {
		/* was never and will never be overwritten */
		ret = 0;
		goto out;
	}

	ret = get_inode_info(sctx->send_root, ow_inode, NULL, &gen, NULL, NULL,
			NULL, NULL);
	if (ret < 0)
		goto out;

	if (ow_inode == ino && gen == ino_gen) {
		ret = 0;
		goto out;
	}

	/* we know that it is or will be overwritten. check this now */
	if (ow_inode < sctx->send_progress)
		ret = 1;
	else
		ret = 0;

out:
	return ret;
}

/*
 * Same as did_overwrite_ref, but also checks if it is the first ref of an
 * inode that got overwritten. This is used by process_recorded_refs to
 * determine if it has to use the path as returned by get_cur_path or the
 * orphan name.
 */
static int did_overwrite_first_ref(struct send_ctx *sctx, u64 ino, u64 gen)
{
	int ret = 0;
	struct fs_path *name = NULL;
	u64 dir;
	u64 dir_gen;

	if (!sctx->parent_root)
		goto out;

	name = fs_path_alloc();
	if (!name)
		return -ENOMEM;

	ret = get_first_ref(sctx->parent_root, ino, &dir, &dir_gen, name);
	if (ret < 0)
		goto out;

	ret = did_overwrite_ref(sctx, dir, dir_gen, ino, gen,
			name->start, fs_path_len(name));

out:
	fs_path_free(name);
	return ret;
}

/*
 * Insert a name cache entry. On 32bit kernels the radix tree index is 32bit,
 * so we need to do some special handling in case we have clashes. This function
 * takes care of this with the help of name_cache_entry::radix_list.
 * In case of error, nce is kfreed.
 */
static int name_cache_insert(struct send_ctx *sctx,
			     struct name_cache_entry *nce)
{
	int ret = 0;
	struct list_head *nce_head;

	nce_head = radix_tree_lookup(&sctx->name_cache,
			(unsigned long)nce->ino);
	if (!nce_head) {
		nce_head = kmalloc(sizeof(*nce_head), GFP_NOFS);
		if (!nce_head) {
			kfree(nce);
			return -ENOMEM;
		}
		INIT_LIST_HEAD(nce_head);

		ret = radix_tree_insert(&sctx->name_cache, nce->ino, nce_head);
		if (ret < 0) {
			kfree(nce_head);
			kfree(nce);
			return ret;
		}
	}
	list_add_tail(&nce->radix_list, nce_head);
	list_add_tail(&nce->list, &sctx->name_cache_list);
	sctx->name_cache_size++;

	return ret;
}

static void name_cache_delete(struct send_ctx *sctx,
			      struct name_cache_entry *nce)
{
	struct list_head *nce_head;

	nce_head = radix_tree_lookup(&sctx->name_cache,
			(unsigned long)nce->ino);
	BUG_ON(!nce_head);

	list_del(&nce->radix_list);
	list_del(&nce->list);
	sctx->name_cache_size--;

	if (list_empty(nce_head)) {
		radix_tree_delete(&sctx->name_cache, (unsigned long)nce->ino);
		kfree(nce_head);
	}
}

static struct name_cache_entry *name_cache_search(struct send_ctx *sctx,
						  u64 ino, u64 gen)
{
	struct list_head *nce_head;
	struct name_cache_entry *cur;

	nce_head = radix_tree_lookup(&sctx->name_cache, (unsigned long)ino);
	if (!nce_head)
		return NULL;

	list_for_each_entry(cur, nce_head, radix_list) {
		if (cur->ino == ino && cur->gen == gen)
			return cur;
	}
	return NULL;
}

/*
 * Removes the entry from the list and adds it back to the end. This marks the
 * entry as recently used so that name_cache_clean_unused does not remove it.
 */
static void name_cache_used(struct send_ctx *sctx, struct name_cache_entry *nce)
{
	list_del(&nce->list);
	list_add_tail(&nce->list, &sctx->name_cache_list);
}

/*
 * Remove some entries from the beginning of name_cache_list.
 */
static void name_cache_clean_unused(struct send_ctx *sctx)
{
	struct name_cache_entry *nce;

	if (sctx->name_cache_size < SEND_CTX_NAME_CACHE_CLEAN_SIZE)
		return;

	while (sctx->name_cache_size > SEND_CTX_MAX_NAME_CACHE_SIZE) {
		nce = list_entry(sctx->name_cache_list.next,
				struct name_cache_entry, list);
		name_cache_delete(sctx, nce);
		kfree(nce);
	}
}

static void name_cache_free(struct send_ctx *sctx)
{
	struct name_cache_entry *nce;

	while (!list_empty(&sctx->name_cache_list)) {
		nce = list_entry(sctx->name_cache_list.next,
				struct name_cache_entry, list);
		name_cache_delete(sctx, nce);
		kfree(nce);
	}
}
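
/*
 * Note on the LRU behaviour implemented above: name_cache_list is kept in
 * least-recently-used order, and with the defaults at the top of this file
 * a cleanup only starts once the cache holds SEND_CTX_NAME_CACHE_CLEAN_SIZE
 * (256) entries, at which point the oldest entries are dropped until
 * SEND_CTX_MAX_NAME_CACHE_SIZE (128) remain.
 */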

/*
 * Used by get_cur_path for each ref up to the root.
 * Returns 0 if it succeeded.
 * Returns 1 if the inode is not existent or got overwritten. In that case, the
 * name is an orphan name. This instructs get_cur_path to stop iterating. If 1
 * is returned, parent_ino/parent_gen are not guaranteed to be valid.
 * Returns <0 in case of error.
 */
static int __get_cur_name_and_parent(struct send_ctx *sctx,
				     u64 ino, u64 gen,
				     int skip_name_cache,
				     u64 *parent_ino,
				     u64 *parent_gen,
				     struct fs_path *dest)
{
	int ret;
	int nce_ret;
	struct btrfs_path *path = NULL;
	struct name_cache_entry *nce = NULL;

	if (skip_name_cache)
		goto get_ref;
	/*
	 * First check if we already did a call to this function with the same
	 * ino/gen. If yes, check if the cache entry is still up-to-date. If yes
	 * return the cached result.
	 */
	nce = name_cache_search(sctx, ino, gen);
	if (nce) {
		if (ino < sctx->send_progress && nce->need_later_update) {
			name_cache_delete(sctx, nce);
			kfree(nce);
			nce = NULL;
		} else {
			name_cache_used(sctx, nce);
			*parent_ino = nce->parent_ino;
			*parent_gen = nce->parent_gen;
			ret = fs_path_add(dest, nce->name, nce->name_len);
			if (ret < 0)
				goto out;
			ret = nce->ret;
			goto out;
		}
	}

	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	/*
	 * If the inode is not existent yet, add the orphan name and return 1.
	 * This should only happen for the parent dir that we determine in
	 * __record_new_ref
	 */
	ret = is_inode_existent(sctx, ino, gen);
	if (ret < 0)
		goto out;

	if (!ret) {
		ret = gen_unique_name(sctx, ino, gen, dest);
		if (ret < 0)
			goto out;
		ret = 1;
		goto out_cache;
	}

get_ref:
	/*
	 * Depending on whether the inode was already processed or not, use
	 * send_root or parent_root for ref lookup.
	 */
	if (ino < sctx->send_progress && !skip_name_cache)
		ret = get_first_ref(sctx->send_root, ino,
				    parent_ino, parent_gen, dest);
	else
		ret = get_first_ref(sctx->parent_root, ino,
				    parent_ino, parent_gen, dest);
	if (ret < 0)
		goto out;

	/*
	 * Check if the ref was overwritten by an inode's ref that was processed
	 * earlier. If yes, treat as orphan and return 1.
	 */
	ret = did_overwrite_ref(sctx, *parent_ino, *parent_gen, ino, gen,
			dest->start, dest->end - dest->start);
	if (ret < 0)
		goto out;
	if (ret) {
		fs_path_reset(dest);
		ret = gen_unique_name(sctx, ino, gen, dest);
		if (ret < 0)
			goto out;
		ret = 1;
	}
	if (skip_name_cache)
		goto out;

out_cache:
	/*
	 * Store the result of the lookup in the name cache.
	 */
	nce = kmalloc(sizeof(*nce) + fs_path_len(dest) + 1, GFP_NOFS);
	if (!nce) {
		ret = -ENOMEM;
		goto out;
	}

	nce->ino = ino;
	nce->gen = gen;
	nce->parent_ino = *parent_ino;
	nce->parent_gen = *parent_gen;
	nce->name_len = fs_path_len(dest);
	nce->ret = ret;
	strcpy(nce->name, dest->start);

	if (ino < sctx->send_progress)
		nce->need_later_update = 0;
	else
		nce->need_later_update = 1;

	nce_ret = name_cache_insert(sctx, nce);
	if (nce_ret < 0)
		ret = nce_ret;
	name_cache_clean_unused(sctx);

out:
	btrfs_free_path(path);
	return ret;
}

/*
 * Magic happens here. This function returns the first ref to an inode as it
 * would look like while receiving the stream at this point in time.
 * We walk the path up to the root. For every inode in between, we check if it
 * was already processed/sent. If yes, we continue with the parent as found
 * in send_root. If not, we continue with the parent as found in parent_root.
 * If we encounter an inode that was deleted at this point in time, we use the
 * inode's "orphan" name instead of the real name and stop. Same with new
 * inodes that were not created yet and overwritten inodes/refs.
 *
 * When do we have orphan inodes:
 * 1. When an inode is freshly created and thus no valid refs are available yet
 * 2. When a directory lost all its refs (deleted) but still has dir items
 *    inside which were not processed yet (pending for move/delete). If anyone
 *    tried to get the path to the dir items, it would get a path inside that
 *    orphan directory.
 * 3. When an inode is moved around or gets new links, it may overwrite the ref
 *    of an unprocessed inode. If in that case the first ref would be
 *    overwritten, the overwritten inode gets "orphanized". Later when we
 *    process this overwritten inode, it is restored at a new place by moving
 *    the orphan inode.
 *
 * sctx->send_progress tells this function at which point in time receiving
 * would be.
 */
static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen,
			struct fs_path *dest)
{
	int ret = 0;
	struct fs_path *name = NULL;
	u64 parent_inode = 0;
	u64 parent_gen = 0;
	int stop = 0;
	u64 start_ino = ino;
	u64 start_gen = gen;
	int skip_name_cache = 0;

	name = fs_path_alloc();
	if (!name) {
		ret = -ENOMEM;
		goto out;
	}

	if (is_waiting_for_move(sctx, ino))
		skip_name_cache = 1;

again:
	dest->reversed = 1;
	fs_path_reset(dest);

	while (!stop && ino != BTRFS_FIRST_FREE_OBJECTID) {
		fs_path_reset(name);

		ret = __get_cur_name_and_parent(sctx, ino, gen, skip_name_cache,
				&parent_inode, &parent_gen, name);
		if (ret < 0)
			goto out;
		if (ret)
			stop = 1;

		if (!skip_name_cache &&
		    is_waiting_for_move(sctx, parent_inode)) {
			ino = start_ino;
			gen = start_gen;
			stop = 0;
			skip_name_cache = 1;
			goto again;
		}

		ret = fs_path_add_path(dest, name);
		if (ret < 0)
			goto out;

		ino = parent_inode;
		gen = parent_gen;
	}

out:
	fs_path_free(name);
	if (!ret)
		fs_path_unreverse(dest);
	return ret;
}
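
/*
 * Walk-through with the tree example from struct send_ctx: asking for the
 * path of inode 260 resolves its first ref to "d" in dir 259, then "c" in
 * dir 258, then "b" in dir 257, then "a" in the subvolume root. Because dest
 * is built reversed, each name is prepended, yielding "a/b/c/d" after the
 * final fs_path_unreverse().
 */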
btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); 2221 namelen = btrfs_root_ref_name_len(leaf, ref); 2222 read_extent_buffer(leaf, name, (unsigned long)(ref + 1), namelen); 2223 btrfs_release_path(path); 2224 2225 if (parent_root) { 2226 ret = begin_cmd(sctx, BTRFS_SEND_C_SNAPSHOT); 2227 if (ret < 0) 2228 goto out; 2229 } else { 2230 ret = begin_cmd(sctx, BTRFS_SEND_C_SUBVOL); 2231 if (ret < 0) 2232 goto out; 2233 } 2234 2235 TLV_PUT_STRING(sctx, BTRFS_SEND_A_PATH, name, namelen); 2236 TLV_PUT_UUID(sctx, BTRFS_SEND_A_UUID, 2237 sctx->send_root->root_item.uuid); 2238 TLV_PUT_U64(sctx, BTRFS_SEND_A_CTRANSID, 2239 le64_to_cpu(sctx->send_root->root_item.ctransid)); 2240 if (parent_root) { 2241 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID, 2242 sctx->parent_root->root_item.uuid); 2243 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID, 2244 le64_to_cpu(sctx->parent_root->root_item.ctransid)); 2245 } 2246 2247 ret = send_cmd(sctx); 2248 2249 tlv_put_failure: 2250 out: 2251 btrfs_free_path(path); 2252 kfree(name); 2253 return ret; 2254 } 2255 2256 static int send_truncate(struct send_ctx *sctx, u64 ino, u64 gen, u64 size) 2257 { 2258 int ret = 0; 2259 struct fs_path *p; 2260 2261 verbose_printk("btrfs: send_truncate %llu size=%llu\n", ino, size); 2262 2263 p = fs_path_alloc(); 2264 if (!p) 2265 return -ENOMEM; 2266 2267 ret = begin_cmd(sctx, BTRFS_SEND_C_TRUNCATE); 2268 if (ret < 0) 2269 goto out; 2270 2271 ret = get_cur_path(sctx, ino, gen, p); 2272 if (ret < 0) 2273 goto out; 2274 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); 2275 TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, size); 2276 2277 ret = send_cmd(sctx); 2278 2279 tlv_put_failure: 2280 out: 2281 fs_path_free(p); 2282 return ret; 2283 } 2284 2285 static int send_chmod(struct send_ctx *sctx, u64 ino, u64 gen, u64 mode) 2286 { 2287 int ret = 0; 2288 struct fs_path *p; 2289 2290 verbose_printk("btrfs: send_chmod %llu mode=%llu\n", ino, mode); 2291 2292 p = fs_path_alloc(); 2293 if (!p) 2294 return -ENOMEM; 2295 2296 ret = begin_cmd(sctx, BTRFS_SEND_C_CHMOD); 2297 if (ret < 0) 2298 goto out; 2299 2300 ret = get_cur_path(sctx, ino, gen, p); 2301 if (ret < 0) 2302 goto out; 2303 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); 2304 TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode & 07777); 2305 2306 ret = send_cmd(sctx); 2307 2308 tlv_put_failure: 2309 out: 2310 fs_path_free(p); 2311 return ret; 2312 } 2313 2314 static int send_chown(struct send_ctx *sctx, u64 ino, u64 gen, u64 uid, u64 gid) 2315 { 2316 int ret = 0; 2317 struct fs_path *p; 2318 2319 verbose_printk("btrfs: send_chown %llu uid=%llu, gid=%llu\n", ino, uid, gid); 2320 2321 p = fs_path_alloc(); 2322 if (!p) 2323 return -ENOMEM; 2324 2325 ret = begin_cmd(sctx, BTRFS_SEND_C_CHOWN); 2326 if (ret < 0) 2327 goto out; 2328 2329 ret = get_cur_path(sctx, ino, gen, p); 2330 if (ret < 0) 2331 goto out; 2332 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); 2333 TLV_PUT_U64(sctx, BTRFS_SEND_A_UID, uid); 2334 TLV_PUT_U64(sctx, BTRFS_SEND_A_GID, gid); 2335 2336 ret = send_cmd(sctx); 2337 2338 tlv_put_failure: 2339 out: 2340 fs_path_free(p); 2341 return ret; 2342 } 2343 2344 static int send_utimes(struct send_ctx *sctx, u64 ino, u64 gen) 2345 { 2346 int ret = 0; 2347 struct fs_path *p = NULL; 2348 struct btrfs_inode_item *ii; 2349 struct btrfs_path *path = NULL; 2350 struct extent_buffer *eb; 2351 struct btrfs_key key; 2352 int slot; 2353 2354 verbose_printk("btrfs: send_utimes %llu\n", ino); 2355 2356 p = fs_path_alloc(); 2357 if (!p) 2358 return -ENOMEM; 2359 2360 path = alloc_path_for_send(); 2361 if (!path) { 2362 ret = 
-ENOMEM; 2363 goto out; 2364 } 2365 2366 key.objectid = ino; 2367 key.type = BTRFS_INODE_ITEM_KEY; 2368 key.offset = 0; 2369 ret = btrfs_search_slot(NULL, sctx->send_root, &key, path, 0, 0); 2370 if (ret < 0) 2371 goto out; 2372 2373 eb = path->nodes[0]; 2374 slot = path->slots[0]; 2375 ii = btrfs_item_ptr(eb, slot, struct btrfs_inode_item); 2376 2377 ret = begin_cmd(sctx, BTRFS_SEND_C_UTIMES); 2378 if (ret < 0) 2379 goto out; 2380 2381 ret = get_cur_path(sctx, ino, gen, p); 2382 if (ret < 0) 2383 goto out; 2384 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); 2385 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_ATIME, eb, 2386 btrfs_inode_atime(ii)); 2387 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_MTIME, eb, 2388 btrfs_inode_mtime(ii)); 2389 TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_CTIME, eb, 2390 btrfs_inode_ctime(ii)); 2391 /* TODO Add otime support when the otime patches get into upstream */ 2392 2393 ret = send_cmd(sctx); 2394 2395 tlv_put_failure: 2396 out: 2397 fs_path_free(p); 2398 btrfs_free_path(path); 2399 return ret; 2400 } 2401 2402 /* 2403 * Sends a BTRFS_SEND_C_MKXXX or SYMLINK command to user space. We don't have 2404 * a valid path yet because we did not process the refs yet. So, the inode 2405 * is created as orphan. 2406 */ 2407 static int send_create_inode(struct send_ctx *sctx, u64 ino) 2408 { 2409 int ret = 0; 2410 struct fs_path *p; 2411 int cmd; 2412 u64 gen; 2413 u64 mode; 2414 u64 rdev; 2415 2416 verbose_printk("btrfs: send_create_inode %llu\n", ino); 2417 2418 p = fs_path_alloc(); 2419 if (!p) 2420 return -ENOMEM; 2421 2422 ret = get_inode_info(sctx->send_root, ino, NULL, &gen, &mode, NULL, 2423 NULL, &rdev); 2424 if (ret < 0) 2425 goto out; 2426 2427 if (S_ISREG(mode)) { 2428 cmd = BTRFS_SEND_C_MKFILE; 2429 } else if (S_ISDIR(mode)) { 2430 cmd = BTRFS_SEND_C_MKDIR; 2431 } else if (S_ISLNK(mode)) { 2432 cmd = BTRFS_SEND_C_SYMLINK; 2433 } else if (S_ISCHR(mode) || S_ISBLK(mode)) { 2434 cmd = BTRFS_SEND_C_MKNOD; 2435 } else if (S_ISFIFO(mode)) { 2436 cmd = BTRFS_SEND_C_MKFIFO; 2437 } else if (S_ISSOCK(mode)) { 2438 cmd = BTRFS_SEND_C_MKSOCK; 2439 } else { 2440 printk(KERN_WARNING "btrfs: unexpected inode type %o", 2441 (int)(mode & S_IFMT)); 2442 ret = -ENOTSUPP; 2443 goto out; 2444 } 2445 2446 ret = begin_cmd(sctx, cmd); 2447 if (ret < 0) 2448 goto out; 2449 2450 ret = gen_unique_name(sctx, ino, gen, p); 2451 if (ret < 0) 2452 goto out; 2453 2454 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); 2455 TLV_PUT_U64(sctx, BTRFS_SEND_A_INO, ino); 2456 2457 if (S_ISLNK(mode)) { 2458 fs_path_reset(p); 2459 ret = read_symlink(sctx->send_root, ino, p); 2460 if (ret < 0) 2461 goto out; 2462 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, p); 2463 } else if (S_ISCHR(mode) || S_ISBLK(mode) || 2464 S_ISFIFO(mode) || S_ISSOCK(mode)) { 2465 TLV_PUT_U64(sctx, BTRFS_SEND_A_RDEV, new_encode_dev(rdev)); 2466 TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode); 2467 } 2468 2469 ret = send_cmd(sctx); 2470 if (ret < 0) 2471 goto out; 2472 2473 2474 tlv_put_failure: 2475 out: 2476 fs_path_free(p); 2477 return ret; 2478 } 2479 2480 /* 2481 * We need some special handling for inodes that get processed before the parent 2482 * directory got created. See process_recorded_refs for details. 2483 * This function does the check if we already created the dir out of order. 
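 *
 * Illustrative example (the inode numbers are made up): if a file with
 * inode 259 sits in a directory with inode 261, the file is processed
 * first and needs its parent to exist, so the directory is created out of
 * order at that point. When inode 261 itself is processed later, the loop
 * below finds a dir item pointing to an inode (259) below send_progress
 * and returns 1, so the mkdir is skipped.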
2484 */ 2485 static int did_create_dir(struct send_ctx *sctx, u64 dir) 2486 { 2487 int ret = 0; 2488 struct btrfs_path *path = NULL; 2489 struct btrfs_key key; 2490 struct btrfs_key found_key; 2491 struct btrfs_key di_key; 2492 struct extent_buffer *eb; 2493 struct btrfs_dir_item *di; 2494 int slot; 2495 2496 path = alloc_path_for_send(); 2497 if (!path) { 2498 ret = -ENOMEM; 2499 goto out; 2500 } 2501 2502 key.objectid = dir; 2503 key.type = BTRFS_DIR_INDEX_KEY; 2504 key.offset = 0; 2505 while (1) { 2506 ret = btrfs_search_slot_for_read(sctx->send_root, &key, path, 2507 1, 0); 2508 if (ret < 0) 2509 goto out; 2510 if (!ret) { 2511 eb = path->nodes[0]; 2512 slot = path->slots[0]; 2513 btrfs_item_key_to_cpu(eb, &found_key, slot); 2514 } 2515 if (ret || found_key.objectid != key.objectid || 2516 found_key.type != key.type) { 2517 ret = 0; 2518 goto out; 2519 } 2520 2521 di = btrfs_item_ptr(eb, slot, struct btrfs_dir_item); 2522 btrfs_dir_item_key_to_cpu(eb, di, &di_key); 2523 2524 if (di_key.type != BTRFS_ROOT_ITEM_KEY && 2525 di_key.objectid < sctx->send_progress) { 2526 ret = 1; 2527 goto out; 2528 } 2529 2530 key.offset = found_key.offset + 1; 2531 btrfs_release_path(path); 2532 } 2533 2534 out: 2535 btrfs_free_path(path); 2536 return ret; 2537 } 2538 2539 /* 2540 * Only creates the inode if it is: 2541 * 1. Not a directory 2542 * 2. Or a directory which was not created already due to out of order 2543 * directories. See did_create_dir and process_recorded_refs for details. 2544 */ 2545 static int send_create_inode_if_needed(struct send_ctx *sctx) 2546 { 2547 int ret; 2548 2549 if (S_ISDIR(sctx->cur_inode_mode)) { 2550 ret = did_create_dir(sctx, sctx->cur_ino); 2551 if (ret < 0) 2552 goto out; 2553 if (ret) { 2554 ret = 0; 2555 goto out; 2556 } 2557 } 2558 2559 ret = send_create_inode(sctx, sctx->cur_ino); 2560 if (ret < 0) 2561 goto out; 2562 2563 out: 2564 return ret; 2565 } 2566 2567 struct recorded_ref { 2568 struct list_head list; 2569 char *dir_path; 2570 char *name; 2571 struct fs_path *full_path; 2572 u64 dir; 2573 u64 dir_gen; 2574 int dir_path_len; 2575 int name_len; 2576 }; 2577 2578 /* 2579 * We need to process new refs before deleted refs, but compare_tree gives us 2580 * everything mixed. So we first record all refs and later process them. 2581 * This function is a helper to record one ref. 
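 *
 * For illustration, given a full_path of "a/b/c" the fields computed
 * below come out as name = "c", name_len = 1 and dir_path_len = 3 (i.e.
 * "a/b"); the "- 1" accounts for the '/' between dir path and name.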
2582 */ 2583 static int record_ref(struct list_head *head, u64 dir, 2584 u64 dir_gen, struct fs_path *path) 2585 { 2586 struct recorded_ref *ref; 2587 2588 ref = kmalloc(sizeof(*ref), GFP_NOFS); 2589 if (!ref) 2590 return -ENOMEM; 2591 2592 ref->dir = dir; 2593 ref->dir_gen = dir_gen; 2594 ref->full_path = path; 2595 2596 ref->name = (char *)kbasename(ref->full_path->start); 2597 ref->name_len = ref->full_path->end - ref->name; 2598 ref->dir_path = ref->full_path->start; 2599 if (ref->name == ref->full_path->start) 2600 ref->dir_path_len = 0; 2601 else 2602 ref->dir_path_len = ref->full_path->end - 2603 ref->full_path->start - 1 - ref->name_len; 2604 2605 list_add_tail(&ref->list, head); 2606 return 0; 2607 } 2608 2609 static int dup_ref(struct recorded_ref *ref, struct list_head *list) 2610 { 2611 struct recorded_ref *new; 2612 2613 new = kmalloc(sizeof(*ref), GFP_NOFS); 2614 if (!new) 2615 return -ENOMEM; 2616 2617 new->dir = ref->dir; 2618 new->dir_gen = ref->dir_gen; 2619 new->full_path = NULL; 2620 INIT_LIST_HEAD(&new->list); 2621 list_add_tail(&new->list, list); 2622 return 0; 2623 } 2624 2625 static void __free_recorded_refs(struct list_head *head) 2626 { 2627 struct recorded_ref *cur; 2628 2629 while (!list_empty(head)) { 2630 cur = list_entry(head->next, struct recorded_ref, list); 2631 fs_path_free(cur->full_path); 2632 list_del(&cur->list); 2633 kfree(cur); 2634 } 2635 } 2636 2637 static void free_recorded_refs(struct send_ctx *sctx) 2638 { 2639 __free_recorded_refs(&sctx->new_refs); 2640 __free_recorded_refs(&sctx->deleted_refs); 2641 } 2642 2643 /* 2644 * Renames/moves a file/dir to its orphan name. Used when the first 2645 * ref of an unprocessed inode gets overwritten and for all non empty 2646 * directories. 2647 */ 2648 static int orphanize_inode(struct send_ctx *sctx, u64 ino, u64 gen, 2649 struct fs_path *path) 2650 { 2651 int ret; 2652 struct fs_path *orphan; 2653 2654 orphan = fs_path_alloc(); 2655 if (!orphan) 2656 return -ENOMEM; 2657 2658 ret = gen_unique_name(sctx, ino, gen, orphan); 2659 if (ret < 0) 2660 goto out; 2661 2662 ret = send_rename(sctx, path, orphan); 2663 2664 out: 2665 fs_path_free(orphan); 2666 return ret; 2667 } 2668 2669 /* 2670 * Returns 1 if a directory can be removed at this point in time. 2671 * We check this by iterating all dir items and checking if the inode behind 2672 * the dir item was already processed. 2673 */ 2674 static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 send_progress) 2675 { 2676 int ret = 0; 2677 struct btrfs_root *root = sctx->parent_root; 2678 struct btrfs_path *path; 2679 struct btrfs_key key; 2680 struct btrfs_key found_key; 2681 struct btrfs_key loc; 2682 struct btrfs_dir_item *di; 2683 2684 /* 2685 * Don't try to rmdir the top/root subvolume dir. 
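 *
 * BTRFS_FIRST_FREE_OBJECTID is the inode number of the subvolume's root
 * directory, which is what the check below catches. A dir item pointing
 * to an inode above send_progress belongs to an inode that was not
 * processed yet, which is why the loop below treats it as blocking the
 * rmdir.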
2686 */ 2687 if (dir == BTRFS_FIRST_FREE_OBJECTID) 2688 return 0; 2689 2690 path = alloc_path_for_send(); 2691 if (!path) 2692 return -ENOMEM; 2693 2694 key.objectid = dir; 2695 key.type = BTRFS_DIR_INDEX_KEY; 2696 key.offset = 0; 2697 2698 while (1) { 2699 ret = btrfs_search_slot_for_read(root, &key, path, 1, 0); 2700 if (ret < 0) 2701 goto out; 2702 if (!ret) { 2703 btrfs_item_key_to_cpu(path->nodes[0], &found_key, 2704 path->slots[0]); 2705 } 2706 if (ret || found_key.objectid != key.objectid || 2707 found_key.type != key.type) { 2708 break; 2709 } 2710 2711 di = btrfs_item_ptr(path->nodes[0], path->slots[0], 2712 struct btrfs_dir_item); 2713 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &loc); 2714 2715 if (loc.objectid > send_progress) { 2716 ret = 0; 2717 goto out; 2718 } 2719 2720 btrfs_release_path(path); 2721 key.offset = found_key.offset + 1; 2722 } 2723 2724 ret = 1; 2725 2726 out: 2727 btrfs_free_path(path); 2728 return ret; 2729 } 2730 2731 static int is_waiting_for_move(struct send_ctx *sctx, u64 ino) 2732 { 2733 struct rb_node *n = sctx->waiting_dir_moves.rb_node; 2734 struct waiting_dir_move *entry; 2735 2736 while (n) { 2737 entry = rb_entry(n, struct waiting_dir_move, node); 2738 if (ino < entry->ino) 2739 n = n->rb_left; 2740 else if (ino > entry->ino) 2741 n = n->rb_right; 2742 else 2743 return 1; 2744 } 2745 return 0; 2746 } 2747 2748 static int add_waiting_dir_move(struct send_ctx *sctx, u64 ino) 2749 { 2750 struct rb_node **p = &sctx->waiting_dir_moves.rb_node; 2751 struct rb_node *parent = NULL; 2752 struct waiting_dir_move *entry, *dm; 2753 2754 dm = kmalloc(sizeof(*dm), GFP_NOFS); 2755 if (!dm) 2756 return -ENOMEM; 2757 dm->ino = ino; 2758 2759 while (*p) { 2760 parent = *p; 2761 entry = rb_entry(parent, struct waiting_dir_move, node); 2762 if (ino < entry->ino) { 2763 p = &(*p)->rb_left; 2764 } else if (ino > entry->ino) { 2765 p = &(*p)->rb_right; 2766 } else { 2767 kfree(dm); 2768 return -EEXIST; 2769 } 2770 } 2771 2772 rb_link_node(&dm->node, parent, p); 2773 rb_insert_color(&dm->node, &sctx->waiting_dir_moves); 2774 return 0; 2775 } 2776 2777 #ifdef CONFIG_BTRFS_ASSERT 2778 2779 static int del_waiting_dir_move(struct send_ctx *sctx, u64 ino) 2780 { 2781 struct rb_node *n = sctx->waiting_dir_moves.rb_node; 2782 struct waiting_dir_move *entry; 2783 2784 while (n) { 2785 entry = rb_entry(n, struct waiting_dir_move, node); 2786 if (ino < entry->ino) { 2787 n = n->rb_left; 2788 } else if (ino > entry->ino) { 2789 n = n->rb_right; 2790 } else { 2791 rb_erase(&entry->node, &sctx->waiting_dir_moves); 2792 kfree(entry); 2793 return 0; 2794 } 2795 } 2796 return -ENOENT; 2797 } 2798 2799 #endif 2800 2801 static int add_pending_dir_move(struct send_ctx *sctx, u64 parent_ino) 2802 { 2803 struct rb_node **p = &sctx->pending_dir_moves.rb_node; 2804 struct rb_node *parent = NULL; 2805 struct pending_dir_move *entry, *pm; 2806 struct recorded_ref *cur; 2807 int exists = 0; 2808 int ret; 2809 2810 pm = kmalloc(sizeof(*pm), GFP_NOFS); 2811 if (!pm) 2812 return -ENOMEM; 2813 pm->parent_ino = parent_ino; 2814 pm->ino = sctx->cur_ino; 2815 pm->gen = sctx->cur_inode_gen; 2816 INIT_LIST_HEAD(&pm->list); 2817 INIT_LIST_HEAD(&pm->update_refs); 2818 RB_CLEAR_NODE(&pm->node); 2819 2820 while (*p) { 2821 parent = *p; 2822 entry = rb_entry(parent, struct pending_dir_move, node); 2823 if (parent_ino < entry->parent_ino) { 2824 p = &(*p)->rb_left; 2825 } else if (parent_ino > entry->parent_ino) { 2826 p = &(*p)->rb_right; 2827 } else { 2828 exists = 1; 2829 break; 2830 } 2831 } 2832 2833 
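	/*
	 * Duplicate the refs recorded for the current inode into
	 * pm->update_refs, so that apply_dir_move() can update the utimes
	 * of all old and new parent directories once the deferred rename
	 * is finally performed.
	 */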
list_for_each_entry(cur, &sctx->deleted_refs, list) { 2834 ret = dup_ref(cur, &pm->update_refs); 2835 if (ret < 0) 2836 goto out; 2837 } 2838 list_for_each_entry(cur, &sctx->new_refs, list) { 2839 ret = dup_ref(cur, &pm->update_refs); 2840 if (ret < 0) 2841 goto out; 2842 } 2843 2844 ret = add_waiting_dir_move(sctx, pm->ino); 2845 if (ret) 2846 goto out; 2847 2848 if (exists) { 2849 list_add_tail(&pm->list, &entry->list); 2850 } else { 2851 rb_link_node(&pm->node, parent, p); 2852 rb_insert_color(&pm->node, &sctx->pending_dir_moves); 2853 } 2854 ret = 0; 2855 out: 2856 if (ret) { 2857 __free_recorded_refs(&pm->update_refs); 2858 kfree(pm); 2859 } 2860 return ret; 2861 } 2862 2863 static struct pending_dir_move *get_pending_dir_moves(struct send_ctx *sctx, 2864 u64 parent_ino) 2865 { 2866 struct rb_node *n = sctx->pending_dir_moves.rb_node; 2867 struct pending_dir_move *entry; 2868 2869 while (n) { 2870 entry = rb_entry(n, struct pending_dir_move, node); 2871 if (parent_ino < entry->parent_ino) 2872 n = n->rb_left; 2873 else if (parent_ino > entry->parent_ino) 2874 n = n->rb_right; 2875 else 2876 return entry; 2877 } 2878 return NULL; 2879 } 2880 2881 static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm) 2882 { 2883 struct fs_path *from_path = NULL; 2884 struct fs_path *to_path = NULL; 2885 u64 orig_progress = sctx->send_progress; 2886 struct recorded_ref *cur; 2887 int ret; 2888 2889 from_path = fs_path_alloc(); 2890 if (!from_path) 2891 return -ENOMEM; 2892 2893 sctx->send_progress = pm->ino; 2894 ret = get_cur_path(sctx, pm->ino, pm->gen, from_path); 2895 if (ret < 0) 2896 goto out; 2897 2898 to_path = fs_path_alloc(); 2899 if (!to_path) { 2900 ret = -ENOMEM; 2901 goto out; 2902 } 2903 2904 sctx->send_progress = sctx->cur_ino + 1; 2905 ASSERT(del_waiting_dir_move(sctx, pm->ino) == 0); 2906 ret = get_cur_path(sctx, pm->ino, pm->gen, to_path); 2907 if (ret < 0) 2908 goto out; 2909 2910 ret = send_rename(sctx, from_path, to_path); 2911 if (ret < 0) 2912 goto out; 2913 2914 ret = send_utimes(sctx, pm->ino, pm->gen); 2915 if (ret < 0) 2916 goto out; 2917 2918 /* 2919 * After rename/move, need to update the utimes of both new parent(s) 2920 * and old parent(s). 
2921 */ 2922 list_for_each_entry(cur, &pm->update_refs, list) { 2923 ret = send_utimes(sctx, cur->dir, cur->dir_gen); 2924 if (ret < 0) 2925 goto out; 2926 } 2927 2928 out: 2929 fs_path_free(from_path); 2930 fs_path_free(to_path); 2931 sctx->send_progress = orig_progress; 2932 2933 return ret; 2934 } 2935 2936 static void free_pending_move(struct send_ctx *sctx, struct pending_dir_move *m) 2937 { 2938 if (!list_empty(&m->list)) 2939 list_del(&m->list); 2940 if (!RB_EMPTY_NODE(&m->node)) 2941 rb_erase(&m->node, &sctx->pending_dir_moves); 2942 __free_recorded_refs(&m->update_refs); 2943 kfree(m); 2944 } 2945 2946 static void tail_append_pending_moves(struct pending_dir_move *moves, 2947 struct list_head *stack) 2948 { 2949 if (list_empty(&moves->list)) { 2950 list_add_tail(&moves->list, stack); 2951 } else { 2952 LIST_HEAD(list); 2953 list_splice_init(&moves->list, &list); 2954 list_add_tail(&moves->list, stack); 2955 list_splice_tail(&list, stack); 2956 } 2957 } 2958 2959 static int apply_children_dir_moves(struct send_ctx *sctx) 2960 { 2961 struct pending_dir_move *pm; 2962 struct list_head stack; 2963 u64 parent_ino = sctx->cur_ino; 2964 int ret = 0; 2965 2966 pm = get_pending_dir_moves(sctx, parent_ino); 2967 if (!pm) 2968 return 0; 2969 2970 INIT_LIST_HEAD(&stack); 2971 tail_append_pending_moves(pm, &stack); 2972 2973 while (!list_empty(&stack)) { 2974 pm = list_first_entry(&stack, struct pending_dir_move, list); 2975 parent_ino = pm->ino; 2976 ret = apply_dir_move(sctx, pm); 2977 free_pending_move(sctx, pm); 2978 if (ret) 2979 goto out; 2980 pm = get_pending_dir_moves(sctx, parent_ino); 2981 if (pm) 2982 tail_append_pending_moves(pm, &stack); 2983 } 2984 return 0; 2985 2986 out: 2987 while (!list_empty(&stack)) { 2988 pm = list_first_entry(&stack, struct pending_dir_move, list); 2989 free_pending_move(sctx, pm); 2990 } 2991 return ret; 2992 } 2993 2994 static int wait_for_parent_move(struct send_ctx *sctx, 2995 struct recorded_ref *parent_ref) 2996 { 2997 int ret; 2998 u64 ino = parent_ref->dir; 2999 u64 parent_ino_before, parent_ino_after; 3000 u64 new_gen, old_gen; 3001 struct fs_path *path_before = NULL; 3002 struct fs_path *path_after = NULL; 3003 int len1, len2; 3004 3005 if (parent_ref->dir <= sctx->cur_ino) 3006 return 0; 3007 3008 if (is_waiting_for_move(sctx, ino)) 3009 return 1; 3010 3011 ret = get_inode_info(sctx->parent_root, ino, NULL, &old_gen, 3012 NULL, NULL, NULL, NULL); 3013 if (ret == -ENOENT) 3014 return 0; 3015 else if (ret < 0) 3016 return ret; 3017 3018 ret = get_inode_info(sctx->send_root, ino, NULL, &new_gen, 3019 NULL, NULL, NULL, NULL); 3020 if (ret < 0) 3021 return ret; 3022 3023 if (new_gen != old_gen) 3024 return 0; 3025 3026 path_before = fs_path_alloc(); 3027 if (!path_before) 3028 return -ENOMEM; 3029 3030 ret = get_first_ref(sctx->parent_root, ino, &parent_ino_before, 3031 NULL, path_before); 3032 if (ret == -ENOENT) { 3033 ret = 0; 3034 goto out; 3035 } else if (ret < 0) { 3036 goto out; 3037 } 3038 3039 path_after = fs_path_alloc(); 3040 if (!path_after) { 3041 ret = -ENOMEM; 3042 goto out; 3043 } 3044 3045 ret = get_first_ref(sctx->send_root, ino, &parent_ino_after, 3046 NULL, path_after); 3047 if (ret == -ENOENT) { 3048 ret = 0; 3049 goto out; 3050 } else if (ret < 0) { 3051 goto out; 3052 } 3053 3054 len1 = fs_path_len(path_before); 3055 len2 = fs_path_len(path_after); 3056 if ((parent_ino_before != parent_ino_after) && (len1 != len2 || 3057 memcmp(path_before->start, path_after->start, len1))) { 3058 ret = 1; 3059 goto out; 3060 } 3061 ret = 0; 
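	/*
	 * Getting here means the ref's parent directory either kept the
	 * same parent inode or the same first ref path across the two
	 * snapshots, so this rename/move does not have to wait for it.
	 */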
3062 3063 out: 3064 fs_path_free(path_before); 3065 fs_path_free(path_after); 3066 3067 return ret; 3068 } 3069 3070 /* 3071 * This does all the move/link/unlink/rmdir magic. 3072 */ 3073 static int process_recorded_refs(struct send_ctx *sctx, int *pending_move) 3074 { 3075 int ret = 0; 3076 struct recorded_ref *cur; 3077 struct recorded_ref *cur2; 3078 struct list_head check_dirs; 3079 struct fs_path *valid_path = NULL; 3080 u64 ow_inode = 0; 3081 u64 ow_gen; 3082 int did_overwrite = 0; 3083 int is_orphan = 0; 3084 3085 verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino); 3086 3087 /* 3088 * This should never happen as the root dir always has the same ref 3089 * which is always '..' 3090 */ 3091 BUG_ON(sctx->cur_ino <= BTRFS_FIRST_FREE_OBJECTID); 3092 INIT_LIST_HEAD(&check_dirs); 3093 3094 valid_path = fs_path_alloc(); 3095 if (!valid_path) { 3096 ret = -ENOMEM; 3097 goto out; 3098 } 3099 3100 /* 3101 * First, check if the first ref of the current inode was overwritten 3102 * before. If yes, we know that the current inode was already orphanized 3103 * and thus use the orphan name. If not, we can use get_cur_path to 3104 * get the path of the first ref as it would look while receiving at 3105 * this point in time. 3106 * New inodes are always orphan at the beginning, so we force the use of 3107 * the orphan name in this case. 3108 * The first ref is stored in valid_path and will be updated if it 3109 * gets moved around. 3110 */ 3111 if (!sctx->cur_inode_new) { 3112 ret = did_overwrite_first_ref(sctx, sctx->cur_ino, 3113 sctx->cur_inode_gen); 3114 if (ret < 0) 3115 goto out; 3116 if (ret) 3117 did_overwrite = 1; 3118 } 3119 if (sctx->cur_inode_new || did_overwrite) { 3120 ret = gen_unique_name(sctx, sctx->cur_ino, 3121 sctx->cur_inode_gen, valid_path); 3122 if (ret < 0) 3123 goto out; 3124 is_orphan = 1; 3125 } else { 3126 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, 3127 valid_path); 3128 if (ret < 0) 3129 goto out; 3130 } 3131 3132 list_for_each_entry(cur, &sctx->new_refs, list) { 3133 /* 3134 * We may have refs where the parent directory does not exist 3135 * yet. This happens if the parent directory's inum is higher 3136 * than the current inum. To handle this case, we create the 3137 * parent directory out of order. But we need to check if this 3138 * already happened before due to other refs in the same dir. 3139 */ 3140 ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen); 3141 if (ret < 0) 3142 goto out; 3143 if (ret == inode_state_will_create) { 3144 ret = 0; 3145 /* 3146 * First check if any of the current inode's refs did 3147 * already create the dir. 3148 */ 3149 list_for_each_entry(cur2, &sctx->new_refs, list) { 3150 if (cur == cur2) 3151 break; 3152 if (cur2->dir == cur->dir) { 3153 ret = 1; 3154 break; 3155 } 3156 } 3157 3158 /* 3159 * If that did not happen, check if a previous inode 3160 * did already create the dir. 3161 */ 3162 if (!ret) 3163 ret = did_create_dir(sctx, cur->dir); 3164 if (ret < 0) 3165 goto out; 3166 if (!ret) { 3167 ret = send_create_inode(sctx, cur->dir); 3168 if (ret < 0) 3169 goto out; 3170 } 3171 } 3172 3173 /* 3174 * Check if this new ref would overwrite the first ref of 3175 * another unprocessed inode. If yes, orphanize the 3176 * overwritten inode. If we find an overwritten ref that is 3177 * not the first ref, simply unlink it.
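		 *
		 * Illustrative example (inode numbers made up): if a new
		 * ref "a/x" of inode 300 takes over a name that is still
		 * the first ref of unprocessed inode 400, inode 400 is
		 * first renamed to the unique orphan name generated by
		 * gen_unique_name. When inode 400 is processed later, it
		 * is moved from that orphan name to its final location.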
3178 */ 3179 ret = will_overwrite_ref(sctx, cur->dir, cur->dir_gen, 3180 cur->name, cur->name_len, 3181 &ow_inode, &ow_gen); 3182 if (ret < 0) 3183 goto out; 3184 if (ret) { 3185 ret = is_first_ref(sctx->parent_root, 3186 ow_inode, cur->dir, cur->name, 3187 cur->name_len); 3188 if (ret < 0) 3189 goto out; 3190 if (ret) { 3191 ret = orphanize_inode(sctx, ow_inode, ow_gen, 3192 cur->full_path); 3193 if (ret < 0) 3194 goto out; 3195 } else { 3196 ret = send_unlink(sctx, cur->full_path); 3197 if (ret < 0) 3198 goto out; 3199 } 3200 } 3201 3202 /* 3203 * link/move the ref to the new place. If we have an orphan 3204 * inode, move it and update valid_path. If not, link or move 3205 * it depending on the inode mode. 3206 */ 3207 if (is_orphan) { 3208 ret = send_rename(sctx, valid_path, cur->full_path); 3209 if (ret < 0) 3210 goto out; 3211 is_orphan = 0; 3212 ret = fs_path_copy(valid_path, cur->full_path); 3213 if (ret < 0) 3214 goto out; 3215 } else { 3216 if (S_ISDIR(sctx->cur_inode_mode)) { 3217 /* 3218 * Dirs can't be linked, so move it. For moved 3219 * dirs, we always have one new and one deleted 3220 * ref. The deleted ref is ignored later. 3221 */ 3222 if (wait_for_parent_move(sctx, cur)) { 3223 ret = add_pending_dir_move(sctx, 3224 cur->dir); 3225 *pending_move = 1; 3226 } else { 3227 ret = send_rename(sctx, valid_path, 3228 cur->full_path); 3229 if (!ret) 3230 ret = fs_path_copy(valid_path, 3231 cur->full_path); 3232 } 3233 if (ret < 0) 3234 goto out; 3235 } else { 3236 ret = send_link(sctx, cur->full_path, 3237 valid_path); 3238 if (ret < 0) 3239 goto out; 3240 } 3241 } 3242 ret = dup_ref(cur, &check_dirs); 3243 if (ret < 0) 3244 goto out; 3245 } 3246 3247 if (S_ISDIR(sctx->cur_inode_mode) && sctx->cur_inode_deleted) { 3248 /* 3249 * Check if we can already rmdir the directory. If not, 3250 * orphanize it. For every dir item inside that gets deleted 3251 * later, we do this check again and rmdir it then if possible. 3252 * See the use of check_dirs for more details. 3253 */ 3254 ret = can_rmdir(sctx, sctx->cur_ino, sctx->cur_ino); 3255 if (ret < 0) 3256 goto out; 3257 if (ret) { 3258 ret = send_rmdir(sctx, valid_path); 3259 if (ret < 0) 3260 goto out; 3261 } else if (!is_orphan) { 3262 ret = orphanize_inode(sctx, sctx->cur_ino, 3263 sctx->cur_inode_gen, valid_path); 3264 if (ret < 0) 3265 goto out; 3266 is_orphan = 1; 3267 } 3268 3269 list_for_each_entry(cur, &sctx->deleted_refs, list) { 3270 ret = dup_ref(cur, &check_dirs); 3271 if (ret < 0) 3272 goto out; 3273 } 3274 } else if (S_ISDIR(sctx->cur_inode_mode) && 3275 !list_empty(&sctx->deleted_refs)) { 3276 /* 3277 * We have a moved dir. Add the old parent to check_dirs 3278 */ 3279 cur = list_entry(sctx->deleted_refs.next, struct recorded_ref, 3280 list); 3281 ret = dup_ref(cur, &check_dirs); 3282 if (ret < 0) 3283 goto out; 3284 } else if (!S_ISDIR(sctx->cur_inode_mode)) { 3285 /* 3286 * We have a non dir inode. Go through all deleted refs and 3287 * unlink them if they were not already overwritten by other 3288 * inodes. 3289 */ 3290 list_for_each_entry(cur, &sctx->deleted_refs, list) { 3291 ret = did_overwrite_ref(sctx, cur->dir, cur->dir_gen, 3292 sctx->cur_ino, sctx->cur_inode_gen, 3293 cur->name, cur->name_len); 3294 if (ret < 0) 3295 goto out; 3296 if (!ret) { 3297 ret = send_unlink(sctx, cur->full_path); 3298 if (ret < 0) 3299 goto out; 3300 } 3301 ret = dup_ref(cur, &check_dirs); 3302 if (ret < 0) 3303 goto out; 3304 } 3305 /* 3306 * If the inode is still orphan, unlink the orphan. 
This may 3307 * happen when a previous inode did overwrite the first ref 3308 * of this inode and no new refs were added for the current 3309 * inode. Unlinking does not mean that the inode is deleted in 3310 * all cases. There may still be links to this inode in other 3311 * places. 3312 */ 3313 if (is_orphan) { 3314 ret = send_unlink(sctx, valid_path); 3315 if (ret < 0) 3316 goto out; 3317 } 3318 } 3319 3320 /* 3321 * We did collect all parent dirs where cur_inode was once located. We 3322 * now go through all these dirs and check if they are pending for 3323 * deletion and if it's finally possible to perform the rmdir now. 3324 * We also update the inode stats of the parent dirs here. 3325 */ 3326 list_for_each_entry(cur, &check_dirs, list) { 3327 /* 3328 * In case we had refs into dirs that were not processed yet, 3329 * we don't need to do the utime and rmdir logic for these dirs. 3330 * The dir will be processed later. 3331 */ 3332 if (cur->dir > sctx->cur_ino) 3333 continue; 3334 3335 ret = get_cur_inode_state(sctx, cur->dir, cur->dir_gen); 3336 if (ret < 0) 3337 goto out; 3338 3339 if (ret == inode_state_did_create || 3340 ret == inode_state_no_change) { 3341 /* TODO delayed utimes */ 3342 ret = send_utimes(sctx, cur->dir, cur->dir_gen); 3343 if (ret < 0) 3344 goto out; 3345 } else if (ret == inode_state_did_delete) { 3346 ret = can_rmdir(sctx, cur->dir, sctx->cur_ino); 3347 if (ret < 0) 3348 goto out; 3349 if (ret) { 3350 ret = get_cur_path(sctx, cur->dir, 3351 cur->dir_gen, valid_path); 3352 if (ret < 0) 3353 goto out; 3354 ret = send_rmdir(sctx, valid_path); 3355 if (ret < 0) 3356 goto out; 3357 } 3358 } 3359 } 3360 3361 ret = 0; 3362 3363 out: 3364 __free_recorded_refs(&check_dirs); 3365 free_recorded_refs(sctx); 3366 fs_path_free(valid_path); 3367 return ret; 3368 } 3369 3370 static int __record_new_ref(int num, u64 dir, int index, 3371 struct fs_path *name, 3372 void *ctx) 3373 { 3374 int ret = 0; 3375 struct send_ctx *sctx = ctx; 3376 struct fs_path *p; 3377 u64 gen; 3378 3379 p = fs_path_alloc(); 3380 if (!p) 3381 return -ENOMEM; 3382 3383 ret = get_inode_info(sctx->send_root, dir, NULL, &gen, NULL, NULL, 3384 NULL, NULL); 3385 if (ret < 0) 3386 goto out; 3387 3388 ret = get_cur_path(sctx, dir, gen, p); 3389 if (ret < 0) 3390 goto out; 3391 ret = fs_path_add_path(p, name); 3392 if (ret < 0) 3393 goto out; 3394 3395 ret = record_ref(&sctx->new_refs, dir, gen, p); 3396 3397 out: 3398 if (ret) 3399 fs_path_free(p); 3400 return ret; 3401 } 3402 3403 static int __record_deleted_ref(int num, u64 dir, int index, 3404 struct fs_path *name, 3405 void *ctx) 3406 { 3407 int ret = 0; 3408 struct send_ctx *sctx = ctx; 3409 struct fs_path *p; 3410 u64 gen; 3411 3412 p = fs_path_alloc(); 3413 if (!p) 3414 return -ENOMEM; 3415 3416 ret = get_inode_info(sctx->parent_root, dir, NULL, &gen, NULL, NULL, 3417 NULL, NULL); 3418 if (ret < 0) 3419 goto out; 3420 3421 ret = get_cur_path(sctx, dir, gen, p); 3422 if (ret < 0) 3423 goto out; 3424 ret = fs_path_add_path(p, name); 3425 if (ret < 0) 3426 goto out; 3427 3428 ret = record_ref(&sctx->deleted_refs, dir, gen, p); 3429 3430 out: 3431 if (ret) 3432 fs_path_free(p); 3433 return ret; 3434 } 3435 3436 static int record_new_ref(struct send_ctx *sctx) 3437 { 3438 int ret; 3439 3440 ret = iterate_inode_ref(sctx->send_root, sctx->left_path, 3441 sctx->cmp_key, 0, __record_new_ref, sctx); 3442 if (ret < 0) 3443 goto out; 3444 ret = 0; 3445 3446 out: 3447 return ret; 3448 } 3449 3450 static int record_deleted_ref(struct send_ctx *sctx) 3451 { 3452 int 
ret; 3453 3454 ret = iterate_inode_ref(sctx->parent_root, sctx->right_path, 3455 sctx->cmp_key, 0, __record_deleted_ref, sctx); 3456 if (ret < 0) 3457 goto out; 3458 ret = 0; 3459 3460 out: 3461 return ret; 3462 } 3463 3464 struct find_ref_ctx { 3465 u64 dir; 3466 u64 dir_gen; 3467 struct btrfs_root *root; 3468 struct fs_path *name; 3469 int found_idx; 3470 }; 3471 3472 static int __find_iref(int num, u64 dir, int index, 3473 struct fs_path *name, 3474 void *ctx_) 3475 { 3476 struct find_ref_ctx *ctx = ctx_; 3477 u64 dir_gen; 3478 int ret; 3479 3480 if (dir == ctx->dir && fs_path_len(name) == fs_path_len(ctx->name) && 3481 strncmp(name->start, ctx->name->start, fs_path_len(name)) == 0) { 3482 /* 3483 * To avoid doing extra lookups we'll only do this if everything 3484 * else matches. 3485 */ 3486 ret = get_inode_info(ctx->root, dir, NULL, &dir_gen, NULL, 3487 NULL, NULL, NULL); 3488 if (ret) 3489 return ret; 3490 if (dir_gen != ctx->dir_gen) 3491 return 0; 3492 ctx->found_idx = num; 3493 return 1; 3494 } 3495 return 0; 3496 } 3497 3498 static int find_iref(struct btrfs_root *root, 3499 struct btrfs_path *path, 3500 struct btrfs_key *key, 3501 u64 dir, u64 dir_gen, struct fs_path *name) 3502 { 3503 int ret; 3504 struct find_ref_ctx ctx; 3505 3506 ctx.dir = dir; 3507 ctx.name = name; 3508 ctx.dir_gen = dir_gen; 3509 ctx.found_idx = -1; 3510 ctx.root = root; 3511 3512 ret = iterate_inode_ref(root, path, key, 0, __find_iref, &ctx); 3513 if (ret < 0) 3514 return ret; 3515 3516 if (ctx.found_idx == -1) 3517 return -ENOENT; 3518 3519 return ctx.found_idx; 3520 } 3521 3522 static int __record_changed_new_ref(int num, u64 dir, int index, 3523 struct fs_path *name, 3524 void *ctx) 3525 { 3526 u64 dir_gen; 3527 int ret; 3528 struct send_ctx *sctx = ctx; 3529 3530 ret = get_inode_info(sctx->send_root, dir, NULL, &dir_gen, NULL, 3531 NULL, NULL, NULL); 3532 if (ret) 3533 return ret; 3534 3535 ret = find_iref(sctx->parent_root, sctx->right_path, 3536 sctx->cmp_key, dir, dir_gen, name); 3537 if (ret == -ENOENT) 3538 ret = __record_new_ref(num, dir, index, name, sctx); 3539 else if (ret > 0) 3540 ret = 0; 3541 3542 return ret; 3543 } 3544 3545 static int __record_changed_deleted_ref(int num, u64 dir, int index, 3546 struct fs_path *name, 3547 void *ctx) 3548 { 3549 u64 dir_gen; 3550 int ret; 3551 struct send_ctx *sctx = ctx; 3552 3553 ret = get_inode_info(sctx->parent_root, dir, NULL, &dir_gen, NULL, 3554 NULL, NULL, NULL); 3555 if (ret) 3556 return ret; 3557 3558 ret = find_iref(sctx->send_root, sctx->left_path, sctx->cmp_key, 3559 dir, dir_gen, name); 3560 if (ret == -ENOENT) 3561 ret = __record_deleted_ref(num, dir, index, name, sctx); 3562 else if (ret > 0) 3563 ret = 0; 3564 3565 return ret; 3566 } 3567 3568 static int record_changed_ref(struct send_ctx *sctx) 3569 { 3570 int ret = 0; 3571 3572 ret = iterate_inode_ref(sctx->send_root, sctx->left_path, 3573 sctx->cmp_key, 0, __record_changed_new_ref, sctx); 3574 if (ret < 0) 3575 goto out; 3576 ret = iterate_inode_ref(sctx->parent_root, sctx->right_path, 3577 sctx->cmp_key, 0, __record_changed_deleted_ref, sctx); 3578 if (ret < 0) 3579 goto out; 3580 ret = 0; 3581 3582 out: 3583 return ret; 3584 } 3585 3586 /* 3587 * Record and process all refs at once. Needed when an inode changes the 3588 * generation number, which means that it was deleted and recreated. 
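 *
 * A changed generation for the same inode number is how a delete plus
 * recreate shows up in the tree comparison. In that case the refs of the
 * old inode (from the parent snapshot) are recorded as deleted and the
 * refs of the new inode (from the send snapshot) as new, and everything
 * is processed in one go.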
3589 */ 3590 static int process_all_refs(struct send_ctx *sctx, 3591 enum btrfs_compare_tree_result cmd) 3592 { 3593 int ret; 3594 struct btrfs_root *root; 3595 struct btrfs_path *path; 3596 struct btrfs_key key; 3597 struct btrfs_key found_key; 3598 struct extent_buffer *eb; 3599 int slot; 3600 iterate_inode_ref_t cb; 3601 int pending_move = 0; 3602 3603 path = alloc_path_for_send(); 3604 if (!path) 3605 return -ENOMEM; 3606 3607 if (cmd == BTRFS_COMPARE_TREE_NEW) { 3608 root = sctx->send_root; 3609 cb = __record_new_ref; 3610 } else if (cmd == BTRFS_COMPARE_TREE_DELETED) { 3611 root = sctx->parent_root; 3612 cb = __record_deleted_ref; 3613 } else { 3614 BUG(); 3615 } 3616 3617 key.objectid = sctx->cmp_key->objectid; 3618 key.type = BTRFS_INODE_REF_KEY; 3619 key.offset = 0; 3620 while (1) { 3621 ret = btrfs_search_slot_for_read(root, &key, path, 1, 0); 3622 if (ret < 0) 3623 goto out; 3624 if (ret) 3625 break; 3626 3627 eb = path->nodes[0]; 3628 slot = path->slots[0]; 3629 btrfs_item_key_to_cpu(eb, &found_key, slot); 3630 3631 if (found_key.objectid != key.objectid || 3632 (found_key.type != BTRFS_INODE_REF_KEY && 3633 found_key.type != BTRFS_INODE_EXTREF_KEY)) 3634 break; 3635 3636 ret = iterate_inode_ref(root, path, &found_key, 0, cb, sctx); 3637 btrfs_release_path(path); 3638 if (ret < 0) 3639 goto out; 3640 3641 key.offset = found_key.offset + 1; 3642 } 3643 btrfs_release_path(path); 3644 3645 ret = process_recorded_refs(sctx, &pending_move); 3646 /* Only applicable to an incremental send. */ 3647 ASSERT(pending_move == 0); 3648 3649 out: 3650 btrfs_free_path(path); 3651 return ret; 3652 } 3653 3654 static int send_set_xattr(struct send_ctx *sctx, 3655 struct fs_path *path, 3656 const char *name, int name_len, 3657 const char *data, int data_len) 3658 { 3659 int ret = 0; 3660 3661 ret = begin_cmd(sctx, BTRFS_SEND_C_SET_XATTR); 3662 if (ret < 0) 3663 goto out; 3664 3665 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path); 3666 TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len); 3667 TLV_PUT(sctx, BTRFS_SEND_A_XATTR_DATA, data, data_len); 3668 3669 ret = send_cmd(sctx); 3670 3671 tlv_put_failure: 3672 out: 3673 return ret; 3674 } 3675 3676 static int send_remove_xattr(struct send_ctx *sctx, 3677 struct fs_path *path, 3678 const char *name, int name_len) 3679 { 3680 int ret = 0; 3681 3682 ret = begin_cmd(sctx, BTRFS_SEND_C_REMOVE_XATTR); 3683 if (ret < 0) 3684 goto out; 3685 3686 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, path); 3687 TLV_PUT_STRING(sctx, BTRFS_SEND_A_XATTR_NAME, name, name_len); 3688 3689 ret = send_cmd(sctx); 3690 3691 tlv_put_failure: 3692 out: 3693 return ret; 3694 } 3695 3696 static int __process_new_xattr(int num, struct btrfs_key *di_key, 3697 const char *name, int name_len, 3698 const char *data, int data_len, 3699 u8 type, void *ctx) 3700 { 3701 int ret; 3702 struct send_ctx *sctx = ctx; 3703 struct fs_path *p; 3704 posix_acl_xattr_header dummy_acl; 3705 3706 p = fs_path_alloc(); 3707 if (!p) 3708 return -ENOMEM; 3709 3710 /* 3711 * This hack is needed because empty ACLs are stored as zero-byte 3712 * data in xattrs. The problem is that receiving these zero-byte 3713 * ACLs will fail later. To fix this, we send a dummy ACL list that 3714 * only contains the version number and no entries.
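	 *
	 * The dummy is nothing more than the 4-byte posix_acl_xattr_header
	 * with a_version set to POSIX_ACL_XATTR_VERSION and no entries
	 * following it, which receivers accept as a valid empty ACL.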
3715 */ 3716 if (!strncmp(name, XATTR_NAME_POSIX_ACL_ACCESS, name_len) || 3717 !strncmp(name, XATTR_NAME_POSIX_ACL_DEFAULT, name_len)) { 3718 if (data_len == 0) { 3719 dummy_acl.a_version = 3720 cpu_to_le32(POSIX_ACL_XATTR_VERSION); 3721 data = (char *)&dummy_acl; 3722 data_len = sizeof(dummy_acl); 3723 } 3724 } 3725 3726 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); 3727 if (ret < 0) 3728 goto out; 3729 3730 ret = send_set_xattr(sctx, p, name, name_len, data, data_len); 3731 3732 out: 3733 fs_path_free(p); 3734 return ret; 3735 } 3736 3737 static int __process_deleted_xattr(int num, struct btrfs_key *di_key, 3738 const char *name, int name_len, 3739 const char *data, int data_len, 3740 u8 type, void *ctx) 3741 { 3742 int ret; 3743 struct send_ctx *sctx = ctx; 3744 struct fs_path *p; 3745 3746 p = fs_path_alloc(); 3747 if (!p) 3748 return -ENOMEM; 3749 3750 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); 3751 if (ret < 0) 3752 goto out; 3753 3754 ret = send_remove_xattr(sctx, p, name, name_len); 3755 3756 out: 3757 fs_path_free(p); 3758 return ret; 3759 } 3760 3761 static int process_new_xattr(struct send_ctx *sctx) 3762 { 3763 int ret = 0; 3764 3765 ret = iterate_dir_item(sctx->send_root, sctx->left_path, 3766 sctx->cmp_key, __process_new_xattr, sctx); 3767 3768 return ret; 3769 } 3770 3771 static int process_deleted_xattr(struct send_ctx *sctx) 3772 { 3773 int ret; 3774 3775 ret = iterate_dir_item(sctx->parent_root, sctx->right_path, 3776 sctx->cmp_key, __process_deleted_xattr, sctx); 3777 3778 return ret; 3779 } 3780 3781 struct find_xattr_ctx { 3782 const char *name; 3783 int name_len; 3784 int found_idx; 3785 char *found_data; 3786 int found_data_len; 3787 }; 3788 3789 static int __find_xattr(int num, struct btrfs_key *di_key, 3790 const char *name, int name_len, 3791 const char *data, int data_len, 3792 u8 type, void *vctx) 3793 { 3794 struct find_xattr_ctx *ctx = vctx; 3795 3796 if (name_len == ctx->name_len && 3797 strncmp(name, ctx->name, name_len) == 0) { 3798 ctx->found_idx = num; 3799 ctx->found_data_len = data_len; 3800 ctx->found_data = kmemdup(data, data_len, GFP_NOFS); 3801 if (!ctx->found_data) 3802 return -ENOMEM; 3803 return 1; 3804 } 3805 return 0; 3806 } 3807 3808 static int find_xattr(struct btrfs_root *root, 3809 struct btrfs_path *path, 3810 struct btrfs_key *key, 3811 const char *name, int name_len, 3812 char **data, int *data_len) 3813 { 3814 int ret; 3815 struct find_xattr_ctx ctx; 3816 3817 ctx.name = name; 3818 ctx.name_len = name_len; 3819 ctx.found_idx = -1; 3820 ctx.found_data = NULL; 3821 ctx.found_data_len = 0; 3822 3823 ret = iterate_dir_item(root, path, key, __find_xattr, &ctx); 3824 if (ret < 0) 3825 return ret; 3826 3827 if (ctx.found_idx == -1) 3828 return -ENOENT; 3829 if (data) { 3830 *data = ctx.found_data; 3831 *data_len = ctx.found_data_len; 3832 } else { 3833 kfree(ctx.found_data); 3834 } 3835 return ctx.found_idx; 3836 } 3837 3838 3839 static int __process_changed_new_xattr(int num, struct btrfs_key *di_key, 3840 const char *name, int name_len, 3841 const char *data, int data_len, 3842 u8 type, void *ctx) 3843 { 3844 int ret; 3845 struct send_ctx *sctx = ctx; 3846 char *found_data = NULL; 3847 int found_data_len = 0; 3848 3849 ret = find_xattr(sctx->parent_root, sctx->right_path, 3850 sctx->cmp_key, name, name_len, &found_data, 3851 &found_data_len); 3852 if (ret == -ENOENT) { 3853 ret = __process_new_xattr(num, di_key, name, name_len, data, 3854 data_len, type, ctx); 3855 } else if (ret >= 0) { 3856 if 
(data_len != found_data_len || 3857 memcmp(data, found_data, data_len)) { 3858 ret = __process_new_xattr(num, di_key, name, name_len, 3859 data, data_len, type, ctx); 3860 } else { 3861 ret = 0; 3862 } 3863 3864 kfree(found_data); 3865 return ret; 3866 } 3867 3868 static int __process_changed_deleted_xattr(int num, struct btrfs_key *di_key, 3869 const char *name, int name_len, 3870 const char *data, int data_len, 3871 u8 type, void *ctx) 3872 3873 { 3874 int ret; 3875 struct send_ctx *sctx = ctx; 3876 3877 ret = find_xattr(sctx->send_root, sctx->left_path, sctx->cmp_key, 3878 name, name_len, NULL, NULL); 3879 if (ret == -ENOENT) 3880 ret = __process_deleted_xattr(num, di_key, name, name_len, data, 3881 data_len, type, ctx); 3882 else if (ret >= 0) 3883 ret = 0; 3884 3885 return ret; 3886 } 3887 3888 static int process_changed_xattr(struct send_ctx *sctx) 3889 { 3890 int ret = 0; 3891 3892 ret = iterate_dir_item(sctx->send_root, sctx->left_path, 3893 sctx->cmp_key, __process_changed_new_xattr, sctx); 3894 if (ret < 0) 3895 goto out; 3896 ret = iterate_dir_item(sctx->parent_root, sctx->right_path, 3897 sctx->cmp_key, __process_changed_deleted_xattr, sctx); 3898 3899 out: 3900 return ret; 3901 } 3902 3903 static int process_all_new_xattrs(struct send_ctx *sctx) 3904 { 3905 int ret; 3906 struct btrfs_root *root; 3907 struct btrfs_path *path; 3908 struct btrfs_key key; 3909 struct btrfs_key found_key; 3910 struct extent_buffer *eb; 3911 int slot; 3912 3913 path = alloc_path_for_send(); 3914 if (!path) 3915 return -ENOMEM; 3916 3917 root = sctx->send_root; 3918 3919 key.objectid = sctx->cmp_key->objectid; 3920 key.type = BTRFS_XATTR_ITEM_KEY; 3921 key.offset = 0; 3922 while (1) { 3923 ret = btrfs_search_slot_for_read(root, &key, path, 1, 0); 3924 if (ret < 0) 3925 goto out; 3926 if (ret) { 3927 ret = 0; 3928 goto out; 3929 } 3930 3931 eb = path->nodes[0]; 3932 slot = path->slots[0]; 3933 btrfs_item_key_to_cpu(eb, &found_key, slot); 3934 3935 if (found_key.objectid != key.objectid || 3936 found_key.type != key.type) { 3937 ret = 0; 3938 goto out; 3939 } 3940 3941 ret = iterate_dir_item(root, path, &found_key, 3942 __process_new_xattr, sctx); 3943 if (ret < 0) 3944 goto out; 3945 3946 btrfs_release_path(path); 3947 key.offset = found_key.offset + 1; 3948 } 3949 3950 out: 3951 btrfs_free_path(path); 3952 return ret; 3953 } 3954 3955 static ssize_t fill_read_buf(struct send_ctx *sctx, u64 offset, u32 len) 3956 { 3957 struct btrfs_root *root = sctx->send_root; 3958 struct btrfs_fs_info *fs_info = root->fs_info; 3959 struct inode *inode; 3960 struct page *page; 3961 char *addr; 3962 struct btrfs_key key; 3963 pgoff_t index = offset >> PAGE_CACHE_SHIFT; 3964 pgoff_t last_index; 3965 unsigned pg_offset = offset & ~PAGE_CACHE_MASK; 3966 ssize_t ret = 0; 3967 3968 key.objectid = sctx->cur_ino; 3969 key.type = BTRFS_INODE_ITEM_KEY; 3970 key.offset = 0; 3971 3972 inode = btrfs_iget(fs_info->sb, &key, root, NULL); 3973 if (IS_ERR(inode)) 3974 return PTR_ERR(inode); 3975 3976 if (offset + len > i_size_read(inode)) { 3977 if (offset > i_size_read(inode)) 3978 len = 0; 3979 else 3980 len = i_size_read(inode) - offset; 3981 } 3982 if (len == 0) 3983 goto out; 3984 3985 last_index = (offset + len - 1) >> PAGE_CACHE_SHIFT; 3986 while (index <= last_index) { 3987 unsigned cur_len = min_t(unsigned, len, 3988 PAGE_CACHE_SIZE - pg_offset); 3989 page = find_or_create_page(inode->i_mapping, index, GFP_NOFS); 3990 if (!page) { 3991 ret = -ENOMEM; 3992 break; 3993 } 3994 3995 if (!PageUptodate(page)) { 3996
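			/*
			 * find_or_create_page() returned a locked page.
			 * btrfs_readpage() consumes that lock and unlocks
			 * the page once the read completes, so taking the
			 * lock again below waits for the I/O; if the page
			 * is still not uptodate after that, the read
			 * failed.
			 */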
btrfs_readpage(NULL, page); 3997 lock_page(page); 3998 if (!PageUptodate(page)) { 3999 unlock_page(page); 4000 page_cache_release(page); 4001 ret = -EIO; 4002 break; 4003 } 4004 } 4005 4006 addr = kmap(page); 4007 memcpy(sctx->read_buf + ret, addr + pg_offset, cur_len); 4008 kunmap(page); 4009 unlock_page(page); 4010 page_cache_release(page); 4011 index++; 4012 pg_offset = 0; 4013 len -= cur_len; 4014 ret += cur_len; 4015 } 4016 out: 4017 iput(inode); 4018 return ret; 4019 } 4020 4021 /* 4022 * Read some bytes from the current inode/file and send a write command to 4023 * user space. 4024 */ 4025 static int send_write(struct send_ctx *sctx, u64 offset, u32 len) 4026 { 4027 int ret = 0; 4028 struct fs_path *p; 4029 ssize_t num_read = 0; 4030 4031 p = fs_path_alloc(); 4032 if (!p) 4033 return -ENOMEM; 4034 4035 verbose_printk("btrfs: send_write offset=%llu, len=%d\n", offset, len); 4036 4037 num_read = fill_read_buf(sctx, offset, len); 4038 if (num_read <= 0) { 4039 if (num_read < 0) 4040 ret = num_read; 4041 goto out; 4042 } 4043 4044 ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE); 4045 if (ret < 0) 4046 goto out; 4047 4048 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); 4049 if (ret < 0) 4050 goto out; 4051 4052 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); 4053 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset); 4054 TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, num_read); 4055 4056 ret = send_cmd(sctx); 4057 4058 tlv_put_failure: 4059 out: 4060 fs_path_free(p); 4061 if (ret < 0) 4062 return ret; 4063 return num_read; 4064 } 4065 4066 /* 4067 * Send a clone command to user space. 4068 */ 4069 static int send_clone(struct send_ctx *sctx, 4070 u64 offset, u32 len, 4071 struct clone_root *clone_root) 4072 { 4073 int ret = 0; 4074 struct fs_path *p; 4075 u64 gen; 4076 4077 verbose_printk("btrfs: send_clone offset=%llu, len=%d, clone_root=%llu, " 4078 "clone_inode=%llu, clone_offset=%llu\n", offset, len, 4079 clone_root->root->objectid, clone_root->ino, 4080 clone_root->offset); 4081 4082 p = fs_path_alloc(); 4083 if (!p) 4084 return -ENOMEM; 4085 4086 ret = begin_cmd(sctx, BTRFS_SEND_C_CLONE); 4087 if (ret < 0) 4088 goto out; 4089 4090 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); 4091 if (ret < 0) 4092 goto out; 4093 4094 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset); 4095 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_LEN, len); 4096 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); 4097 4098 if (clone_root->root == sctx->send_root) { 4099 ret = get_inode_info(sctx->send_root, clone_root->ino, NULL, 4100 &gen, NULL, NULL, NULL, NULL); 4101 if (ret < 0) 4102 goto out; 4103 ret = get_cur_path(sctx, clone_root->ino, gen, p); 4104 } else { 4105 ret = get_inode_path(clone_root->root, clone_root->ino, p); 4106 } 4107 if (ret < 0) 4108 goto out; 4109 4110 TLV_PUT_UUID(sctx, BTRFS_SEND_A_CLONE_UUID, 4111 clone_root->root->root_item.uuid); 4112 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_CTRANSID, 4113 le64_to_cpu(clone_root->root->root_item.ctransid)); 4114 TLV_PUT_PATH(sctx, BTRFS_SEND_A_CLONE_PATH, p); 4115 TLV_PUT_U64(sctx, BTRFS_SEND_A_CLONE_OFFSET, 4116 clone_root->offset); 4117 4118 ret = send_cmd(sctx); 4119 4120 tlv_put_failure: 4121 out: 4122 fs_path_free(p); 4123 return ret; 4124 } 4125 4126 /* 4127 * Send an update extent command to user space. 
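 *
 * This is the metadata-only counterpart of send_write: it is used when
 * the stream was created with BTRFS_SEND_FLAG_NO_FILE_DATA and carries
 * only the file offset and length, no data payload.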
4128 */ 4129 static int send_update_extent(struct send_ctx *sctx, 4130 u64 offset, u32 len) 4131 { 4132 int ret = 0; 4133 struct fs_path *p; 4134 4135 p = fs_path_alloc(); 4136 if (!p) 4137 return -ENOMEM; 4138 4139 ret = begin_cmd(sctx, BTRFS_SEND_C_UPDATE_EXTENT); 4140 if (ret < 0) 4141 goto out; 4142 4143 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); 4144 if (ret < 0) 4145 goto out; 4146 4147 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); 4148 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset); 4149 TLV_PUT_U64(sctx, BTRFS_SEND_A_SIZE, len); 4150 4151 ret = send_cmd(sctx); 4152 4153 tlv_put_failure: 4154 out: 4155 fs_path_free(p); 4156 return ret; 4157 } 4158 4159 static int send_hole(struct send_ctx *sctx, u64 end) 4160 { 4161 struct fs_path *p = NULL; 4162 u64 offset = sctx->cur_inode_last_extent; 4163 u64 len; 4164 int ret = 0; 4165 4166 p = fs_path_alloc(); 4167 if (!p) 4168 return -ENOMEM; 4169 memset(sctx->read_buf, 0, BTRFS_SEND_READ_SIZE); 4170 while (offset < end) { 4171 len = min_t(u64, end - offset, BTRFS_SEND_READ_SIZE); 4172 4173 ret = begin_cmd(sctx, BTRFS_SEND_C_WRITE); 4174 if (ret < 0) 4175 break; 4176 ret = get_cur_path(sctx, sctx->cur_ino, sctx->cur_inode_gen, p); 4177 if (ret < 0) 4178 break; 4179 TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p); 4180 TLV_PUT_U64(sctx, BTRFS_SEND_A_FILE_OFFSET, offset); 4181 TLV_PUT(sctx, BTRFS_SEND_A_DATA, sctx->read_buf, len); 4182 ret = send_cmd(sctx); 4183 if (ret < 0) 4184 break; 4185 offset += len; 4186 } 4187 tlv_put_failure: 4188 fs_path_free(p); 4189 return ret; 4190 } 4191 4192 static int send_write_or_clone(struct send_ctx *sctx, 4193 struct btrfs_path *path, 4194 struct btrfs_key *key, 4195 struct clone_root *clone_root) 4196 { 4197 int ret = 0; 4198 struct btrfs_file_extent_item *ei; 4199 u64 offset = key->offset; 4200 u64 pos = 0; 4201 u64 len; 4202 u32 l; 4203 u8 type; 4204 u64 bs = sctx->send_root->fs_info->sb->s_blocksize; 4205 4206 ei = btrfs_item_ptr(path->nodes[0], path->slots[0], 4207 struct btrfs_file_extent_item); 4208 type = btrfs_file_extent_type(path->nodes[0], ei); 4209 if (type == BTRFS_FILE_EXTENT_INLINE) { 4210 len = btrfs_file_extent_inline_len(path->nodes[0], 4211 path->slots[0], ei); 4212 /* 4213 * it is possible the inline item won't cover the whole page, 4214 * but there may be items after this page. 
Make 4215 * sure to send the whole thing 4216 */ 4217 len = PAGE_CACHE_ALIGN(len); 4218 } else { 4219 len = btrfs_file_extent_num_bytes(path->nodes[0], ei); 4220 } 4221 4222 if (offset + len > sctx->cur_inode_size) 4223 len = sctx->cur_inode_size - offset; 4224 if (len == 0) { 4225 ret = 0; 4226 goto out; 4227 } 4228 4229 if (clone_root && IS_ALIGNED(offset + len, bs)) { 4230 ret = send_clone(sctx, offset, len, clone_root); 4231 } else if (sctx->flags & BTRFS_SEND_FLAG_NO_FILE_DATA) { 4232 ret = send_update_extent(sctx, offset, len); 4233 } else { 4234 while (pos < len) { 4235 l = len - pos; 4236 if (l > BTRFS_SEND_READ_SIZE) 4237 l = BTRFS_SEND_READ_SIZE; 4238 ret = send_write(sctx, pos + offset, l); 4239 if (ret < 0) 4240 goto out; 4241 if (!ret) 4242 break; 4243 pos += ret; 4244 } 4245 ret = 0; 4246 } 4247 out: 4248 return ret; 4249 } 4250 4251 static int is_extent_unchanged(struct send_ctx *sctx, 4252 struct btrfs_path *left_path, 4253 struct btrfs_key *ekey) 4254 { 4255 int ret = 0; 4256 struct btrfs_key key; 4257 struct btrfs_path *path = NULL; 4258 struct extent_buffer *eb; 4259 int slot; 4260 struct btrfs_key found_key; 4261 struct btrfs_file_extent_item *ei; 4262 u64 left_disknr; 4263 u64 right_disknr; 4264 u64 left_offset; 4265 u64 right_offset; 4266 u64 left_offset_fixed; 4267 u64 left_len; 4268 u64 right_len; 4269 u64 left_gen; 4270 u64 right_gen; 4271 u8 left_type; 4272 u8 right_type; 4273 4274 path = alloc_path_for_send(); 4275 if (!path) 4276 return -ENOMEM; 4277 4278 eb = left_path->nodes[0]; 4279 slot = left_path->slots[0]; 4280 ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item); 4281 left_type = btrfs_file_extent_type(eb, ei); 4282 4283 if (left_type != BTRFS_FILE_EXTENT_REG) { 4284 ret = 0; 4285 goto out; 4286 } 4287 left_disknr = btrfs_file_extent_disk_bytenr(eb, ei); 4288 left_len = btrfs_file_extent_num_bytes(eb, ei); 4289 left_offset = btrfs_file_extent_offset(eb, ei); 4290 left_gen = btrfs_file_extent_generation(eb, ei); 4291 4292 /* 4293 * Following comments will refer to these graphics. L is the left 4294 * extents which we are checking at the moment. 1-8 are the right 4295 * extents that we iterate. 4296 * 4297 * |-----L-----| 4298 * |-1-|-2a-|-3-|-4-|-5-|-6-| 4299 * 4300 * |-----L-----| 4301 * |--1--|-2b-|...(same as above) 4302 * 4303 * Alternative situation. Happens on files where extents got split. 4304 * |-----L-----| 4305 * |-----------7-----------|-6-| 4306 * 4307 * Alternative situation. Happens on files which got larger. 4308 * |-----L-----| 4309 * |-8-| 4310 * Nothing follows after 8. 4311 */ 4312 4313 key.objectid = ekey->objectid; 4314 key.type = BTRFS_EXTENT_DATA_KEY; 4315 key.offset = ekey->offset; 4316 ret = btrfs_search_slot_for_read(sctx->parent_root, &key, path, 0, 0); 4317 if (ret < 0) 4318 goto out; 4319 if (ret) { 4320 ret = 0; 4321 goto out; 4322 } 4323 4324 /* 4325 * Handle special case where the right side has no extents at all. 4326 */ 4327 eb = path->nodes[0]; 4328 slot = path->slots[0]; 4329 btrfs_item_key_to_cpu(eb, &found_key, slot); 4330 if (found_key.objectid != key.objectid || 4331 found_key.type != key.type) { 4332 /* If we're a hole then just pretend nothing changed */ 4333 ret = (left_disknr) ? 0 : 1; 4334 goto out; 4335 } 4336 4337 /* 4338 * We're now on 2a, 2b or 7. 
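	 *
	 * The loop below slides along the right extents until they cover
	 * all of L, comparing disk bytenr, generation and the offset into
	 * the extent (fixed up when L and the right extent start at
	 * different file offsets) for every overlapping piece.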
4339 */ 4340 key = found_key; 4341 while (key.offset < ekey->offset + left_len) { 4342 ei = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item); 4343 right_type = btrfs_file_extent_type(eb, ei); 4344 if (right_type != BTRFS_FILE_EXTENT_REG) { 4345 ret = 0; 4346 goto out; 4347 } 4348 4349 right_disknr = btrfs_file_extent_disk_bytenr(eb, ei); 4350 right_len = btrfs_file_extent_num_bytes(eb, ei); 4351 right_offset = btrfs_file_extent_offset(eb, ei); 4352 right_gen = btrfs_file_extent_generation(eb, ei); 4353 4354 /* 4355 * Are we at extent 8? If yes, we know the extent is changed. 4356 * This may only happen on the first iteration. 4357 */ 4358 if (found_key.offset + right_len <= ekey->offset) { 4359 /* If we're a hole just pretend nothing changed */ 4360 ret = (left_disknr) ? 0 : 1; 4361 goto out; 4362 } 4363 4364 left_offset_fixed = left_offset; 4365 if (key.offset < ekey->offset) { 4366 /* Fix the right offset for 2a and 7. */ 4367 right_offset += ekey->offset - key.offset; 4368 } else { 4369 /* Fix the left offset for all behind 2a and 2b */ 4370 left_offset_fixed += key.offset - ekey->offset; 4371 } 4372 4373 /* 4374 * Check if we have the same extent. 4375 */ 4376 if (left_disknr != right_disknr || 4377 left_offset_fixed != right_offset || 4378 left_gen != right_gen) { 4379 ret = 0; 4380 goto out; 4381 } 4382 4383 /* 4384 * Go to the next extent. 4385 */ 4386 ret = btrfs_next_item(sctx->parent_root, path); 4387 if (ret < 0) 4388 goto out; 4389 if (!ret) { 4390 eb = path->nodes[0]; 4391 slot = path->slots[0]; 4392 btrfs_item_key_to_cpu(eb, &found_key, slot); 4393 } 4394 if (ret || found_key.objectid != key.objectid || 4395 found_key.type != key.type) { 4396 key.offset += right_len; 4397 break; 4398 } 4399 if (found_key.offset != key.offset + right_len) { 4400 ret = 0; 4401 goto out; 4402 } 4403 key = found_key; 4404 } 4405 4406 /* 4407 * We're now behind the left extent (treat as unchanged) or at the end 4408 * of the right side (treat as changed). 
4409 */ 4410 if (key.offset >= ekey->offset + left_len) 4411 ret = 1; 4412 else 4413 ret = 0; 4414 4415 4416 out: 4417 btrfs_free_path(path); 4418 return ret; 4419 } 4420 4421 static int get_last_extent(struct send_ctx *sctx, u64 offset) 4422 { 4423 struct btrfs_path *path; 4424 struct btrfs_root *root = sctx->send_root; 4425 struct btrfs_file_extent_item *fi; 4426 struct btrfs_key key; 4427 u64 extent_end; 4428 u8 type; 4429 int ret; 4430 4431 path = alloc_path_for_send(); 4432 if (!path) 4433 return -ENOMEM; 4434 4435 sctx->cur_inode_last_extent = 0; 4436 4437 key.objectid = sctx->cur_ino; 4438 key.type = BTRFS_EXTENT_DATA_KEY; 4439 key.offset = offset; 4440 ret = btrfs_search_slot_for_read(root, &key, path, 0, 1); 4441 if (ret < 0) 4442 goto out; 4443 ret = 0; 4444 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); 4445 if (key.objectid != sctx->cur_ino || key.type != BTRFS_EXTENT_DATA_KEY) 4446 goto out; 4447 4448 fi = btrfs_item_ptr(path->nodes[0], path->slots[0], 4449 struct btrfs_file_extent_item); 4450 type = btrfs_file_extent_type(path->nodes[0], fi); 4451 if (type == BTRFS_FILE_EXTENT_INLINE) { 4452 u64 size = btrfs_file_extent_inline_len(path->nodes[0], 4453 path->slots[0], fi); 4454 extent_end = ALIGN(key.offset + size, 4455 sctx->send_root->sectorsize); 4456 } else { 4457 extent_end = key.offset + 4458 btrfs_file_extent_num_bytes(path->nodes[0], fi); 4459 } 4460 sctx->cur_inode_last_extent = extent_end; 4461 out: 4462 btrfs_free_path(path); 4463 return ret; 4464 } 4465 4466 static int maybe_send_hole(struct send_ctx *sctx, struct btrfs_path *path, 4467 struct btrfs_key *key) 4468 { 4469 struct btrfs_file_extent_item *fi; 4470 u64 extent_end; 4471 u8 type; 4472 int ret = 0; 4473 4474 if (sctx->cur_ino != key->objectid || !need_send_hole(sctx)) 4475 return 0; 4476 4477 if (sctx->cur_inode_last_extent == (u64)-1) { 4478 ret = get_last_extent(sctx, key->offset - 1); 4479 if (ret) 4480 return ret; 4481 } 4482 4483 fi = btrfs_item_ptr(path->nodes[0], path->slots[0], 4484 struct btrfs_file_extent_item); 4485 type = btrfs_file_extent_type(path->nodes[0], fi); 4486 if (type == BTRFS_FILE_EXTENT_INLINE) { 4487 u64 size = btrfs_file_extent_inline_len(path->nodes[0], 4488 path->slots[0], fi); 4489 extent_end = ALIGN(key->offset + size, 4490 sctx->send_root->sectorsize); 4491 } else { 4492 extent_end = key->offset + 4493 btrfs_file_extent_num_bytes(path->nodes[0], fi); 4494 } 4495 4496 if (path->slots[0] == 0 && 4497 sctx->cur_inode_last_extent < key->offset) { 4498 /* 4499 * We might have skipped entire leaves that contained only 4500 * file extent items for our current inode. These leaves have 4501 * a generation number smaller (older) than the one in the 4502 * current leaf and the leaf our last extent came from, and 4503 * are located between these two leaves.
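		 *
		 * Re-running get_last_extent() for the offset just before
		 * this key repairs cur_inode_last_extent in that case, so
		 * the check below does not send a bogus hole for a range
		 * that is actually covered by extents in those skipped
		 * leaves.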
static int maybe_send_hole(struct send_ctx *sctx, struct btrfs_path *path,
			   struct btrfs_key *key)
{
	struct btrfs_file_extent_item *fi;
	u64 extent_end;
	u8 type;
	int ret = 0;

	if (sctx->cur_ino != key->objectid || !need_send_hole(sctx))
		return 0;

	if (sctx->cur_inode_last_extent == (u64)-1) {
		ret = get_last_extent(sctx, key->offset - 1);
		if (ret)
			return ret;
	}

	fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
			    struct btrfs_file_extent_item);
	type = btrfs_file_extent_type(path->nodes[0], fi);
	if (type == BTRFS_FILE_EXTENT_INLINE) {
		u64 size = btrfs_file_extent_inline_len(path->nodes[0],
							path->slots[0], fi);
		extent_end = ALIGN(key->offset + size,
				   sctx->send_root->sectorsize);
	} else {
		extent_end = key->offset +
			btrfs_file_extent_num_bytes(path->nodes[0], fi);
	}

	if (path->slots[0] == 0 &&
	    sctx->cur_inode_last_extent < key->offset) {
		/*
		 * We might have skipped entire leaves that contained only
		 * file extent items for our current inode. These leaves have
		 * a generation number smaller (older) than the one in the
		 * current leaf and the leaf our last extent came from, and
		 * are located between these 2 leaves.
		 */
		ret = get_last_extent(sctx, key->offset - 1);
		if (ret)
			return ret;
	}

	if (sctx->cur_inode_last_extent < key->offset)
		ret = send_hole(sctx, key->offset);
	sctx->cur_inode_last_extent = extent_end;
	return ret;
}
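
/*
 * Emit the stream commands for one file extent item: nothing for
 * symlinks, holes and prealloc extents, and nothing if an incremental
 * send can prove the extent is unchanged in the parent snapshot;
 * otherwise either a clone command, when find_extent_clone() found a
 * suitable source, or plain write commands carrying the file data.
 */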
static int process_extent(struct send_ctx *sctx,
			  struct btrfs_path *path,
			  struct btrfs_key *key)
{
	struct clone_root *found_clone = NULL;
	int ret = 0;

	if (S_ISLNK(sctx->cur_inode_mode))
		return 0;

	if (sctx->parent_root && !sctx->cur_inode_new) {
		ret = is_extent_unchanged(sctx, path, key);
		if (ret < 0)
			goto out;
		if (ret) {
			ret = 0;
			goto out_hole;
		}
	} else {
		struct btrfs_file_extent_item *ei;
		u8 type;

		ei = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_file_extent_item);
		type = btrfs_file_extent_type(path->nodes[0], ei);
		if (type == BTRFS_FILE_EXTENT_PREALLOC ||
		    type == BTRFS_FILE_EXTENT_REG) {
			/*
			 * The send spec does not have a prealloc command yet,
			 * so just leave a hole for prealloc'ed extents until
			 * we have enough commands queued up to justify rev'ing
			 * the send spec.
			 */
			if (type == BTRFS_FILE_EXTENT_PREALLOC) {
				ret = 0;
				goto out;
			}

			/* Have a hole, just skip it. */
			if (btrfs_file_extent_disk_bytenr(path->nodes[0], ei) == 0) {
				ret = 0;
				goto out;
			}
		}
	}

	ret = find_extent_clone(sctx, path, key->objectid, key->offset,
				sctx->cur_inode_size, &found_clone);
	if (ret != -ENOENT && ret < 0)
		goto out;

	ret = send_write_or_clone(sctx, path, key, found_clone);
	if (ret)
		goto out;
out_hole:
	ret = maybe_send_hole(sctx, path, key);
out:
	return ret;
}
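
/*
 * Walk all extent items of the current inode and process each of them as
 * new. Used by changed_inode() when an inode number was reused: the new
 * inode shares nothing with the deleted one, so all of its extents have
 * to be sent from scratch.
 */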
static int process_all_extents(struct send_ctx *sctx)
{
	int ret;
	struct btrfs_root *root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct extent_buffer *eb;
	int slot;

	root = sctx->send_root;
	path = alloc_path_for_send();
	if (!path)
		return -ENOMEM;

	key.objectid = sctx->cmp_key->objectid;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		eb = path->nodes[0];
		slot = path->slots[0];

		if (slot >= btrfs_header_nritems(eb)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0) {
				goto out;
			} else if (ret > 0) {
				ret = 0;
				break;
			}
			continue;
		}

		btrfs_item_key_to_cpu(eb, &found_key, slot);

		if (found_key.objectid != key.objectid ||
		    found_key.type != key.type) {
			ret = 0;
			goto out;
		}

		ret = process_extent(sctx, path, &found_key);
		if (ret < 0)
			goto out;

		path->slots[0]++;
	}

out:
	btrfs_free_path(path);
	return ret;
}

static int process_recorded_refs_if_needed(struct send_ctx *sctx, int at_end,
					   int *pending_move,
					   int *refs_processed)
{
	int ret = 0;

	if (sctx->cur_ino == 0)
		goto out;
	if (!at_end && sctx->cur_ino == sctx->cmp_key->objectid &&
	    sctx->cmp_key->type <= BTRFS_INODE_EXTREF_KEY)
		goto out;
	if (list_empty(&sctx->new_refs) && list_empty(&sctx->deleted_refs))
		goto out;

	ret = process_recorded_refs(sctx, pending_move);
	if (ret < 0)
		goto out;

	*refs_processed = 1;
out:
	return ret;
}
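
/*
 * Finish off the inode we were processing: flush the recorded ref
 * changes, then emit the metadata commands that have to come after the
 * data, i.e. truncate (plus a trailing hole for incremental sends) for
 * regular files, chown/chmod when owner or mode differ from the parent
 * snapshot, and finally utimes. Called whenever the tree comparison
 * moves on to the next inode, and once more at the very end of the send.
 */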
static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
{
	int ret = 0;
	u64 left_mode;
	u64 left_uid;
	u64 left_gid;
	u64 right_mode;
	u64 right_uid;
	u64 right_gid;
	int need_chmod = 0;
	int need_chown = 0;
	int pending_move = 0;
	int refs_processed = 0;

	ret = process_recorded_refs_if_needed(sctx, at_end, &pending_move,
					      &refs_processed);
	if (ret < 0)
		goto out;

	/*
	 * We have processed the refs and thus need to advance send_progress.
	 * Now, calls to get_cur_xxx will take the updated refs of the current
	 * inode into account.
	 *
	 * On the other hand, if our current inode is a directory and couldn't
	 * be moved/renamed because its parent was renamed/moved too and it has
	 * a higher inode number, we can only move/rename our current inode
	 * after we moved/renamed its parent. Therefore in this case operate on
	 * the old path (pre move/rename) of our current inode, and the
	 * move/rename will be performed later.
	 */
	if (refs_processed && !pending_move)
		sctx->send_progress = sctx->cur_ino + 1;

	if (sctx->cur_ino == 0 || sctx->cur_inode_deleted)
		goto out;
	if (!at_end && sctx->cmp_key->objectid == sctx->cur_ino)
		goto out;

	ret = get_inode_info(sctx->send_root, sctx->cur_ino, NULL, NULL,
			     &left_mode, &left_uid, &left_gid, NULL);
	if (ret < 0)
		goto out;

	if (!sctx->parent_root || sctx->cur_inode_new) {
		need_chown = 1;
		if (!S_ISLNK(sctx->cur_inode_mode))
			need_chmod = 1;
	} else {
		ret = get_inode_info(sctx->parent_root, sctx->cur_ino,
				     NULL, NULL, &right_mode, &right_uid,
				     &right_gid, NULL);
		if (ret < 0)
			goto out;

		if (left_uid != right_uid || left_gid != right_gid)
			need_chown = 1;
		if (!S_ISLNK(sctx->cur_inode_mode) && left_mode != right_mode)
			need_chmod = 1;
	}

	if (S_ISREG(sctx->cur_inode_mode)) {
		if (need_send_hole(sctx)) {
			if (sctx->cur_inode_last_extent == (u64)-1) {
				ret = get_last_extent(sctx, (u64)-1);
				if (ret)
					goto out;
			}
			if (sctx->cur_inode_last_extent <
			    sctx->cur_inode_size) {
				ret = send_hole(sctx, sctx->cur_inode_size);
				if (ret)
					goto out;
			}
		}
		ret = send_truncate(sctx, sctx->cur_ino, sctx->cur_inode_gen,
				    sctx->cur_inode_size);
		if (ret < 0)
			goto out;
	}

	if (need_chown) {
		ret = send_chown(sctx, sctx->cur_ino, sctx->cur_inode_gen,
				 left_uid, left_gid);
		if (ret < 0)
			goto out;
	}
	if (need_chmod) {
		ret = send_chmod(sctx, sctx->cur_ino, sctx->cur_inode_gen,
				 left_mode);
		if (ret < 0)
			goto out;
	}

	/*
	 * If other directory inodes depended on our current directory
	 * inode's move/rename, now do their move/rename operations.
	 */
	if (!is_waiting_for_move(sctx, sctx->cur_ino)) {
		ret = apply_children_dir_moves(sctx);
		if (ret)
			goto out;
	}

	/*
	 * The utimes command needs to be sent every time, no matter whether
	 * the timestamps actually changed between the two trees, because the
	 * commands we sent for this inode above have modified them on the
	 * receiving side.
	 */
	sctx->send_progress = sctx->cur_ino + 1;
	ret = send_utimes(sctx, sctx->cur_ino, sctx->cur_inode_gen);
	if (ret < 0)
		goto out;

out:
	return ret;
}
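
/*
 * Set up the per-inode state for the compare result: remember which inode
 * we are on, take generation, size and mode from the left (send) or right
 * (parent) tree as appropriate, and detect the delete+recreate case by
 * comparing the generations found in both trees. In that case the old
 * inode is processed as deleted and the new one as new right here,
 * because the later ref/xattr/extent callbacks would be comparing two
 * unrelated inodes that merely share an inode number.
 */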
static int changed_inode(struct send_ctx *sctx,
			 enum btrfs_compare_tree_result result)
{
	int ret = 0;
	struct btrfs_key *key = sctx->cmp_key;
	struct btrfs_inode_item *left_ii = NULL;
	struct btrfs_inode_item *right_ii = NULL;
	u64 left_gen = 0;
	u64 right_gen = 0;

	sctx->cur_ino = key->objectid;
	sctx->cur_inode_new_gen = 0;
	sctx->cur_inode_last_extent = (u64)-1;

	/*
	 * Set send_progress to current inode. This will tell all get_cur_xxx
	 * functions that the current inode's refs are not updated yet. Later,
	 * when process_recorded_refs is finished, it is set to cur_ino + 1.
	 */
	sctx->send_progress = sctx->cur_ino;

	if (result == BTRFS_COMPARE_TREE_NEW ||
	    result == BTRFS_COMPARE_TREE_CHANGED) {
		left_ii = btrfs_item_ptr(sctx->left_path->nodes[0],
				sctx->left_path->slots[0],
				struct btrfs_inode_item);
		left_gen = btrfs_inode_generation(sctx->left_path->nodes[0],
				left_ii);
	} else {
		right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
				sctx->right_path->slots[0],
				struct btrfs_inode_item);
		right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
				right_ii);
	}
	if (result == BTRFS_COMPARE_TREE_CHANGED) {
		right_ii = btrfs_item_ptr(sctx->right_path->nodes[0],
				sctx->right_path->slots[0],
				struct btrfs_inode_item);

		right_gen = btrfs_inode_generation(sctx->right_path->nodes[0],
				right_ii);

		/*
		 * The cur_ino = root dir case is special here. We can't treat
		 * the inode as deleted+reused because it would generate a
		 * stream that tries to delete/mkdir the root dir.
		 */
		if (left_gen != right_gen &&
		    sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
			sctx->cur_inode_new_gen = 1;
	}

	if (result == BTRFS_COMPARE_TREE_NEW) {
		sctx->cur_inode_gen = left_gen;
		sctx->cur_inode_new = 1;
		sctx->cur_inode_deleted = 0;
		sctx->cur_inode_size = btrfs_inode_size(
				sctx->left_path->nodes[0], left_ii);
		sctx->cur_inode_mode = btrfs_inode_mode(
				sctx->left_path->nodes[0], left_ii);
		if (sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID)
			ret = send_create_inode_if_needed(sctx);
	} else if (result == BTRFS_COMPARE_TREE_DELETED) {
		sctx->cur_inode_gen = right_gen;
		sctx->cur_inode_new = 0;
		sctx->cur_inode_deleted = 1;
		sctx->cur_inode_size = btrfs_inode_size(
				sctx->right_path->nodes[0], right_ii);
		sctx->cur_inode_mode = btrfs_inode_mode(
				sctx->right_path->nodes[0], right_ii);
	} else if (result == BTRFS_COMPARE_TREE_CHANGED) {
		/*
		 * We need to do some special handling in case the inode was
		 * reported as changed with a changed generation number. This
		 * means that the original inode was deleted and a new inode
		 * reused the same inum. So we have to treat the old inode as
		 * deleted and the new one as new.
		 */
		if (sctx->cur_inode_new_gen) {
			/*
			 * First, process the inode as if it was deleted.
			 */
			sctx->cur_inode_gen = right_gen;
			sctx->cur_inode_new = 0;
			sctx->cur_inode_deleted = 1;
			sctx->cur_inode_size = btrfs_inode_size(
					sctx->right_path->nodes[0], right_ii);
			sctx->cur_inode_mode = btrfs_inode_mode(
					sctx->right_path->nodes[0], right_ii);
			ret = process_all_refs(sctx,
					BTRFS_COMPARE_TREE_DELETED);
			if (ret < 0)
				goto out;

			/*
			 * Now process the inode as if it was new.
			 */
			sctx->cur_inode_gen = left_gen;
			sctx->cur_inode_new = 1;
			sctx->cur_inode_deleted = 0;
			sctx->cur_inode_size = btrfs_inode_size(
					sctx->left_path->nodes[0], left_ii);
			sctx->cur_inode_mode = btrfs_inode_mode(
					sctx->left_path->nodes[0], left_ii);
			ret = send_create_inode_if_needed(sctx);
			if (ret < 0)
				goto out;

			ret = process_all_refs(sctx, BTRFS_COMPARE_TREE_NEW);
			if (ret < 0)
				goto out;
			/*
			 * Advance send_progress now as we did not get into
			 * process_recorded_refs_if_needed in the new_gen case.
			 */
			sctx->send_progress = sctx->cur_ino + 1;

			/*
			 * Now process all extents and xattrs of the inode as if
			 * they were all new.
			 */
			ret = process_all_extents(sctx);
			if (ret < 0)
				goto out;
			ret = process_all_new_xattrs(sctx);
			if (ret < 0)
				goto out;
		} else {
			sctx->cur_inode_gen = left_gen;
			sctx->cur_inode_new = 0;
			sctx->cur_inode_new_gen = 0;
			sctx->cur_inode_deleted = 0;
			sctx->cur_inode_size = btrfs_inode_size(
					sctx->left_path->nodes[0], left_ii);
			sctx->cur_inode_mode = btrfs_inode_mode(
					sctx->left_path->nodes[0], left_ii);
		}
	}

out:
	return ret;
}

/*
 * We have to process new refs before deleted refs, but compare_trees gives us
 * the new and deleted refs mixed. To fix this, we record the new/deleted refs
 * first and later process them in process_recorded_refs.
 * For the cur_inode_new_gen case, we skip recording completely because
 * changed_inode already initiated the processing of refs. The reason for this
 * is that in this case, compare_tree actually compares the refs of 2 different
 * inodes. To fix this, process_all_refs is used in changed_inode to handle all
 * refs of the right tree as deleted and all refs of the left tree as new.
 */
static int changed_ref(struct send_ctx *sctx,
		       enum btrfs_compare_tree_result result)
{
	int ret = 0;

	BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid);

	if (!sctx->cur_inode_new_gen &&
	    sctx->cur_ino != BTRFS_FIRST_FREE_OBJECTID) {
		if (result == BTRFS_COMPARE_TREE_NEW)
			ret = record_new_ref(sctx);
		else if (result == BTRFS_COMPARE_TREE_DELETED)
			ret = record_deleted_ref(sctx);
		else if (result == BTRFS_COMPARE_TREE_CHANGED)
			ret = record_changed_ref(sctx);
	}

	return ret;
}

/*
 * Process new/deleted/changed xattrs. We skip processing in the
 * cur_inode_new_gen case because changed_inode already initiated the
 * processing of xattrs. The reason is the same as in changed_ref.
 */
static int changed_xattr(struct send_ctx *sctx,
			 enum btrfs_compare_tree_result result)
{
	int ret = 0;

	BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid);

	if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
		if (result == BTRFS_COMPARE_TREE_NEW)
			ret = process_new_xattr(sctx);
		else if (result == BTRFS_COMPARE_TREE_DELETED)
			ret = process_deleted_xattr(sctx);
		else if (result == BTRFS_COMPARE_TREE_CHANGED)
			ret = process_changed_xattr(sctx);
	}

	return ret;
}

/*
 * Process new/deleted/changed extents. We skip processing in the
 * cur_inode_new_gen case because changed_inode already initiated the
 * processing of extents. The reason is the same as in changed_ref.
 */
static int changed_extent(struct send_ctx *sctx,
			  enum btrfs_compare_tree_result result)
{
	int ret = 0;

	BUG_ON(sctx->cur_ino != sctx->cmp_key->objectid);

	if (!sctx->cur_inode_new_gen && !sctx->cur_inode_deleted) {
		if (result != BTRFS_COMPARE_TREE_DELETED)
			ret = process_extent(sctx, sctx->left_path,
					sctx->cmp_key);
	}

	return ret;
}
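
/*
 * Return 1 if @dir was deleted and recreated between the parent and send
 * snapshots (its generation differs between the two roots), 0 if it is
 * still the same directory, or a negative errno if the inode lookup
 * fails.
 */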
static int dir_changed(struct send_ctx *sctx, u64 dir)
{
	u64 orig_gen, new_gen;
	int ret;

	ret = get_inode_info(sctx->send_root, dir, NULL, &new_gen, NULL, NULL,
			     NULL, NULL);
	if (ret)
		return ret;

	ret = get_inode_info(sctx->parent_root, dir, NULL, &orig_gen, NULL,
			     NULL, NULL, NULL);
	if (ret)
		return ret;

	return (orig_gen != new_gen) ? 1 : 0;
}

static int compare_refs(struct send_ctx *sctx, struct btrfs_path *path,
			struct btrfs_key *key)
{
	struct btrfs_inode_extref *extref;
	struct extent_buffer *leaf;
	u64 dirid = 0, last_dirid = 0;
	unsigned long ptr;
	u32 item_size;
	u32 cur_offset = 0;
	int ref_name_len;
	int ret = 0;

	/* Easy case, just check this one dirid */
	if (key->type == BTRFS_INODE_REF_KEY) {
		dirid = key->offset;

		ret = dir_changed(sctx, dirid);
		goto out;
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
	while (cur_offset < item_size) {
		extref = (struct btrfs_inode_extref *)(ptr +
						       cur_offset);
		dirid = btrfs_inode_extref_parent(leaf, extref);
		ref_name_len = btrfs_inode_extref_name_len(leaf, extref);
		cur_offset += ref_name_len + sizeof(*extref);
		if (dirid == last_dirid)
			continue;
		ret = dir_changed(sctx, dirid);
		if (ret)
			break;
		last_dirid = dirid;
	}
out:
	return ret;
}

/*
 * Updates the compare related fields in sctx and simply forwards to the
 * actual changed_xxx functions.
 */
static int changed_cb(struct btrfs_root *left_root,
		      struct btrfs_root *right_root,
		      struct btrfs_path *left_path,
		      struct btrfs_path *right_path,
		      struct btrfs_key *key,
		      enum btrfs_compare_tree_result result,
		      void *ctx)
{
	int ret = 0;
	struct send_ctx *sctx = ctx;

	if (result == BTRFS_COMPARE_TREE_SAME) {
		if (key->type == BTRFS_INODE_REF_KEY ||
		    key->type == BTRFS_INODE_EXTREF_KEY) {
			ret = compare_refs(sctx, left_path, key);
			if (!ret)
				return 0;
			if (ret < 0)
				return ret;
		} else if (key->type == BTRFS_EXTENT_DATA_KEY) {
			return maybe_send_hole(sctx, left_path, key);
		} else {
			return 0;
		}
		result = BTRFS_COMPARE_TREE_CHANGED;
		ret = 0;
	}

	sctx->left_path = left_path;
	sctx->right_path = right_path;
	sctx->cmp_key = key;

	ret = finish_inode_if_needed(sctx, 0);
	if (ret < 0)
		goto out;

	/* Ignore non-FS objects */
	if (key->objectid == BTRFS_FREE_INO_OBJECTID ||
	    key->objectid == BTRFS_FREE_SPACE_OBJECTID)
		goto out;

	if (key->type == BTRFS_INODE_ITEM_KEY)
		ret = changed_inode(sctx, result);
	else if (key->type == BTRFS_INODE_REF_KEY ||
		 key->type == BTRFS_INODE_EXTREF_KEY)
		ret = changed_ref(sctx, result);
	else if (key->type == BTRFS_XATTR_ITEM_KEY)
		ret = changed_xattr(sctx, result);
	else if (key->type == BTRFS_EXTENT_DATA_KEY)
		ret = changed_extent(sctx, result);

out:
	return ret;
}
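
/*
 * A full send has no parent snapshot to diff against, so instead of going
 * through btrfs_compare_trees we walk every item of the send root in key
 * order and feed it to changed_cb as BTRFS_COMPARE_TREE_NEW, which is
 * logically a comparison against an empty tree.
 */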
This is " 5145 "probably a bug.\n"); 5146 ret = -EIO; 5147 goto out; 5148 } 5149 5150 ret = btrfs_search_slot_for_read(send_root, &key, path, 1, 0); 5151 if (ret < 0) 5152 goto out; 5153 if (ret) 5154 goto out_finish; 5155 5156 while (1) { 5157 eb = path->nodes[0]; 5158 slot = path->slots[0]; 5159 btrfs_item_key_to_cpu(eb, &found_key, slot); 5160 5161 ret = changed_cb(send_root, NULL, path, NULL, 5162 &found_key, BTRFS_COMPARE_TREE_NEW, sctx); 5163 if (ret < 0) 5164 goto out; 5165 5166 key.objectid = found_key.objectid; 5167 key.type = found_key.type; 5168 key.offset = found_key.offset + 1; 5169 5170 ret = btrfs_next_item(send_root, path); 5171 if (ret < 0) 5172 goto out; 5173 if (ret) { 5174 ret = 0; 5175 break; 5176 } 5177 } 5178 5179 out_finish: 5180 ret = finish_inode_if_needed(sctx, 1); 5181 5182 out: 5183 btrfs_free_path(path); 5184 return ret; 5185 } 5186 5187 static int send_subvol(struct send_ctx *sctx) 5188 { 5189 int ret; 5190 5191 if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_STREAM_HEADER)) { 5192 ret = send_header(sctx); 5193 if (ret < 0) 5194 goto out; 5195 } 5196 5197 ret = send_subvol_begin(sctx); 5198 if (ret < 0) 5199 goto out; 5200 5201 if (sctx->parent_root) { 5202 ret = btrfs_compare_trees(sctx->send_root, sctx->parent_root, 5203 changed_cb, sctx); 5204 if (ret < 0) 5205 goto out; 5206 ret = finish_inode_if_needed(sctx, 1); 5207 if (ret < 0) 5208 goto out; 5209 } else { 5210 ret = full_send_tree(sctx); 5211 if (ret < 0) 5212 goto out; 5213 } 5214 5215 out: 5216 free_recorded_refs(sctx); 5217 return ret; 5218 } 5219 5220 static void btrfs_root_dec_send_in_progress(struct btrfs_root* root) 5221 { 5222 spin_lock(&root->root_item_lock); 5223 root->send_in_progress--; 5224 /* 5225 * Not much left to do, we don't know why it's unbalanced and 5226 * can't blindly reset it to 0. 5227 */ 5228 if (root->send_in_progress < 0) 5229 btrfs_err(root->fs_info, 5230 "send_in_progres unbalanced %d root %llu\n", 5231 root->send_in_progress, root->root_key.objectid); 5232 spin_unlock(&root->root_item_lock); 5233 } 5234 5235 long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_) 5236 { 5237 int ret = 0; 5238 struct btrfs_root *send_root; 5239 struct btrfs_root *clone_root; 5240 struct btrfs_fs_info *fs_info; 5241 struct btrfs_ioctl_send_args *arg = NULL; 5242 struct btrfs_key key; 5243 struct send_ctx *sctx = NULL; 5244 u32 i; 5245 u64 *clone_sources_tmp = NULL; 5246 int clone_sources_to_rollback = 0; 5247 int sort_clone_roots = 0; 5248 int index; 5249 5250 if (!capable(CAP_SYS_ADMIN)) 5251 return -EPERM; 5252 5253 send_root = BTRFS_I(file_inode(mnt_file))->root; 5254 fs_info = send_root->fs_info; 5255 5256 /* 5257 * The subvolume must remain read-only during send, protect against 5258 * making it RW. 5259 */ 5260 spin_lock(&send_root->root_item_lock); 5261 send_root->send_in_progress++; 5262 spin_unlock(&send_root->root_item_lock); 5263 5264 /* 5265 * This is done when we lookup the root, it should already be complete 5266 * by the time we get here. 5267 */ 5268 WARN_ON(send_root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE); 5269 5270 /* 5271 * Userspace tools do the checks and warn the user if it's 5272 * not RO. 
long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
{
	int ret = 0;
	struct btrfs_root *send_root;
	struct btrfs_root *clone_root;
	struct btrfs_fs_info *fs_info;
	struct btrfs_ioctl_send_args *arg = NULL;
	struct btrfs_key key;
	struct send_ctx *sctx = NULL;
	u32 i;
	u64 *clone_sources_tmp = NULL;
	int clone_sources_to_rollback = 0;
	int sort_clone_roots = 0;
	int index;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	send_root = BTRFS_I(file_inode(mnt_file))->root;
	fs_info = send_root->fs_info;

	/*
	 * The subvolume must remain read-only during send, protect against
	 * making it RW.
	 */
	spin_lock(&send_root->root_item_lock);
	send_root->send_in_progress++;
	spin_unlock(&send_root->root_item_lock);

	/*
	 * This is done when we lookup the root, it should already be complete
	 * by the time we get here.
	 */
	WARN_ON(send_root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE);

	/*
	 * Userspace tools do the checks and warn the user if it's
	 * not RO.
	 */
	if (!btrfs_root_readonly(send_root)) {
		ret = -EPERM;
		goto out;
	}

	arg = memdup_user(arg_, sizeof(*arg));
	if (IS_ERR(arg)) {
		ret = PTR_ERR(arg);
		arg = NULL;
		goto out;
	}

	if (!access_ok(VERIFY_READ, arg->clone_sources,
		       sizeof(*arg->clone_sources) *
		       arg->clone_sources_count)) {
		ret = -EFAULT;
		goto out;
	}

	if (arg->flags & ~BTRFS_SEND_FLAG_MASK) {
		ret = -EINVAL;
		goto out;
	}

	sctx = kzalloc(sizeof(struct send_ctx), GFP_NOFS);
	if (!sctx) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&sctx->new_refs);
	INIT_LIST_HEAD(&sctx->deleted_refs);
	INIT_RADIX_TREE(&sctx->name_cache, GFP_NOFS);
	INIT_LIST_HEAD(&sctx->name_cache_list);

	sctx->flags = arg->flags;

	sctx->send_filp = fget(arg->send_fd);
	if (!sctx->send_filp) {
		ret = -EBADF;
		goto out;
	}

	sctx->send_root = send_root;
	sctx->clone_roots_cnt = arg->clone_sources_count;

	sctx->send_max_size = BTRFS_SEND_BUF_SIZE;
	sctx->send_buf = vmalloc(sctx->send_max_size);
	if (!sctx->send_buf) {
		ret = -ENOMEM;
		goto out;
	}

	sctx->read_buf = vmalloc(BTRFS_SEND_READ_SIZE);
	if (!sctx->read_buf) {
		ret = -ENOMEM;
		goto out;
	}

	sctx->pending_dir_moves = RB_ROOT;
	sctx->waiting_dir_moves = RB_ROOT;

	sctx->clone_roots = vzalloc(sizeof(struct clone_root) *
				    (arg->clone_sources_count + 1));
	if (!sctx->clone_roots) {
		ret = -ENOMEM;
		goto out;
	}
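
	/*
	 * Look up each requested clone source root. The subvol_srcu read
	 * lock keeps the root from going away during the lookup, and
	 * send_in_progress is raised before the read-only check so that a
	 * concurrent attempt to make the subvolume read-write either sees
	 * our count or we see the root already writable and fail with
	 * -EPERM.
	 */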
	if (arg->clone_sources_count) {
		clone_sources_tmp = vmalloc(arg->clone_sources_count *
					    sizeof(*arg->clone_sources));
		if (!clone_sources_tmp) {
			ret = -ENOMEM;
			goto out;
		}

		ret = copy_from_user(clone_sources_tmp, arg->clone_sources,
				     arg->clone_sources_count *
				     sizeof(*arg->clone_sources));
		if (ret) {
			ret = -EFAULT;
			goto out;
		}

		for (i = 0; i < arg->clone_sources_count; i++) {
			key.objectid = clone_sources_tmp[i];
			key.type = BTRFS_ROOT_ITEM_KEY;
			key.offset = (u64)-1;

			index = srcu_read_lock(&fs_info->subvol_srcu);

			clone_root = btrfs_read_fs_root_no_name(fs_info, &key);
			if (IS_ERR(clone_root)) {
				srcu_read_unlock(&fs_info->subvol_srcu, index);
				ret = PTR_ERR(clone_root);
				goto out;
			}
			clone_sources_to_rollback = i + 1;
			spin_lock(&clone_root->root_item_lock);
			clone_root->send_in_progress++;
			if (!btrfs_root_readonly(clone_root)) {
				spin_unlock(&clone_root->root_item_lock);
				srcu_read_unlock(&fs_info->subvol_srcu, index);
				ret = -EPERM;
				goto out;
			}
			spin_unlock(&clone_root->root_item_lock);
			srcu_read_unlock(&fs_info->subvol_srcu, index);

			sctx->clone_roots[i].root = clone_root;
		}
		vfree(clone_sources_tmp);
		clone_sources_tmp = NULL;
	}

	if (arg->parent_root) {
		key.objectid = arg->parent_root;
		key.type = BTRFS_ROOT_ITEM_KEY;
		key.offset = (u64)-1;

		index = srcu_read_lock(&fs_info->subvol_srcu);

		sctx->parent_root = btrfs_read_fs_root_no_name(fs_info, &key);
		if (IS_ERR(sctx->parent_root)) {
			srcu_read_unlock(&fs_info->subvol_srcu, index);
			ret = PTR_ERR(sctx->parent_root);
			goto out;
		}

		spin_lock(&sctx->parent_root->root_item_lock);
		sctx->parent_root->send_in_progress++;
		if (!btrfs_root_readonly(sctx->parent_root)) {
			spin_unlock(&sctx->parent_root->root_item_lock);
			srcu_read_unlock(&fs_info->subvol_srcu, index);
			ret = -EPERM;
			goto out;
		}
		spin_unlock(&sctx->parent_root->root_item_lock);

		srcu_read_unlock(&fs_info->subvol_srcu, index);
	}

	/*
	 * Clones from send_root are allowed, but only if the clone source
	 * is behind the current send position. This is checked while searching
	 * for possible clone sources.
	 */
	sctx->clone_roots[sctx->clone_roots_cnt++].root = sctx->send_root;

	/* We do a bsearch later */
	sort(sctx->clone_roots, sctx->clone_roots_cnt,
	     sizeof(*sctx->clone_roots), __clone_root_cmp_sort,
	     NULL);
	sort_clone_roots = 1;

	ret = send_subvol(sctx);
	if (ret < 0)
		goto out;

	if (!(sctx->flags & BTRFS_SEND_FLAG_OMIT_END_CMD)) {
		ret = begin_cmd(sctx, BTRFS_SEND_C_END);
		if (ret < 0)
			goto out;
		ret = send_cmd(sctx);
		if (ret < 0)
			goto out;
	}

out:
	WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->pending_dir_moves));
	while (sctx && !RB_EMPTY_ROOT(&sctx->pending_dir_moves)) {
		struct rb_node *n;
		struct pending_dir_move *pm;

		n = rb_first(&sctx->pending_dir_moves);
		pm = rb_entry(n, struct pending_dir_move, node);
		while (!list_empty(&pm->list)) {
			struct pending_dir_move *pm2;

			pm2 = list_first_entry(&pm->list,
					       struct pending_dir_move, list);
			free_pending_move(sctx, pm2);
		}
		free_pending_move(sctx, pm);
	}

	WARN_ON(sctx && !ret && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves));
	while (sctx && !RB_EMPTY_ROOT(&sctx->waiting_dir_moves)) {
		struct rb_node *n;
		struct waiting_dir_move *dm;

		n = rb_first(&sctx->waiting_dir_moves);
		dm = rb_entry(n, struct waiting_dir_move, node);
		rb_erase(&dm->node, &sctx->waiting_dir_moves);
		kfree(dm);
	}

	if (sort_clone_roots) {
		for (i = 0; i < sctx->clone_roots_cnt; i++)
			btrfs_root_dec_send_in_progress(
					sctx->clone_roots[i].root);
	} else {
		for (i = 0; sctx && i < clone_sources_to_rollback; i++)
			btrfs_root_dec_send_in_progress(
					sctx->clone_roots[i].root);

		btrfs_root_dec_send_in_progress(send_root);
	}
	if (sctx && !IS_ERR_OR_NULL(sctx->parent_root))
		btrfs_root_dec_send_in_progress(sctx->parent_root);

	kfree(arg);
	vfree(clone_sources_tmp);

	if (sctx) {
		if (sctx->send_filp)
			fput(sctx->send_filp);

		vfree(sctx->clone_roots);
		vfree(sctx->send_buf);
		vfree(sctx->read_buf);

		name_cache_free(sctx);

		kfree(sctx);
	}

	return ret;
}