1 /* 2 * Copyright (C) 2008 Oracle. All rights reserved. 3 * 4 * This program is free software; you can redistribute it and/or 5 * modify it under the terms of the GNU General Public 6 * License v2 as published by the Free Software Foundation. 7 * 8 * This program is distributed in the hope that it will be useful, 9 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 11 * General Public License for more details. 12 * 13 * You should have received a copy of the GNU General Public 14 * License along with this program; if not, write to the 15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330, 16 * Boston, MA 021110-1307, USA. 17 */ 18 19 #include <linux/sched.h> 20 #include <linux/slab.h> 21 #include <linux/blkdev.h> 22 #include <linux/list_sort.h> 23 #include "ctree.h" 24 #include "transaction.h" 25 #include "disk-io.h" 26 #include "locking.h" 27 #include "print-tree.h" 28 #include "backref.h" 29 #include "tree-log.h" 30 #include "hash.h" 31 32 /* magic values for the inode_only field in btrfs_log_inode: 33 * 34 * LOG_INODE_ALL means to log everything 35 * LOG_INODE_EXISTS means to log just enough to recreate the inode 36 * during log replay 37 */ 38 #define LOG_INODE_ALL 0 39 #define LOG_INODE_EXISTS 1 40 41 /* 42 * directory trouble cases 43 * 44 * 1) on rename or unlink, if the inode being unlinked isn't in the fsync 45 * log, we must force a full commit before doing an fsync of the directory 46 * where the unlink was done. 47 * ---> record transid of last unlink/rename per directory 48 * 49 * mkdir foo/some_dir 50 * normal commit 51 * rename foo/some_dir foo2/some_dir 52 * mkdir foo/some_dir 53 * fsync foo/some_dir/some_file 54 * 55 * The fsync above will unlink the original some_dir without recording 56 * it in its new location (foo2). After a crash, some_dir will be gone 57 * unless the fsync of some_file forces a full commit 58 * 59 * 2) we must log any new names for any file or dir that is in the fsync 60 * log. ---> check inode while renaming/linking. 61 * 62 * 2a) we must log any new names for any file or dir during rename 63 * when the directory they are being removed from was logged. 64 * ---> check inode and old parent dir during rename 65 * 66 * 2a is actually the more important variant. With the extra logging 67 * a crash might unlink the old name without recreating the new one 68 * 69 * 3) after a crash, we must go through any directories with a link count 70 * of zero and redo the rm -rf 71 * 72 * mkdir f1/foo 73 * normal commit 74 * rm -rf f1/foo 75 * fsync(f1) 76 * 77 * The directory f1 was fully removed from the FS, but fsync was never 78 * called on f1, only its parent dir. After a crash the rm -rf must 79 * be replayed. This must be able to recurse down the entire 80 * directory tree. The inode link count fixup code takes care of the 81 * ugly details. 82 */ 83 84 /* 85 * stages for the tree walking. The first 86 * stage (0) is to only pin down the blocks we find 87 * the second stage (1) is to make sure that all the inodes 88 * we find in the log are created in the subvolume. 89 * 90 * The last stage is to deal with directories and links and extents 91 * and all the other fun semantics 92 */ 93 #define LOG_WALK_PIN_ONLY 0 94 #define LOG_WALK_REPLAY_INODES 1 95 #define LOG_WALK_REPLAY_DIR_INDEX 2 96 #define LOG_WALK_REPLAY_ALL 3 97 98 static int btrfs_log_inode(struct btrfs_trans_handle *trans, 99 struct btrfs_root *root, struct inode *inode, 100 int inode_only); 101 static int link_to_fixup_dir(struct btrfs_trans_handle *trans, 102 struct btrfs_root *root, 103 struct btrfs_path *path, u64 objectid); 104 static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans, 105 struct btrfs_root *root, 106 struct btrfs_root *log, 107 struct btrfs_path *path, 108 u64 dirid, int del_all); 109 110 /* 111 * tree logging is a special write ahead log used to make sure that 112 * fsyncs and O_SYNCs can happen without doing full tree commits. 113 * 114 * Full tree commits are expensive because they require commonly 115 * modified blocks to be recowed, creating many dirty pages in the 116 * extent tree an 4x-6x higher write load than ext3. 117 * 118 * Instead of doing a tree commit on every fsync, we use the 119 * key ranges and transaction ids to find items for a given file or directory 120 * that have changed in this transaction. Those items are copied into 121 * a special tree (one per subvolume root), that tree is written to disk 122 * and then the fsync is considered complete. 123 * 124 * After a crash, items are copied out of the log-tree back into the 125 * subvolume tree. Any file data extents found are recorded in the extent 126 * allocation tree, and the log-tree freed. 127 * 128 * The log tree is read three times, once to pin down all the extents it is 129 * using in ram and once, once to create all the inodes logged in the tree 130 * and once to do all the other items. 131 */ 132 133 /* 134 * start a sub transaction and setup the log tree 135 * this increments the log tree writer count to make the people 136 * syncing the tree wait for us to finish 137 */ 138 static int start_log_trans(struct btrfs_trans_handle *trans, 139 struct btrfs_root *root) 140 { 141 int ret; 142 int err = 0; 143 144 mutex_lock(&root->log_mutex); 145 if (root->log_root) { 146 if (!root->log_start_pid) { 147 root->log_start_pid = current->pid; 148 root->log_multiple_pids = false; 149 } else if (root->log_start_pid != current->pid) { 150 root->log_multiple_pids = true; 151 } 152 153 atomic_inc(&root->log_batch); 154 atomic_inc(&root->log_writers); 155 mutex_unlock(&root->log_mutex); 156 return 0; 157 } 158 root->log_multiple_pids = false; 159 root->log_start_pid = current->pid; 160 mutex_lock(&root->fs_info->tree_log_mutex); 161 if (!root->fs_info->log_root_tree) { 162 ret = btrfs_init_log_root_tree(trans, root->fs_info); 163 if (ret) 164 err = ret; 165 } 166 if (err == 0 && !root->log_root) { 167 ret = btrfs_add_log_tree(trans, root); 168 if (ret) 169 err = ret; 170 } 171 mutex_unlock(&root->fs_info->tree_log_mutex); 172 atomic_inc(&root->log_batch); 173 atomic_inc(&root->log_writers); 174 mutex_unlock(&root->log_mutex); 175 return err; 176 } 177 178 /* 179 * returns 0 if there was a log transaction running and we were able 180 * to join, or returns -ENOENT if there were not transactions 181 * in progress 182 */ 183 static int join_running_log_trans(struct btrfs_root *root) 184 { 185 int ret = -ENOENT; 186 187 smp_mb(); 188 if (!root->log_root) 189 return -ENOENT; 190 191 mutex_lock(&root->log_mutex); 192 if (root->log_root) { 193 ret = 0; 194 atomic_inc(&root->log_writers); 195 } 196 mutex_unlock(&root->log_mutex); 197 return ret; 198 } 199 200 /* 201 * This either makes the current running log transaction wait 202 * until you call btrfs_end_log_trans() or it makes any future 203 * log transactions wait until you call btrfs_end_log_trans() 204 */ 205 int btrfs_pin_log_trans(struct btrfs_root *root) 206 { 207 int ret = -ENOENT; 208 209 mutex_lock(&root->log_mutex); 210 atomic_inc(&root->log_writers); 211 mutex_unlock(&root->log_mutex); 212 return ret; 213 } 214 215 /* 216 * indicate we're done making changes to the log tree 217 * and wake up anyone waiting to do a sync 218 */ 219 void btrfs_end_log_trans(struct btrfs_root *root) 220 { 221 if (atomic_dec_and_test(&root->log_writers)) { 222 smp_mb(); 223 if (waitqueue_active(&root->log_writer_wait)) 224 wake_up(&root->log_writer_wait); 225 } 226 } 227 228 229 /* 230 * the walk control struct is used to pass state down the chain when 231 * processing the log tree. The stage field tells us which part 232 * of the log tree processing we are currently doing. The others 233 * are state fields used for that specific part 234 */ 235 struct walk_control { 236 /* should we free the extent on disk when done? This is used 237 * at transaction commit time while freeing a log tree 238 */ 239 int free; 240 241 /* should we write out the extent buffer? This is used 242 * while flushing the log tree to disk during a sync 243 */ 244 int write; 245 246 /* should we wait for the extent buffer io to finish? Also used 247 * while flushing the log tree to disk for a sync 248 */ 249 int wait; 250 251 /* pin only walk, we record which extents on disk belong to the 252 * log trees 253 */ 254 int pin; 255 256 /* what stage of the replay code we're currently in */ 257 int stage; 258 259 /* the root we are currently replaying */ 260 struct btrfs_root *replay_dest; 261 262 /* the trans handle for the current replay */ 263 struct btrfs_trans_handle *trans; 264 265 /* the function that gets used to process blocks we find in the 266 * tree. Note the extent_buffer might not be up to date when it is 267 * passed in, and it must be checked or read if you need the data 268 * inside it 269 */ 270 int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb, 271 struct walk_control *wc, u64 gen); 272 }; 273 274 /* 275 * process_func used to pin down extents, write them or wait on them 276 */ 277 static int process_one_buffer(struct btrfs_root *log, 278 struct extent_buffer *eb, 279 struct walk_control *wc, u64 gen) 280 { 281 int ret = 0; 282 283 /* 284 * If this fs is mixed then we need to be able to process the leaves to 285 * pin down any logged extents, so we have to read the block. 286 */ 287 if (btrfs_fs_incompat(log->fs_info, MIXED_GROUPS)) { 288 ret = btrfs_read_buffer(eb, gen); 289 if (ret) 290 return ret; 291 } 292 293 if (wc->pin) 294 ret = btrfs_pin_extent_for_log_replay(log->fs_info->extent_root, 295 eb->start, eb->len); 296 297 if (!ret && btrfs_buffer_uptodate(eb, gen, 0)) { 298 if (wc->pin && btrfs_header_level(eb) == 0) 299 ret = btrfs_exclude_logged_extents(log, eb); 300 if (wc->write) 301 btrfs_write_tree_block(eb); 302 if (wc->wait) 303 btrfs_wait_tree_block_writeback(eb); 304 } 305 return ret; 306 } 307 308 /* 309 * Item overwrite used by replay and tree logging. eb, slot and key all refer 310 * to the src data we are copying out. 311 * 312 * root is the tree we are copying into, and path is a scratch 313 * path for use in this function (it should be released on entry and 314 * will be released on exit). 315 * 316 * If the key is already in the destination tree the existing item is 317 * overwritten. If the existing item isn't big enough, it is extended. 318 * If it is too large, it is truncated. 319 * 320 * If the key isn't in the destination yet, a new item is inserted. 321 */ 322 static noinline int overwrite_item(struct btrfs_trans_handle *trans, 323 struct btrfs_root *root, 324 struct btrfs_path *path, 325 struct extent_buffer *eb, int slot, 326 struct btrfs_key *key) 327 { 328 int ret; 329 u32 item_size; 330 u64 saved_i_size = 0; 331 int save_old_i_size = 0; 332 unsigned long src_ptr; 333 unsigned long dst_ptr; 334 int overwrite_root = 0; 335 bool inode_item = key->type == BTRFS_INODE_ITEM_KEY; 336 337 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) 338 overwrite_root = 1; 339 340 item_size = btrfs_item_size_nr(eb, slot); 341 src_ptr = btrfs_item_ptr_offset(eb, slot); 342 343 /* look for the key in the destination tree */ 344 ret = btrfs_search_slot(NULL, root, key, path, 0, 0); 345 if (ret < 0) 346 return ret; 347 348 if (ret == 0) { 349 char *src_copy; 350 char *dst_copy; 351 u32 dst_size = btrfs_item_size_nr(path->nodes[0], 352 path->slots[0]); 353 if (dst_size != item_size) 354 goto insert; 355 356 if (item_size == 0) { 357 btrfs_release_path(path); 358 return 0; 359 } 360 dst_copy = kmalloc(item_size, GFP_NOFS); 361 src_copy = kmalloc(item_size, GFP_NOFS); 362 if (!dst_copy || !src_copy) { 363 btrfs_release_path(path); 364 kfree(dst_copy); 365 kfree(src_copy); 366 return -ENOMEM; 367 } 368 369 read_extent_buffer(eb, src_copy, src_ptr, item_size); 370 371 dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]); 372 read_extent_buffer(path->nodes[0], dst_copy, dst_ptr, 373 item_size); 374 ret = memcmp(dst_copy, src_copy, item_size); 375 376 kfree(dst_copy); 377 kfree(src_copy); 378 /* 379 * they have the same contents, just return, this saves 380 * us from cowing blocks in the destination tree and doing 381 * extra writes that may not have been done by a previous 382 * sync 383 */ 384 if (ret == 0) { 385 btrfs_release_path(path); 386 return 0; 387 } 388 389 /* 390 * We need to load the old nbytes into the inode so when we 391 * replay the extents we've logged we get the right nbytes. 392 */ 393 if (inode_item) { 394 struct btrfs_inode_item *item; 395 u64 nbytes; 396 u32 mode; 397 398 item = btrfs_item_ptr(path->nodes[0], path->slots[0], 399 struct btrfs_inode_item); 400 nbytes = btrfs_inode_nbytes(path->nodes[0], item); 401 item = btrfs_item_ptr(eb, slot, 402 struct btrfs_inode_item); 403 btrfs_set_inode_nbytes(eb, item, nbytes); 404 405 /* 406 * If this is a directory we need to reset the i_size to 407 * 0 so that we can set it up properly when replaying 408 * the rest of the items in this log. 409 */ 410 mode = btrfs_inode_mode(eb, item); 411 if (S_ISDIR(mode)) 412 btrfs_set_inode_size(eb, item, 0); 413 } 414 } else if (inode_item) { 415 struct btrfs_inode_item *item; 416 u32 mode; 417 418 /* 419 * New inode, set nbytes to 0 so that the nbytes comes out 420 * properly when we replay the extents. 421 */ 422 item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item); 423 btrfs_set_inode_nbytes(eb, item, 0); 424 425 /* 426 * If this is a directory we need to reset the i_size to 0 so 427 * that we can set it up properly when replaying the rest of 428 * the items in this log. 429 */ 430 mode = btrfs_inode_mode(eb, item); 431 if (S_ISDIR(mode)) 432 btrfs_set_inode_size(eb, item, 0); 433 } 434 insert: 435 btrfs_release_path(path); 436 /* try to insert the key into the destination tree */ 437 ret = btrfs_insert_empty_item(trans, root, path, 438 key, item_size); 439 440 /* make sure any existing item is the correct size */ 441 if (ret == -EEXIST) { 442 u32 found_size; 443 found_size = btrfs_item_size_nr(path->nodes[0], 444 path->slots[0]); 445 if (found_size > item_size) 446 btrfs_truncate_item(root, path, item_size, 1); 447 else if (found_size < item_size) 448 btrfs_extend_item(root, path, 449 item_size - found_size); 450 } else if (ret) { 451 return ret; 452 } 453 dst_ptr = btrfs_item_ptr_offset(path->nodes[0], 454 path->slots[0]); 455 456 /* don't overwrite an existing inode if the generation number 457 * was logged as zero. This is done when the tree logging code 458 * is just logging an inode to make sure it exists after recovery. 459 * 460 * Also, don't overwrite i_size on directories during replay. 461 * log replay inserts and removes directory items based on the 462 * state of the tree found in the subvolume, and i_size is modified 463 * as it goes 464 */ 465 if (key->type == BTRFS_INODE_ITEM_KEY && ret == -EEXIST) { 466 struct btrfs_inode_item *src_item; 467 struct btrfs_inode_item *dst_item; 468 469 src_item = (struct btrfs_inode_item *)src_ptr; 470 dst_item = (struct btrfs_inode_item *)dst_ptr; 471 472 if (btrfs_inode_generation(eb, src_item) == 0) 473 goto no_copy; 474 475 if (overwrite_root && 476 S_ISDIR(btrfs_inode_mode(eb, src_item)) && 477 S_ISDIR(btrfs_inode_mode(path->nodes[0], dst_item))) { 478 save_old_i_size = 1; 479 saved_i_size = btrfs_inode_size(path->nodes[0], 480 dst_item); 481 } 482 } 483 484 copy_extent_buffer(path->nodes[0], eb, dst_ptr, 485 src_ptr, item_size); 486 487 if (save_old_i_size) { 488 struct btrfs_inode_item *dst_item; 489 dst_item = (struct btrfs_inode_item *)dst_ptr; 490 btrfs_set_inode_size(path->nodes[0], dst_item, saved_i_size); 491 } 492 493 /* make sure the generation is filled in */ 494 if (key->type == BTRFS_INODE_ITEM_KEY) { 495 struct btrfs_inode_item *dst_item; 496 dst_item = (struct btrfs_inode_item *)dst_ptr; 497 if (btrfs_inode_generation(path->nodes[0], dst_item) == 0) { 498 btrfs_set_inode_generation(path->nodes[0], dst_item, 499 trans->transid); 500 } 501 } 502 no_copy: 503 btrfs_mark_buffer_dirty(path->nodes[0]); 504 btrfs_release_path(path); 505 return 0; 506 } 507 508 /* 509 * simple helper to read an inode off the disk from a given root 510 * This can only be called for subvolume roots and not for the log 511 */ 512 static noinline struct inode *read_one_inode(struct btrfs_root *root, 513 u64 objectid) 514 { 515 struct btrfs_key key; 516 struct inode *inode; 517 518 key.objectid = objectid; 519 key.type = BTRFS_INODE_ITEM_KEY; 520 key.offset = 0; 521 inode = btrfs_iget(root->fs_info->sb, &key, root, NULL); 522 if (IS_ERR(inode)) { 523 inode = NULL; 524 } else if (is_bad_inode(inode)) { 525 iput(inode); 526 inode = NULL; 527 } 528 return inode; 529 } 530 531 /* replays a single extent in 'eb' at 'slot' with 'key' into the 532 * subvolume 'root'. path is released on entry and should be released 533 * on exit. 534 * 535 * extents in the log tree have not been allocated out of the extent 536 * tree yet. So, this completes the allocation, taking a reference 537 * as required if the extent already exists or creating a new extent 538 * if it isn't in the extent allocation tree yet. 539 * 540 * The extent is inserted into the file, dropping any existing extents 541 * from the file that overlap the new one. 542 */ 543 static noinline int replay_one_extent(struct btrfs_trans_handle *trans, 544 struct btrfs_root *root, 545 struct btrfs_path *path, 546 struct extent_buffer *eb, int slot, 547 struct btrfs_key *key) 548 { 549 int found_type; 550 u64 extent_end; 551 u64 start = key->offset; 552 u64 nbytes = 0; 553 struct btrfs_file_extent_item *item; 554 struct inode *inode = NULL; 555 unsigned long size; 556 int ret = 0; 557 558 item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item); 559 found_type = btrfs_file_extent_type(eb, item); 560 561 if (found_type == BTRFS_FILE_EXTENT_REG || 562 found_type == BTRFS_FILE_EXTENT_PREALLOC) { 563 nbytes = btrfs_file_extent_num_bytes(eb, item); 564 extent_end = start + nbytes; 565 566 /* 567 * We don't add to the inodes nbytes if we are prealloc or a 568 * hole. 569 */ 570 if (btrfs_file_extent_disk_bytenr(eb, item) == 0) 571 nbytes = 0; 572 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) { 573 size = btrfs_file_extent_inline_len(eb, slot, item); 574 nbytes = btrfs_file_extent_ram_bytes(eb, item); 575 extent_end = ALIGN(start + size, root->sectorsize); 576 } else { 577 ret = 0; 578 goto out; 579 } 580 581 inode = read_one_inode(root, key->objectid); 582 if (!inode) { 583 ret = -EIO; 584 goto out; 585 } 586 587 /* 588 * first check to see if we already have this extent in the 589 * file. This must be done before the btrfs_drop_extents run 590 * so we don't try to drop this extent. 591 */ 592 ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode), 593 start, 0); 594 595 if (ret == 0 && 596 (found_type == BTRFS_FILE_EXTENT_REG || 597 found_type == BTRFS_FILE_EXTENT_PREALLOC)) { 598 struct btrfs_file_extent_item cmp1; 599 struct btrfs_file_extent_item cmp2; 600 struct btrfs_file_extent_item *existing; 601 struct extent_buffer *leaf; 602 603 leaf = path->nodes[0]; 604 existing = btrfs_item_ptr(leaf, path->slots[0], 605 struct btrfs_file_extent_item); 606 607 read_extent_buffer(eb, &cmp1, (unsigned long)item, 608 sizeof(cmp1)); 609 read_extent_buffer(leaf, &cmp2, (unsigned long)existing, 610 sizeof(cmp2)); 611 612 /* 613 * we already have a pointer to this exact extent, 614 * we don't have to do anything 615 */ 616 if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) { 617 btrfs_release_path(path); 618 goto out; 619 } 620 } 621 btrfs_release_path(path); 622 623 /* drop any overlapping extents */ 624 ret = btrfs_drop_extents(trans, root, inode, start, extent_end, 1); 625 if (ret) 626 goto out; 627 628 if (found_type == BTRFS_FILE_EXTENT_REG || 629 found_type == BTRFS_FILE_EXTENT_PREALLOC) { 630 u64 offset; 631 unsigned long dest_offset; 632 struct btrfs_key ins; 633 634 ret = btrfs_insert_empty_item(trans, root, path, key, 635 sizeof(*item)); 636 if (ret) 637 goto out; 638 dest_offset = btrfs_item_ptr_offset(path->nodes[0], 639 path->slots[0]); 640 copy_extent_buffer(path->nodes[0], eb, dest_offset, 641 (unsigned long)item, sizeof(*item)); 642 643 ins.objectid = btrfs_file_extent_disk_bytenr(eb, item); 644 ins.offset = btrfs_file_extent_disk_num_bytes(eb, item); 645 ins.type = BTRFS_EXTENT_ITEM_KEY; 646 offset = key->offset - btrfs_file_extent_offset(eb, item); 647 648 if (ins.objectid > 0) { 649 u64 csum_start; 650 u64 csum_end; 651 LIST_HEAD(ordered_sums); 652 /* 653 * is this extent already allocated in the extent 654 * allocation tree? If so, just add a reference 655 */ 656 ret = btrfs_lookup_extent(root, ins.objectid, 657 ins.offset); 658 if (ret == 0) { 659 ret = btrfs_inc_extent_ref(trans, root, 660 ins.objectid, ins.offset, 661 0, root->root_key.objectid, 662 key->objectid, offset, 0); 663 if (ret) 664 goto out; 665 } else { 666 /* 667 * insert the extent pointer in the extent 668 * allocation tree 669 */ 670 ret = btrfs_alloc_logged_file_extent(trans, 671 root, root->root_key.objectid, 672 key->objectid, offset, &ins); 673 if (ret) 674 goto out; 675 } 676 btrfs_release_path(path); 677 678 if (btrfs_file_extent_compression(eb, item)) { 679 csum_start = ins.objectid; 680 csum_end = csum_start + ins.offset; 681 } else { 682 csum_start = ins.objectid + 683 btrfs_file_extent_offset(eb, item); 684 csum_end = csum_start + 685 btrfs_file_extent_num_bytes(eb, item); 686 } 687 688 ret = btrfs_lookup_csums_range(root->log_root, 689 csum_start, csum_end - 1, 690 &ordered_sums, 0); 691 if (ret) 692 goto out; 693 while (!list_empty(&ordered_sums)) { 694 struct btrfs_ordered_sum *sums; 695 sums = list_entry(ordered_sums.next, 696 struct btrfs_ordered_sum, 697 list); 698 if (!ret) 699 ret = btrfs_csum_file_blocks(trans, 700 root->fs_info->csum_root, 701 sums); 702 list_del(&sums->list); 703 kfree(sums); 704 } 705 if (ret) 706 goto out; 707 } else { 708 btrfs_release_path(path); 709 } 710 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) { 711 /* inline extents are easy, we just overwrite them */ 712 ret = overwrite_item(trans, root, path, eb, slot, key); 713 if (ret) 714 goto out; 715 } 716 717 inode_add_bytes(inode, nbytes); 718 ret = btrfs_update_inode(trans, root, inode); 719 out: 720 if (inode) 721 iput(inode); 722 return ret; 723 } 724 725 /* 726 * when cleaning up conflicts between the directory names in the 727 * subvolume, directory names in the log and directory names in the 728 * inode back references, we may have to unlink inodes from directories. 729 * 730 * This is a helper function to do the unlink of a specific directory 731 * item 732 */ 733 static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans, 734 struct btrfs_root *root, 735 struct btrfs_path *path, 736 struct inode *dir, 737 struct btrfs_dir_item *di) 738 { 739 struct inode *inode; 740 char *name; 741 int name_len; 742 struct extent_buffer *leaf; 743 struct btrfs_key location; 744 int ret; 745 746 leaf = path->nodes[0]; 747 748 btrfs_dir_item_key_to_cpu(leaf, di, &location); 749 name_len = btrfs_dir_name_len(leaf, di); 750 name = kmalloc(name_len, GFP_NOFS); 751 if (!name) 752 return -ENOMEM; 753 754 read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len); 755 btrfs_release_path(path); 756 757 inode = read_one_inode(root, location.objectid); 758 if (!inode) { 759 ret = -EIO; 760 goto out; 761 } 762 763 ret = link_to_fixup_dir(trans, root, path, location.objectid); 764 if (ret) 765 goto out; 766 767 ret = btrfs_unlink_inode(trans, root, dir, inode, name, name_len); 768 if (ret) 769 goto out; 770 else 771 ret = btrfs_run_delayed_items(trans, root); 772 out: 773 kfree(name); 774 iput(inode); 775 return ret; 776 } 777 778 /* 779 * helper function to see if a given name and sequence number found 780 * in an inode back reference are already in a directory and correctly 781 * point to this inode 782 */ 783 static noinline int inode_in_dir(struct btrfs_root *root, 784 struct btrfs_path *path, 785 u64 dirid, u64 objectid, u64 index, 786 const char *name, int name_len) 787 { 788 struct btrfs_dir_item *di; 789 struct btrfs_key location; 790 int match = 0; 791 792 di = btrfs_lookup_dir_index_item(NULL, root, path, dirid, 793 index, name, name_len, 0); 794 if (di && !IS_ERR(di)) { 795 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location); 796 if (location.objectid != objectid) 797 goto out; 798 } else 799 goto out; 800 btrfs_release_path(path); 801 802 di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0); 803 if (di && !IS_ERR(di)) { 804 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location); 805 if (location.objectid != objectid) 806 goto out; 807 } else 808 goto out; 809 match = 1; 810 out: 811 btrfs_release_path(path); 812 return match; 813 } 814 815 /* 816 * helper function to check a log tree for a named back reference in 817 * an inode. This is used to decide if a back reference that is 818 * found in the subvolume conflicts with what we find in the log. 819 * 820 * inode backreferences may have multiple refs in a single item, 821 * during replay we process one reference at a time, and we don't 822 * want to delete valid links to a file from the subvolume if that 823 * link is also in the log. 824 */ 825 static noinline int backref_in_log(struct btrfs_root *log, 826 struct btrfs_key *key, 827 u64 ref_objectid, 828 char *name, int namelen) 829 { 830 struct btrfs_path *path; 831 struct btrfs_inode_ref *ref; 832 unsigned long ptr; 833 unsigned long ptr_end; 834 unsigned long name_ptr; 835 int found_name_len; 836 int item_size; 837 int ret; 838 int match = 0; 839 840 path = btrfs_alloc_path(); 841 if (!path) 842 return -ENOMEM; 843 844 ret = btrfs_search_slot(NULL, log, key, path, 0, 0); 845 if (ret != 0) 846 goto out; 847 848 ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]); 849 850 if (key->type == BTRFS_INODE_EXTREF_KEY) { 851 if (btrfs_find_name_in_ext_backref(path, ref_objectid, 852 name, namelen, NULL)) 853 match = 1; 854 855 goto out; 856 } 857 858 item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]); 859 ptr_end = ptr + item_size; 860 while (ptr < ptr_end) { 861 ref = (struct btrfs_inode_ref *)ptr; 862 found_name_len = btrfs_inode_ref_name_len(path->nodes[0], ref); 863 if (found_name_len == namelen) { 864 name_ptr = (unsigned long)(ref + 1); 865 ret = memcmp_extent_buffer(path->nodes[0], name, 866 name_ptr, namelen); 867 if (ret == 0) { 868 match = 1; 869 goto out; 870 } 871 } 872 ptr = (unsigned long)(ref + 1) + found_name_len; 873 } 874 out: 875 btrfs_free_path(path); 876 return match; 877 } 878 879 static inline int __add_inode_ref(struct btrfs_trans_handle *trans, 880 struct btrfs_root *root, 881 struct btrfs_path *path, 882 struct btrfs_root *log_root, 883 struct inode *dir, struct inode *inode, 884 struct extent_buffer *eb, 885 u64 inode_objectid, u64 parent_objectid, 886 u64 ref_index, char *name, int namelen, 887 int *search_done) 888 { 889 int ret; 890 char *victim_name; 891 int victim_name_len; 892 struct extent_buffer *leaf; 893 struct btrfs_dir_item *di; 894 struct btrfs_key search_key; 895 struct btrfs_inode_extref *extref; 896 897 again: 898 /* Search old style refs */ 899 search_key.objectid = inode_objectid; 900 search_key.type = BTRFS_INODE_REF_KEY; 901 search_key.offset = parent_objectid; 902 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0); 903 if (ret == 0) { 904 struct btrfs_inode_ref *victim_ref; 905 unsigned long ptr; 906 unsigned long ptr_end; 907 908 leaf = path->nodes[0]; 909 910 /* are we trying to overwrite a back ref for the root directory 911 * if so, just jump out, we're done 912 */ 913 if (search_key.objectid == search_key.offset) 914 return 1; 915 916 /* check all the names in this back reference to see 917 * if they are in the log. if so, we allow them to stay 918 * otherwise they must be unlinked as a conflict 919 */ 920 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); 921 ptr_end = ptr + btrfs_item_size_nr(leaf, path->slots[0]); 922 while (ptr < ptr_end) { 923 victim_ref = (struct btrfs_inode_ref *)ptr; 924 victim_name_len = btrfs_inode_ref_name_len(leaf, 925 victim_ref); 926 victim_name = kmalloc(victim_name_len, GFP_NOFS); 927 if (!victim_name) 928 return -ENOMEM; 929 930 read_extent_buffer(leaf, victim_name, 931 (unsigned long)(victim_ref + 1), 932 victim_name_len); 933 934 if (!backref_in_log(log_root, &search_key, 935 parent_objectid, 936 victim_name, 937 victim_name_len)) { 938 inc_nlink(inode); 939 btrfs_release_path(path); 940 941 ret = btrfs_unlink_inode(trans, root, dir, 942 inode, victim_name, 943 victim_name_len); 944 kfree(victim_name); 945 if (ret) 946 return ret; 947 ret = btrfs_run_delayed_items(trans, root); 948 if (ret) 949 return ret; 950 *search_done = 1; 951 goto again; 952 } 953 kfree(victim_name); 954 955 ptr = (unsigned long)(victim_ref + 1) + victim_name_len; 956 } 957 958 /* 959 * NOTE: we have searched root tree and checked the 960 * coresponding ref, it does not need to check again. 961 */ 962 *search_done = 1; 963 } 964 btrfs_release_path(path); 965 966 /* Same search but for extended refs */ 967 extref = btrfs_lookup_inode_extref(NULL, root, path, name, namelen, 968 inode_objectid, parent_objectid, 0, 969 0); 970 if (!IS_ERR_OR_NULL(extref)) { 971 u32 item_size; 972 u32 cur_offset = 0; 973 unsigned long base; 974 struct inode *victim_parent; 975 976 leaf = path->nodes[0]; 977 978 item_size = btrfs_item_size_nr(leaf, path->slots[0]); 979 base = btrfs_item_ptr_offset(leaf, path->slots[0]); 980 981 while (cur_offset < item_size) { 982 extref = (struct btrfs_inode_extref *)base + cur_offset; 983 984 victim_name_len = btrfs_inode_extref_name_len(leaf, extref); 985 986 if (btrfs_inode_extref_parent(leaf, extref) != parent_objectid) 987 goto next; 988 989 victim_name = kmalloc(victim_name_len, GFP_NOFS); 990 if (!victim_name) 991 return -ENOMEM; 992 read_extent_buffer(leaf, victim_name, (unsigned long)&extref->name, 993 victim_name_len); 994 995 search_key.objectid = inode_objectid; 996 search_key.type = BTRFS_INODE_EXTREF_KEY; 997 search_key.offset = btrfs_extref_hash(parent_objectid, 998 victim_name, 999 victim_name_len); 1000 ret = 0; 1001 if (!backref_in_log(log_root, &search_key, 1002 parent_objectid, victim_name, 1003 victim_name_len)) { 1004 ret = -ENOENT; 1005 victim_parent = read_one_inode(root, 1006 parent_objectid); 1007 if (victim_parent) { 1008 inc_nlink(inode); 1009 btrfs_release_path(path); 1010 1011 ret = btrfs_unlink_inode(trans, root, 1012 victim_parent, 1013 inode, 1014 victim_name, 1015 victim_name_len); 1016 if (!ret) 1017 ret = btrfs_run_delayed_items( 1018 trans, root); 1019 } 1020 iput(victim_parent); 1021 kfree(victim_name); 1022 if (ret) 1023 return ret; 1024 *search_done = 1; 1025 goto again; 1026 } 1027 kfree(victim_name); 1028 if (ret) 1029 return ret; 1030 next: 1031 cur_offset += victim_name_len + sizeof(*extref); 1032 } 1033 *search_done = 1; 1034 } 1035 btrfs_release_path(path); 1036 1037 /* look for a conflicting sequence number */ 1038 di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir), 1039 ref_index, name, namelen, 0); 1040 if (di && !IS_ERR(di)) { 1041 ret = drop_one_dir_item(trans, root, path, dir, di); 1042 if (ret) 1043 return ret; 1044 } 1045 btrfs_release_path(path); 1046 1047 /* look for a conflicing name */ 1048 di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir), 1049 name, namelen, 0); 1050 if (di && !IS_ERR(di)) { 1051 ret = drop_one_dir_item(trans, root, path, dir, di); 1052 if (ret) 1053 return ret; 1054 } 1055 btrfs_release_path(path); 1056 1057 return 0; 1058 } 1059 1060 static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr, 1061 u32 *namelen, char **name, u64 *index, 1062 u64 *parent_objectid) 1063 { 1064 struct btrfs_inode_extref *extref; 1065 1066 extref = (struct btrfs_inode_extref *)ref_ptr; 1067 1068 *namelen = btrfs_inode_extref_name_len(eb, extref); 1069 *name = kmalloc(*namelen, GFP_NOFS); 1070 if (*name == NULL) 1071 return -ENOMEM; 1072 1073 read_extent_buffer(eb, *name, (unsigned long)&extref->name, 1074 *namelen); 1075 1076 *index = btrfs_inode_extref_index(eb, extref); 1077 if (parent_objectid) 1078 *parent_objectid = btrfs_inode_extref_parent(eb, extref); 1079 1080 return 0; 1081 } 1082 1083 static int ref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr, 1084 u32 *namelen, char **name, u64 *index) 1085 { 1086 struct btrfs_inode_ref *ref; 1087 1088 ref = (struct btrfs_inode_ref *)ref_ptr; 1089 1090 *namelen = btrfs_inode_ref_name_len(eb, ref); 1091 *name = kmalloc(*namelen, GFP_NOFS); 1092 if (*name == NULL) 1093 return -ENOMEM; 1094 1095 read_extent_buffer(eb, *name, (unsigned long)(ref + 1), *namelen); 1096 1097 *index = btrfs_inode_ref_index(eb, ref); 1098 1099 return 0; 1100 } 1101 1102 /* 1103 * replay one inode back reference item found in the log tree. 1104 * eb, slot and key refer to the buffer and key found in the log tree. 1105 * root is the destination we are replaying into, and path is for temp 1106 * use by this function. (it should be released on return). 1107 */ 1108 static noinline int add_inode_ref(struct btrfs_trans_handle *trans, 1109 struct btrfs_root *root, 1110 struct btrfs_root *log, 1111 struct btrfs_path *path, 1112 struct extent_buffer *eb, int slot, 1113 struct btrfs_key *key) 1114 { 1115 struct inode *dir = NULL; 1116 struct inode *inode = NULL; 1117 unsigned long ref_ptr; 1118 unsigned long ref_end; 1119 char *name = NULL; 1120 int namelen; 1121 int ret; 1122 int search_done = 0; 1123 int log_ref_ver = 0; 1124 u64 parent_objectid; 1125 u64 inode_objectid; 1126 u64 ref_index = 0; 1127 int ref_struct_size; 1128 1129 ref_ptr = btrfs_item_ptr_offset(eb, slot); 1130 ref_end = ref_ptr + btrfs_item_size_nr(eb, slot); 1131 1132 if (key->type == BTRFS_INODE_EXTREF_KEY) { 1133 struct btrfs_inode_extref *r; 1134 1135 ref_struct_size = sizeof(struct btrfs_inode_extref); 1136 log_ref_ver = 1; 1137 r = (struct btrfs_inode_extref *)ref_ptr; 1138 parent_objectid = btrfs_inode_extref_parent(eb, r); 1139 } else { 1140 ref_struct_size = sizeof(struct btrfs_inode_ref); 1141 parent_objectid = key->offset; 1142 } 1143 inode_objectid = key->objectid; 1144 1145 /* 1146 * it is possible that we didn't log all the parent directories 1147 * for a given inode. If we don't find the dir, just don't 1148 * copy the back ref in. The link count fixup code will take 1149 * care of the rest 1150 */ 1151 dir = read_one_inode(root, parent_objectid); 1152 if (!dir) { 1153 ret = -ENOENT; 1154 goto out; 1155 } 1156 1157 inode = read_one_inode(root, inode_objectid); 1158 if (!inode) { 1159 ret = -EIO; 1160 goto out; 1161 } 1162 1163 while (ref_ptr < ref_end) { 1164 if (log_ref_ver) { 1165 ret = extref_get_fields(eb, ref_ptr, &namelen, &name, 1166 &ref_index, &parent_objectid); 1167 /* 1168 * parent object can change from one array 1169 * item to another. 1170 */ 1171 if (!dir) 1172 dir = read_one_inode(root, parent_objectid); 1173 if (!dir) { 1174 ret = -ENOENT; 1175 goto out; 1176 } 1177 } else { 1178 ret = ref_get_fields(eb, ref_ptr, &namelen, &name, 1179 &ref_index); 1180 } 1181 if (ret) 1182 goto out; 1183 1184 /* if we already have a perfect match, we're done */ 1185 if (!inode_in_dir(root, path, btrfs_ino(dir), btrfs_ino(inode), 1186 ref_index, name, namelen)) { 1187 /* 1188 * look for a conflicting back reference in the 1189 * metadata. if we find one we have to unlink that name 1190 * of the file before we add our new link. Later on, we 1191 * overwrite any existing back reference, and we don't 1192 * want to create dangling pointers in the directory. 1193 */ 1194 1195 if (!search_done) { 1196 ret = __add_inode_ref(trans, root, path, log, 1197 dir, inode, eb, 1198 inode_objectid, 1199 parent_objectid, 1200 ref_index, name, namelen, 1201 &search_done); 1202 if (ret) { 1203 if (ret == 1) 1204 ret = 0; 1205 goto out; 1206 } 1207 } 1208 1209 /* insert our name */ 1210 ret = btrfs_add_link(trans, dir, inode, name, namelen, 1211 0, ref_index); 1212 if (ret) 1213 goto out; 1214 1215 btrfs_update_inode(trans, root, inode); 1216 } 1217 1218 ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen; 1219 kfree(name); 1220 name = NULL; 1221 if (log_ref_ver) { 1222 iput(dir); 1223 dir = NULL; 1224 } 1225 } 1226 1227 /* finally write the back reference in the inode */ 1228 ret = overwrite_item(trans, root, path, eb, slot, key); 1229 out: 1230 btrfs_release_path(path); 1231 kfree(name); 1232 iput(dir); 1233 iput(inode); 1234 return ret; 1235 } 1236 1237 static int insert_orphan_item(struct btrfs_trans_handle *trans, 1238 struct btrfs_root *root, u64 offset) 1239 { 1240 int ret; 1241 ret = btrfs_find_item(root, NULL, BTRFS_ORPHAN_OBJECTID, 1242 offset, BTRFS_ORPHAN_ITEM_KEY, NULL); 1243 if (ret > 0) 1244 ret = btrfs_insert_orphan_item(trans, root, offset); 1245 return ret; 1246 } 1247 1248 static int count_inode_extrefs(struct btrfs_root *root, 1249 struct inode *inode, struct btrfs_path *path) 1250 { 1251 int ret = 0; 1252 int name_len; 1253 unsigned int nlink = 0; 1254 u32 item_size; 1255 u32 cur_offset = 0; 1256 u64 inode_objectid = btrfs_ino(inode); 1257 u64 offset = 0; 1258 unsigned long ptr; 1259 struct btrfs_inode_extref *extref; 1260 struct extent_buffer *leaf; 1261 1262 while (1) { 1263 ret = btrfs_find_one_extref(root, inode_objectid, offset, path, 1264 &extref, &offset); 1265 if (ret) 1266 break; 1267 1268 leaf = path->nodes[0]; 1269 item_size = btrfs_item_size_nr(leaf, path->slots[0]); 1270 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); 1271 1272 while (cur_offset < item_size) { 1273 extref = (struct btrfs_inode_extref *) (ptr + cur_offset); 1274 name_len = btrfs_inode_extref_name_len(leaf, extref); 1275 1276 nlink++; 1277 1278 cur_offset += name_len + sizeof(*extref); 1279 } 1280 1281 offset++; 1282 btrfs_release_path(path); 1283 } 1284 btrfs_release_path(path); 1285 1286 if (ret < 0) 1287 return ret; 1288 return nlink; 1289 } 1290 1291 static int count_inode_refs(struct btrfs_root *root, 1292 struct inode *inode, struct btrfs_path *path) 1293 { 1294 int ret; 1295 struct btrfs_key key; 1296 unsigned int nlink = 0; 1297 unsigned long ptr; 1298 unsigned long ptr_end; 1299 int name_len; 1300 u64 ino = btrfs_ino(inode); 1301 1302 key.objectid = ino; 1303 key.type = BTRFS_INODE_REF_KEY; 1304 key.offset = (u64)-1; 1305 1306 while (1) { 1307 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 1308 if (ret < 0) 1309 break; 1310 if (ret > 0) { 1311 if (path->slots[0] == 0) 1312 break; 1313 path->slots[0]--; 1314 } 1315 process_slot: 1316 btrfs_item_key_to_cpu(path->nodes[0], &key, 1317 path->slots[0]); 1318 if (key.objectid != ino || 1319 key.type != BTRFS_INODE_REF_KEY) 1320 break; 1321 ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]); 1322 ptr_end = ptr + btrfs_item_size_nr(path->nodes[0], 1323 path->slots[0]); 1324 while (ptr < ptr_end) { 1325 struct btrfs_inode_ref *ref; 1326 1327 ref = (struct btrfs_inode_ref *)ptr; 1328 name_len = btrfs_inode_ref_name_len(path->nodes[0], 1329 ref); 1330 ptr = (unsigned long)(ref + 1) + name_len; 1331 nlink++; 1332 } 1333 1334 if (key.offset == 0) 1335 break; 1336 if (path->slots[0] > 0) { 1337 path->slots[0]--; 1338 goto process_slot; 1339 } 1340 key.offset--; 1341 btrfs_release_path(path); 1342 } 1343 btrfs_release_path(path); 1344 1345 return nlink; 1346 } 1347 1348 /* 1349 * There are a few corners where the link count of the file can't 1350 * be properly maintained during replay. So, instead of adding 1351 * lots of complexity to the log code, we just scan the backrefs 1352 * for any file that has been through replay. 1353 * 1354 * The scan will update the link count on the inode to reflect the 1355 * number of back refs found. If it goes down to zero, the iput 1356 * will free the inode. 1357 */ 1358 static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans, 1359 struct btrfs_root *root, 1360 struct inode *inode) 1361 { 1362 struct btrfs_path *path; 1363 int ret; 1364 u64 nlink = 0; 1365 u64 ino = btrfs_ino(inode); 1366 1367 path = btrfs_alloc_path(); 1368 if (!path) 1369 return -ENOMEM; 1370 1371 ret = count_inode_refs(root, inode, path); 1372 if (ret < 0) 1373 goto out; 1374 1375 nlink = ret; 1376 1377 ret = count_inode_extrefs(root, inode, path); 1378 if (ret == -ENOENT) 1379 ret = 0; 1380 1381 if (ret < 0) 1382 goto out; 1383 1384 nlink += ret; 1385 1386 ret = 0; 1387 1388 if (nlink != inode->i_nlink) { 1389 set_nlink(inode, nlink); 1390 btrfs_update_inode(trans, root, inode); 1391 } 1392 BTRFS_I(inode)->index_cnt = (u64)-1; 1393 1394 if (inode->i_nlink == 0) { 1395 if (S_ISDIR(inode->i_mode)) { 1396 ret = replay_dir_deletes(trans, root, NULL, path, 1397 ino, 1); 1398 if (ret) 1399 goto out; 1400 } 1401 ret = insert_orphan_item(trans, root, ino); 1402 } 1403 1404 out: 1405 btrfs_free_path(path); 1406 return ret; 1407 } 1408 1409 static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans, 1410 struct btrfs_root *root, 1411 struct btrfs_path *path) 1412 { 1413 int ret; 1414 struct btrfs_key key; 1415 struct inode *inode; 1416 1417 key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID; 1418 key.type = BTRFS_ORPHAN_ITEM_KEY; 1419 key.offset = (u64)-1; 1420 while (1) { 1421 ret = btrfs_search_slot(trans, root, &key, path, -1, 1); 1422 if (ret < 0) 1423 break; 1424 1425 if (ret == 1) { 1426 if (path->slots[0] == 0) 1427 break; 1428 path->slots[0]--; 1429 } 1430 1431 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); 1432 if (key.objectid != BTRFS_TREE_LOG_FIXUP_OBJECTID || 1433 key.type != BTRFS_ORPHAN_ITEM_KEY) 1434 break; 1435 1436 ret = btrfs_del_item(trans, root, path); 1437 if (ret) 1438 goto out; 1439 1440 btrfs_release_path(path); 1441 inode = read_one_inode(root, key.offset); 1442 if (!inode) 1443 return -EIO; 1444 1445 ret = fixup_inode_link_count(trans, root, inode); 1446 iput(inode); 1447 if (ret) 1448 goto out; 1449 1450 /* 1451 * fixup on a directory may create new entries, 1452 * make sure we always look for the highset possible 1453 * offset 1454 */ 1455 key.offset = (u64)-1; 1456 } 1457 ret = 0; 1458 out: 1459 btrfs_release_path(path); 1460 return ret; 1461 } 1462 1463 1464 /* 1465 * record a given inode in the fixup dir so we can check its link 1466 * count when replay is done. The link count is incremented here 1467 * so the inode won't go away until we check it 1468 */ 1469 static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans, 1470 struct btrfs_root *root, 1471 struct btrfs_path *path, 1472 u64 objectid) 1473 { 1474 struct btrfs_key key; 1475 int ret = 0; 1476 struct inode *inode; 1477 1478 inode = read_one_inode(root, objectid); 1479 if (!inode) 1480 return -EIO; 1481 1482 key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID; 1483 btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY); 1484 key.offset = objectid; 1485 1486 ret = btrfs_insert_empty_item(trans, root, path, &key, 0); 1487 1488 btrfs_release_path(path); 1489 if (ret == 0) { 1490 if (!inode->i_nlink) 1491 set_nlink(inode, 1); 1492 else 1493 inc_nlink(inode); 1494 ret = btrfs_update_inode(trans, root, inode); 1495 } else if (ret == -EEXIST) { 1496 ret = 0; 1497 } else { 1498 BUG(); /* Logic Error */ 1499 } 1500 iput(inode); 1501 1502 return ret; 1503 } 1504 1505 /* 1506 * when replaying the log for a directory, we only insert names 1507 * for inodes that actually exist. This means an fsync on a directory 1508 * does not implicitly fsync all the new files in it 1509 */ 1510 static noinline int insert_one_name(struct btrfs_trans_handle *trans, 1511 struct btrfs_root *root, 1512 struct btrfs_path *path, 1513 u64 dirid, u64 index, 1514 char *name, int name_len, u8 type, 1515 struct btrfs_key *location) 1516 { 1517 struct inode *inode; 1518 struct inode *dir; 1519 int ret; 1520 1521 inode = read_one_inode(root, location->objectid); 1522 if (!inode) 1523 return -ENOENT; 1524 1525 dir = read_one_inode(root, dirid); 1526 if (!dir) { 1527 iput(inode); 1528 return -EIO; 1529 } 1530 1531 ret = btrfs_add_link(trans, dir, inode, name, name_len, 1, index); 1532 1533 /* FIXME, put inode into FIXUP list */ 1534 1535 iput(inode); 1536 iput(dir); 1537 return ret; 1538 } 1539 1540 /* 1541 * take a single entry in a log directory item and replay it into 1542 * the subvolume. 1543 * 1544 * if a conflicting item exists in the subdirectory already, 1545 * the inode it points to is unlinked and put into the link count 1546 * fix up tree. 1547 * 1548 * If a name from the log points to a file or directory that does 1549 * not exist in the FS, it is skipped. fsyncs on directories 1550 * do not force down inodes inside that directory, just changes to the 1551 * names or unlinks in a directory. 1552 */ 1553 static noinline int replay_one_name(struct btrfs_trans_handle *trans, 1554 struct btrfs_root *root, 1555 struct btrfs_path *path, 1556 struct extent_buffer *eb, 1557 struct btrfs_dir_item *di, 1558 struct btrfs_key *key) 1559 { 1560 char *name; 1561 int name_len; 1562 struct btrfs_dir_item *dst_di; 1563 struct btrfs_key found_key; 1564 struct btrfs_key log_key; 1565 struct inode *dir; 1566 u8 log_type; 1567 int exists; 1568 int ret = 0; 1569 bool update_size = (key->type == BTRFS_DIR_INDEX_KEY); 1570 1571 dir = read_one_inode(root, key->objectid); 1572 if (!dir) 1573 return -EIO; 1574 1575 name_len = btrfs_dir_name_len(eb, di); 1576 name = kmalloc(name_len, GFP_NOFS); 1577 if (!name) { 1578 ret = -ENOMEM; 1579 goto out; 1580 } 1581 1582 log_type = btrfs_dir_type(eb, di); 1583 read_extent_buffer(eb, name, (unsigned long)(di + 1), 1584 name_len); 1585 1586 btrfs_dir_item_key_to_cpu(eb, di, &log_key); 1587 exists = btrfs_lookup_inode(trans, root, path, &log_key, 0); 1588 if (exists == 0) 1589 exists = 1; 1590 else 1591 exists = 0; 1592 btrfs_release_path(path); 1593 1594 if (key->type == BTRFS_DIR_ITEM_KEY) { 1595 dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid, 1596 name, name_len, 1); 1597 } else if (key->type == BTRFS_DIR_INDEX_KEY) { 1598 dst_di = btrfs_lookup_dir_index_item(trans, root, path, 1599 key->objectid, 1600 key->offset, name, 1601 name_len, 1); 1602 } else { 1603 /* Corruption */ 1604 ret = -EINVAL; 1605 goto out; 1606 } 1607 if (IS_ERR_OR_NULL(dst_di)) { 1608 /* we need a sequence number to insert, so we only 1609 * do inserts for the BTRFS_DIR_INDEX_KEY types 1610 */ 1611 if (key->type != BTRFS_DIR_INDEX_KEY) 1612 goto out; 1613 goto insert; 1614 } 1615 1616 btrfs_dir_item_key_to_cpu(path->nodes[0], dst_di, &found_key); 1617 /* the existing item matches the logged item */ 1618 if (found_key.objectid == log_key.objectid && 1619 found_key.type == log_key.type && 1620 found_key.offset == log_key.offset && 1621 btrfs_dir_type(path->nodes[0], dst_di) == log_type) { 1622 goto out; 1623 } 1624 1625 /* 1626 * don't drop the conflicting directory entry if the inode 1627 * for the new entry doesn't exist 1628 */ 1629 if (!exists) 1630 goto out; 1631 1632 ret = drop_one_dir_item(trans, root, path, dir, dst_di); 1633 if (ret) 1634 goto out; 1635 1636 if (key->type == BTRFS_DIR_INDEX_KEY) 1637 goto insert; 1638 out: 1639 btrfs_release_path(path); 1640 if (!ret && update_size) { 1641 btrfs_i_size_write(dir, dir->i_size + name_len * 2); 1642 ret = btrfs_update_inode(trans, root, dir); 1643 } 1644 kfree(name); 1645 iput(dir); 1646 return ret; 1647 1648 insert: 1649 btrfs_release_path(path); 1650 ret = insert_one_name(trans, root, path, key->objectid, key->offset, 1651 name, name_len, log_type, &log_key); 1652 if (ret && ret != -ENOENT) 1653 goto out; 1654 update_size = false; 1655 ret = 0; 1656 goto out; 1657 } 1658 1659 /* 1660 * find all the names in a directory item and reconcile them into 1661 * the subvolume. Only BTRFS_DIR_ITEM_KEY types will have more than 1662 * one name in a directory item, but the same code gets used for 1663 * both directory index types 1664 */ 1665 static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans, 1666 struct btrfs_root *root, 1667 struct btrfs_path *path, 1668 struct extent_buffer *eb, int slot, 1669 struct btrfs_key *key) 1670 { 1671 int ret; 1672 u32 item_size = btrfs_item_size_nr(eb, slot); 1673 struct btrfs_dir_item *di; 1674 int name_len; 1675 unsigned long ptr; 1676 unsigned long ptr_end; 1677 1678 ptr = btrfs_item_ptr_offset(eb, slot); 1679 ptr_end = ptr + item_size; 1680 while (ptr < ptr_end) { 1681 di = (struct btrfs_dir_item *)ptr; 1682 if (verify_dir_item(root, eb, di)) 1683 return -EIO; 1684 name_len = btrfs_dir_name_len(eb, di); 1685 ret = replay_one_name(trans, root, path, eb, di, key); 1686 if (ret) 1687 return ret; 1688 ptr = (unsigned long)(di + 1); 1689 ptr += name_len; 1690 } 1691 return 0; 1692 } 1693 1694 /* 1695 * directory replay has two parts. There are the standard directory 1696 * items in the log copied from the subvolume, and range items 1697 * created in the log while the subvolume was logged. 1698 * 1699 * The range items tell us which parts of the key space the log 1700 * is authoritative for. During replay, if a key in the subvolume 1701 * directory is in a logged range item, but not actually in the log 1702 * that means it was deleted from the directory before the fsync 1703 * and should be removed. 1704 */ 1705 static noinline int find_dir_range(struct btrfs_root *root, 1706 struct btrfs_path *path, 1707 u64 dirid, int key_type, 1708 u64 *start_ret, u64 *end_ret) 1709 { 1710 struct btrfs_key key; 1711 u64 found_end; 1712 struct btrfs_dir_log_item *item; 1713 int ret; 1714 int nritems; 1715 1716 if (*start_ret == (u64)-1) 1717 return 1; 1718 1719 key.objectid = dirid; 1720 key.type = key_type; 1721 key.offset = *start_ret; 1722 1723 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 1724 if (ret < 0) 1725 goto out; 1726 if (ret > 0) { 1727 if (path->slots[0] == 0) 1728 goto out; 1729 path->slots[0]--; 1730 } 1731 if (ret != 0) 1732 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); 1733 1734 if (key.type != key_type || key.objectid != dirid) { 1735 ret = 1; 1736 goto next; 1737 } 1738 item = btrfs_item_ptr(path->nodes[0], path->slots[0], 1739 struct btrfs_dir_log_item); 1740 found_end = btrfs_dir_log_end(path->nodes[0], item); 1741 1742 if (*start_ret >= key.offset && *start_ret <= found_end) { 1743 ret = 0; 1744 *start_ret = key.offset; 1745 *end_ret = found_end; 1746 goto out; 1747 } 1748 ret = 1; 1749 next: 1750 /* check the next slot in the tree to see if it is a valid item */ 1751 nritems = btrfs_header_nritems(path->nodes[0]); 1752 if (path->slots[0] >= nritems) { 1753 ret = btrfs_next_leaf(root, path); 1754 if (ret) 1755 goto out; 1756 } else { 1757 path->slots[0]++; 1758 } 1759 1760 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); 1761 1762 if (key.type != key_type || key.objectid != dirid) { 1763 ret = 1; 1764 goto out; 1765 } 1766 item = btrfs_item_ptr(path->nodes[0], path->slots[0], 1767 struct btrfs_dir_log_item); 1768 found_end = btrfs_dir_log_end(path->nodes[0], item); 1769 *start_ret = key.offset; 1770 *end_ret = found_end; 1771 ret = 0; 1772 out: 1773 btrfs_release_path(path); 1774 return ret; 1775 } 1776 1777 /* 1778 * this looks for a given directory item in the log. If the directory 1779 * item is not in the log, the item is removed and the inode it points 1780 * to is unlinked 1781 */ 1782 static noinline int check_item_in_log(struct btrfs_trans_handle *trans, 1783 struct btrfs_root *root, 1784 struct btrfs_root *log, 1785 struct btrfs_path *path, 1786 struct btrfs_path *log_path, 1787 struct inode *dir, 1788 struct btrfs_key *dir_key) 1789 { 1790 int ret; 1791 struct extent_buffer *eb; 1792 int slot; 1793 u32 item_size; 1794 struct btrfs_dir_item *di; 1795 struct btrfs_dir_item *log_di; 1796 int name_len; 1797 unsigned long ptr; 1798 unsigned long ptr_end; 1799 char *name; 1800 struct inode *inode; 1801 struct btrfs_key location; 1802 1803 again: 1804 eb = path->nodes[0]; 1805 slot = path->slots[0]; 1806 item_size = btrfs_item_size_nr(eb, slot); 1807 ptr = btrfs_item_ptr_offset(eb, slot); 1808 ptr_end = ptr + item_size; 1809 while (ptr < ptr_end) { 1810 di = (struct btrfs_dir_item *)ptr; 1811 if (verify_dir_item(root, eb, di)) { 1812 ret = -EIO; 1813 goto out; 1814 } 1815 1816 name_len = btrfs_dir_name_len(eb, di); 1817 name = kmalloc(name_len, GFP_NOFS); 1818 if (!name) { 1819 ret = -ENOMEM; 1820 goto out; 1821 } 1822 read_extent_buffer(eb, name, (unsigned long)(di + 1), 1823 name_len); 1824 log_di = NULL; 1825 if (log && dir_key->type == BTRFS_DIR_ITEM_KEY) { 1826 log_di = btrfs_lookup_dir_item(trans, log, log_path, 1827 dir_key->objectid, 1828 name, name_len, 0); 1829 } else if (log && dir_key->type == BTRFS_DIR_INDEX_KEY) { 1830 log_di = btrfs_lookup_dir_index_item(trans, log, 1831 log_path, 1832 dir_key->objectid, 1833 dir_key->offset, 1834 name, name_len, 0); 1835 } 1836 if (!log_di || (IS_ERR(log_di) && PTR_ERR(log_di) == -ENOENT)) { 1837 btrfs_dir_item_key_to_cpu(eb, di, &location); 1838 btrfs_release_path(path); 1839 btrfs_release_path(log_path); 1840 inode = read_one_inode(root, location.objectid); 1841 if (!inode) { 1842 kfree(name); 1843 return -EIO; 1844 } 1845 1846 ret = link_to_fixup_dir(trans, root, 1847 path, location.objectid); 1848 if (ret) { 1849 kfree(name); 1850 iput(inode); 1851 goto out; 1852 } 1853 1854 inc_nlink(inode); 1855 ret = btrfs_unlink_inode(trans, root, dir, inode, 1856 name, name_len); 1857 if (!ret) 1858 ret = btrfs_run_delayed_items(trans, root); 1859 kfree(name); 1860 iput(inode); 1861 if (ret) 1862 goto out; 1863 1864 /* there might still be more names under this key 1865 * check and repeat if required 1866 */ 1867 ret = btrfs_search_slot(NULL, root, dir_key, path, 1868 0, 0); 1869 if (ret == 0) 1870 goto again; 1871 ret = 0; 1872 goto out; 1873 } else if (IS_ERR(log_di)) { 1874 kfree(name); 1875 return PTR_ERR(log_di); 1876 } 1877 btrfs_release_path(log_path); 1878 kfree(name); 1879 1880 ptr = (unsigned long)(di + 1); 1881 ptr += name_len; 1882 } 1883 ret = 0; 1884 out: 1885 btrfs_release_path(path); 1886 btrfs_release_path(log_path); 1887 return ret; 1888 } 1889 1890 /* 1891 * deletion replay happens before we copy any new directory items 1892 * out of the log or out of backreferences from inodes. It 1893 * scans the log to find ranges of keys that log is authoritative for, 1894 * and then scans the directory to find items in those ranges that are 1895 * not present in the log. 1896 * 1897 * Anything we don't find in the log is unlinked and removed from the 1898 * directory. 1899 */ 1900 static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans, 1901 struct btrfs_root *root, 1902 struct btrfs_root *log, 1903 struct btrfs_path *path, 1904 u64 dirid, int del_all) 1905 { 1906 u64 range_start; 1907 u64 range_end; 1908 int key_type = BTRFS_DIR_LOG_ITEM_KEY; 1909 int ret = 0; 1910 struct btrfs_key dir_key; 1911 struct btrfs_key found_key; 1912 struct btrfs_path *log_path; 1913 struct inode *dir; 1914 1915 dir_key.objectid = dirid; 1916 dir_key.type = BTRFS_DIR_ITEM_KEY; 1917 log_path = btrfs_alloc_path(); 1918 if (!log_path) 1919 return -ENOMEM; 1920 1921 dir = read_one_inode(root, dirid); 1922 /* it isn't an error if the inode isn't there, that can happen 1923 * because we replay the deletes before we copy in the inode item 1924 * from the log 1925 */ 1926 if (!dir) { 1927 btrfs_free_path(log_path); 1928 return 0; 1929 } 1930 again: 1931 range_start = 0; 1932 range_end = 0; 1933 while (1) { 1934 if (del_all) 1935 range_end = (u64)-1; 1936 else { 1937 ret = find_dir_range(log, path, dirid, key_type, 1938 &range_start, &range_end); 1939 if (ret != 0) 1940 break; 1941 } 1942 1943 dir_key.offset = range_start; 1944 while (1) { 1945 int nritems; 1946 ret = btrfs_search_slot(NULL, root, &dir_key, path, 1947 0, 0); 1948 if (ret < 0) 1949 goto out; 1950 1951 nritems = btrfs_header_nritems(path->nodes[0]); 1952 if (path->slots[0] >= nritems) { 1953 ret = btrfs_next_leaf(root, path); 1954 if (ret) 1955 break; 1956 } 1957 btrfs_item_key_to_cpu(path->nodes[0], &found_key, 1958 path->slots[0]); 1959 if (found_key.objectid != dirid || 1960 found_key.type != dir_key.type) 1961 goto next_type; 1962 1963 if (found_key.offset > range_end) 1964 break; 1965 1966 ret = check_item_in_log(trans, root, log, path, 1967 log_path, dir, 1968 &found_key); 1969 if (ret) 1970 goto out; 1971 if (found_key.offset == (u64)-1) 1972 break; 1973 dir_key.offset = found_key.offset + 1; 1974 } 1975 btrfs_release_path(path); 1976 if (range_end == (u64)-1) 1977 break; 1978 range_start = range_end + 1; 1979 } 1980 1981 next_type: 1982 ret = 0; 1983 if (key_type == BTRFS_DIR_LOG_ITEM_KEY) { 1984 key_type = BTRFS_DIR_LOG_INDEX_KEY; 1985 dir_key.type = BTRFS_DIR_INDEX_KEY; 1986 btrfs_release_path(path); 1987 goto again; 1988 } 1989 out: 1990 btrfs_release_path(path); 1991 btrfs_free_path(log_path); 1992 iput(dir); 1993 return ret; 1994 } 1995 1996 /* 1997 * the process_func used to replay items from the log tree. This 1998 * gets called in two different stages. The first stage just looks 1999 * for inodes and makes sure they are all copied into the subvolume. 2000 * 2001 * The second stage copies all the other item types from the log into 2002 * the subvolume. The two stage approach is slower, but gets rid of 2003 * lots of complexity around inodes referencing other inodes that exist 2004 * only in the log (references come from either directory items or inode 2005 * back refs). 2006 */ 2007 static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb, 2008 struct walk_control *wc, u64 gen) 2009 { 2010 int nritems; 2011 struct btrfs_path *path; 2012 struct btrfs_root *root = wc->replay_dest; 2013 struct btrfs_key key; 2014 int level; 2015 int i; 2016 int ret; 2017 2018 ret = btrfs_read_buffer(eb, gen); 2019 if (ret) 2020 return ret; 2021 2022 level = btrfs_header_level(eb); 2023 2024 if (level != 0) 2025 return 0; 2026 2027 path = btrfs_alloc_path(); 2028 if (!path) 2029 return -ENOMEM; 2030 2031 nritems = btrfs_header_nritems(eb); 2032 for (i = 0; i < nritems; i++) { 2033 btrfs_item_key_to_cpu(eb, &key, i); 2034 2035 /* inode keys are done during the first stage */ 2036 if (key.type == BTRFS_INODE_ITEM_KEY && 2037 wc->stage == LOG_WALK_REPLAY_INODES) { 2038 struct btrfs_inode_item *inode_item; 2039 u32 mode; 2040 2041 inode_item = btrfs_item_ptr(eb, i, 2042 struct btrfs_inode_item); 2043 mode = btrfs_inode_mode(eb, inode_item); 2044 if (S_ISDIR(mode)) { 2045 ret = replay_dir_deletes(wc->trans, 2046 root, log, path, key.objectid, 0); 2047 if (ret) 2048 break; 2049 } 2050 ret = overwrite_item(wc->trans, root, path, 2051 eb, i, &key); 2052 if (ret) 2053 break; 2054 2055 /* for regular files, make sure corresponding 2056 * orhpan item exist. extents past the new EOF 2057 * will be truncated later by orphan cleanup. 2058 */ 2059 if (S_ISREG(mode)) { 2060 ret = insert_orphan_item(wc->trans, root, 2061 key.objectid); 2062 if (ret) 2063 break; 2064 } 2065 2066 ret = link_to_fixup_dir(wc->trans, root, 2067 path, key.objectid); 2068 if (ret) 2069 break; 2070 } 2071 2072 if (key.type == BTRFS_DIR_INDEX_KEY && 2073 wc->stage == LOG_WALK_REPLAY_DIR_INDEX) { 2074 ret = replay_one_dir_item(wc->trans, root, path, 2075 eb, i, &key); 2076 if (ret) 2077 break; 2078 } 2079 2080 if (wc->stage < LOG_WALK_REPLAY_ALL) 2081 continue; 2082 2083 /* these keys are simply copied */ 2084 if (key.type == BTRFS_XATTR_ITEM_KEY) { 2085 ret = overwrite_item(wc->trans, root, path, 2086 eb, i, &key); 2087 if (ret) 2088 break; 2089 } else if (key.type == BTRFS_INODE_REF_KEY || 2090 key.type == BTRFS_INODE_EXTREF_KEY) { 2091 ret = add_inode_ref(wc->trans, root, log, path, 2092 eb, i, &key); 2093 if (ret && ret != -ENOENT) 2094 break; 2095 ret = 0; 2096 } else if (key.type == BTRFS_EXTENT_DATA_KEY) { 2097 ret = replay_one_extent(wc->trans, root, path, 2098 eb, i, &key); 2099 if (ret) 2100 break; 2101 } else if (key.type == BTRFS_DIR_ITEM_KEY) { 2102 ret = replay_one_dir_item(wc->trans, root, path, 2103 eb, i, &key); 2104 if (ret) 2105 break; 2106 } 2107 } 2108 btrfs_free_path(path); 2109 return ret; 2110 } 2111 2112 static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans, 2113 struct btrfs_root *root, 2114 struct btrfs_path *path, int *level, 2115 struct walk_control *wc) 2116 { 2117 u64 root_owner; 2118 u64 bytenr; 2119 u64 ptr_gen; 2120 struct extent_buffer *next; 2121 struct extent_buffer *cur; 2122 struct extent_buffer *parent; 2123 u32 blocksize; 2124 int ret = 0; 2125 2126 WARN_ON(*level < 0); 2127 WARN_ON(*level >= BTRFS_MAX_LEVEL); 2128 2129 while (*level > 0) { 2130 WARN_ON(*level < 0); 2131 WARN_ON(*level >= BTRFS_MAX_LEVEL); 2132 cur = path->nodes[*level]; 2133 2134 WARN_ON(btrfs_header_level(cur) != *level); 2135 2136 if (path->slots[*level] >= 2137 btrfs_header_nritems(cur)) 2138 break; 2139 2140 bytenr = btrfs_node_blockptr(cur, path->slots[*level]); 2141 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]); 2142 blocksize = btrfs_level_size(root, *level - 1); 2143 2144 parent = path->nodes[*level]; 2145 root_owner = btrfs_header_owner(parent); 2146 2147 next = btrfs_find_create_tree_block(root, bytenr, blocksize); 2148 if (!next) 2149 return -ENOMEM; 2150 2151 if (*level == 1) { 2152 ret = wc->process_func(root, next, wc, ptr_gen); 2153 if (ret) { 2154 free_extent_buffer(next); 2155 return ret; 2156 } 2157 2158 path->slots[*level]++; 2159 if (wc->free) { 2160 ret = btrfs_read_buffer(next, ptr_gen); 2161 if (ret) { 2162 free_extent_buffer(next); 2163 return ret; 2164 } 2165 2166 if (trans) { 2167 btrfs_tree_lock(next); 2168 btrfs_set_lock_blocking(next); 2169 clean_tree_block(trans, root, next); 2170 btrfs_wait_tree_block_writeback(next); 2171 btrfs_tree_unlock(next); 2172 } 2173 2174 WARN_ON(root_owner != 2175 BTRFS_TREE_LOG_OBJECTID); 2176 ret = btrfs_free_and_pin_reserved_extent(root, 2177 bytenr, blocksize); 2178 if (ret) { 2179 free_extent_buffer(next); 2180 return ret; 2181 } 2182 } 2183 free_extent_buffer(next); 2184 continue; 2185 } 2186 ret = btrfs_read_buffer(next, ptr_gen); 2187 if (ret) { 2188 free_extent_buffer(next); 2189 return ret; 2190 } 2191 2192 WARN_ON(*level <= 0); 2193 if (path->nodes[*level-1]) 2194 free_extent_buffer(path->nodes[*level-1]); 2195 path->nodes[*level-1] = next; 2196 *level = btrfs_header_level(next); 2197 path->slots[*level] = 0; 2198 cond_resched(); 2199 } 2200 WARN_ON(*level < 0); 2201 WARN_ON(*level >= BTRFS_MAX_LEVEL); 2202 2203 path->slots[*level] = btrfs_header_nritems(path->nodes[*level]); 2204 2205 cond_resched(); 2206 return 0; 2207 } 2208 2209 static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans, 2210 struct btrfs_root *root, 2211 struct btrfs_path *path, int *level, 2212 struct walk_control *wc) 2213 { 2214 u64 root_owner; 2215 int i; 2216 int slot; 2217 int ret; 2218 2219 for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) { 2220 slot = path->slots[i]; 2221 if (slot + 1 < btrfs_header_nritems(path->nodes[i])) { 2222 path->slots[i]++; 2223 *level = i; 2224 WARN_ON(*level == 0); 2225 return 0; 2226 } else { 2227 struct extent_buffer *parent; 2228 if (path->nodes[*level] == root->node) 2229 parent = path->nodes[*level]; 2230 else 2231 parent = path->nodes[*level + 1]; 2232 2233 root_owner = btrfs_header_owner(parent); 2234 ret = wc->process_func(root, path->nodes[*level], wc, 2235 btrfs_header_generation(path->nodes[*level])); 2236 if (ret) 2237 return ret; 2238 2239 if (wc->free) { 2240 struct extent_buffer *next; 2241 2242 next = path->nodes[*level]; 2243 2244 if (trans) { 2245 btrfs_tree_lock(next); 2246 btrfs_set_lock_blocking(next); 2247 clean_tree_block(trans, root, next); 2248 btrfs_wait_tree_block_writeback(next); 2249 btrfs_tree_unlock(next); 2250 } 2251 2252 WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID); 2253 ret = btrfs_free_and_pin_reserved_extent(root, 2254 path->nodes[*level]->start, 2255 path->nodes[*level]->len); 2256 if (ret) 2257 return ret; 2258 } 2259 free_extent_buffer(path->nodes[*level]); 2260 path->nodes[*level] = NULL; 2261 *level = i + 1; 2262 } 2263 } 2264 return 1; 2265 } 2266 2267 /* 2268 * drop the reference count on the tree rooted at 'snap'. This traverses 2269 * the tree freeing any blocks that have a ref count of zero after being 2270 * decremented. 2271 */ 2272 static int walk_log_tree(struct btrfs_trans_handle *trans, 2273 struct btrfs_root *log, struct walk_control *wc) 2274 { 2275 int ret = 0; 2276 int wret; 2277 int level; 2278 struct btrfs_path *path; 2279 int orig_level; 2280 2281 path = btrfs_alloc_path(); 2282 if (!path) 2283 return -ENOMEM; 2284 2285 level = btrfs_header_level(log->node); 2286 orig_level = level; 2287 path->nodes[level] = log->node; 2288 extent_buffer_get(log->node); 2289 path->slots[level] = 0; 2290 2291 while (1) { 2292 wret = walk_down_log_tree(trans, log, path, &level, wc); 2293 if (wret > 0) 2294 break; 2295 if (wret < 0) { 2296 ret = wret; 2297 goto out; 2298 } 2299 2300 wret = walk_up_log_tree(trans, log, path, &level, wc); 2301 if (wret > 0) 2302 break; 2303 if (wret < 0) { 2304 ret = wret; 2305 goto out; 2306 } 2307 } 2308 2309 /* was the root node processed? if not, catch it here */ 2310 if (path->nodes[orig_level]) { 2311 ret = wc->process_func(log, path->nodes[orig_level], wc, 2312 btrfs_header_generation(path->nodes[orig_level])); 2313 if (ret) 2314 goto out; 2315 if (wc->free) { 2316 struct extent_buffer *next; 2317 2318 next = path->nodes[orig_level]; 2319 2320 if (trans) { 2321 btrfs_tree_lock(next); 2322 btrfs_set_lock_blocking(next); 2323 clean_tree_block(trans, log, next); 2324 btrfs_wait_tree_block_writeback(next); 2325 btrfs_tree_unlock(next); 2326 } 2327 2328 WARN_ON(log->root_key.objectid != 2329 BTRFS_TREE_LOG_OBJECTID); 2330 ret = btrfs_free_and_pin_reserved_extent(log, next->start, 2331 next->len); 2332 if (ret) 2333 goto out; 2334 } 2335 } 2336 2337 out: 2338 btrfs_free_path(path); 2339 return ret; 2340 } 2341 2342 /* 2343 * helper function to update the item for a given subvolumes log root 2344 * in the tree of log roots 2345 */ 2346 static int update_log_root(struct btrfs_trans_handle *trans, 2347 struct btrfs_root *log) 2348 { 2349 int ret; 2350 2351 if (log->log_transid == 1) { 2352 /* insert root item on the first sync */ 2353 ret = btrfs_insert_root(trans, log->fs_info->log_root_tree, 2354 &log->root_key, &log->root_item); 2355 } else { 2356 ret = btrfs_update_root(trans, log->fs_info->log_root_tree, 2357 &log->root_key, &log->root_item); 2358 } 2359 return ret; 2360 } 2361 2362 static int wait_log_commit(struct btrfs_trans_handle *trans, 2363 struct btrfs_root *root, unsigned long transid) 2364 { 2365 DEFINE_WAIT(wait); 2366 int index = transid % 2; 2367 2368 /* 2369 * we only allow two pending log transactions at a time, 2370 * so we know that if ours is more than 2 older than the 2371 * current transaction, we're done 2372 */ 2373 do { 2374 prepare_to_wait(&root->log_commit_wait[index], 2375 &wait, TASK_UNINTERRUPTIBLE); 2376 mutex_unlock(&root->log_mutex); 2377 2378 if (root->fs_info->last_trans_log_full_commit != 2379 trans->transid && root->log_transid < transid + 2 && 2380 atomic_read(&root->log_commit[index])) 2381 schedule(); 2382 2383 finish_wait(&root->log_commit_wait[index], &wait); 2384 mutex_lock(&root->log_mutex); 2385 } while (root->fs_info->last_trans_log_full_commit != 2386 trans->transid && root->log_transid < transid + 2 && 2387 atomic_read(&root->log_commit[index])); 2388 return 0; 2389 } 2390 2391 static void wait_for_writer(struct btrfs_trans_handle *trans, 2392 struct btrfs_root *root) 2393 { 2394 DEFINE_WAIT(wait); 2395 while (root->fs_info->last_trans_log_full_commit != 2396 trans->transid && atomic_read(&root->log_writers)) { 2397 prepare_to_wait(&root->log_writer_wait, 2398 &wait, TASK_UNINTERRUPTIBLE); 2399 mutex_unlock(&root->log_mutex); 2400 if (root->fs_info->last_trans_log_full_commit != 2401 trans->transid && atomic_read(&root->log_writers)) 2402 schedule(); 2403 mutex_lock(&root->log_mutex); 2404 finish_wait(&root->log_writer_wait, &wait); 2405 } 2406 } 2407 2408 /* 2409 * btrfs_sync_log does sends a given tree log down to the disk and 2410 * updates the super blocks to record it. When this call is done, 2411 * you know that any inodes previously logged are safely on disk only 2412 * if it returns 0. 2413 * 2414 * Any other return value means you need to call btrfs_commit_transaction. 2415 * Some of the edge cases for fsyncing directories that have had unlinks 2416 * or renames done in the past mean that sometimes the only safe 2417 * fsync is to commit the whole FS. When btrfs_sync_log returns -EAGAIN, 2418 * that has happened. 2419 */ 2420 int btrfs_sync_log(struct btrfs_trans_handle *trans, 2421 struct btrfs_root *root) 2422 { 2423 int index1; 2424 int index2; 2425 int mark; 2426 int ret; 2427 struct btrfs_root *log = root->log_root; 2428 struct btrfs_root *log_root_tree = root->fs_info->log_root_tree; 2429 unsigned long log_transid = 0; 2430 struct blk_plug plug; 2431 2432 mutex_lock(&root->log_mutex); 2433 log_transid = root->log_transid; 2434 index1 = root->log_transid % 2; 2435 if (atomic_read(&root->log_commit[index1])) { 2436 wait_log_commit(trans, root, root->log_transid); 2437 mutex_unlock(&root->log_mutex); 2438 return 0; 2439 } 2440 atomic_set(&root->log_commit[index1], 1); 2441 2442 /* wait for previous tree log sync to complete */ 2443 if (atomic_read(&root->log_commit[(index1 + 1) % 2])) 2444 wait_log_commit(trans, root, root->log_transid - 1); 2445 while (1) { 2446 int batch = atomic_read(&root->log_batch); 2447 /* when we're on an ssd, just kick the log commit out */ 2448 if (!btrfs_test_opt(root, SSD) && root->log_multiple_pids) { 2449 mutex_unlock(&root->log_mutex); 2450 schedule_timeout_uninterruptible(1); 2451 mutex_lock(&root->log_mutex); 2452 } 2453 wait_for_writer(trans, root); 2454 if (batch == atomic_read(&root->log_batch)) 2455 break; 2456 } 2457 2458 /* bail out if we need to do a full commit */ 2459 if (root->fs_info->last_trans_log_full_commit == trans->transid) { 2460 ret = -EAGAIN; 2461 btrfs_free_logged_extents(log, log_transid); 2462 mutex_unlock(&root->log_mutex); 2463 goto out; 2464 } 2465 2466 if (log_transid % 2 == 0) 2467 mark = EXTENT_DIRTY; 2468 else 2469 mark = EXTENT_NEW; 2470 2471 /* we start IO on all the marked extents here, but we don't actually 2472 * wait for them until later. 2473 */ 2474 blk_start_plug(&plug); 2475 ret = btrfs_write_marked_extents(log, &log->dirty_log_pages, mark); 2476 if (ret) { 2477 blk_finish_plug(&plug); 2478 btrfs_abort_transaction(trans, root, ret); 2479 btrfs_free_logged_extents(log, log_transid); 2480 mutex_unlock(&root->log_mutex); 2481 goto out; 2482 } 2483 2484 btrfs_set_root_node(&log->root_item, log->node); 2485 2486 root->log_transid++; 2487 log->log_transid = root->log_transid; 2488 root->log_start_pid = 0; 2489 smp_mb(); 2490 /* 2491 * IO has been started, blocks of the log tree have WRITTEN flag set 2492 * in their headers. new modifications of the log will be written to 2493 * new positions. so it's safe to allow log writers to go in. 2494 */ 2495 mutex_unlock(&root->log_mutex); 2496 2497 mutex_lock(&log_root_tree->log_mutex); 2498 atomic_inc(&log_root_tree->log_batch); 2499 atomic_inc(&log_root_tree->log_writers); 2500 mutex_unlock(&log_root_tree->log_mutex); 2501 2502 ret = update_log_root(trans, log); 2503 2504 mutex_lock(&log_root_tree->log_mutex); 2505 if (atomic_dec_and_test(&log_root_tree->log_writers)) { 2506 smp_mb(); 2507 if (waitqueue_active(&log_root_tree->log_writer_wait)) 2508 wake_up(&log_root_tree->log_writer_wait); 2509 } 2510 2511 if (ret) { 2512 blk_finish_plug(&plug); 2513 if (ret != -ENOSPC) { 2514 btrfs_abort_transaction(trans, root, ret); 2515 mutex_unlock(&log_root_tree->log_mutex); 2516 goto out; 2517 } 2518 root->fs_info->last_trans_log_full_commit = trans->transid; 2519 btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark); 2520 btrfs_free_logged_extents(log, log_transid); 2521 mutex_unlock(&log_root_tree->log_mutex); 2522 ret = -EAGAIN; 2523 goto out; 2524 } 2525 2526 index2 = log_root_tree->log_transid % 2; 2527 if (atomic_read(&log_root_tree->log_commit[index2])) { 2528 blk_finish_plug(&plug); 2529 btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark); 2530 wait_log_commit(trans, log_root_tree, 2531 log_root_tree->log_transid); 2532 btrfs_free_logged_extents(log, log_transid); 2533 mutex_unlock(&log_root_tree->log_mutex); 2534 ret = 0; 2535 goto out; 2536 } 2537 atomic_set(&log_root_tree->log_commit[index2], 1); 2538 2539 if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2])) { 2540 wait_log_commit(trans, log_root_tree, 2541 log_root_tree->log_transid - 1); 2542 } 2543 2544 wait_for_writer(trans, log_root_tree); 2545 2546 /* 2547 * now that we've moved on to the tree of log tree roots, 2548 * check the full commit flag again 2549 */ 2550 if (root->fs_info->last_trans_log_full_commit == trans->transid) { 2551 blk_finish_plug(&plug); 2552 btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark); 2553 btrfs_free_logged_extents(log, log_transid); 2554 mutex_unlock(&log_root_tree->log_mutex); 2555 ret = -EAGAIN; 2556 goto out_wake_log_root; 2557 } 2558 2559 ret = btrfs_write_marked_extents(log_root_tree, 2560 &log_root_tree->dirty_log_pages, 2561 EXTENT_DIRTY | EXTENT_NEW); 2562 blk_finish_plug(&plug); 2563 if (ret) { 2564 btrfs_abort_transaction(trans, root, ret); 2565 btrfs_free_logged_extents(log, log_transid); 2566 mutex_unlock(&log_root_tree->log_mutex); 2567 goto out_wake_log_root; 2568 } 2569 btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark); 2570 btrfs_wait_marked_extents(log_root_tree, 2571 &log_root_tree->dirty_log_pages, 2572 EXTENT_NEW | EXTENT_DIRTY); 2573 btrfs_wait_logged_extents(log, log_transid); 2574 2575 btrfs_set_super_log_root(root->fs_info->super_for_commit, 2576 log_root_tree->node->start); 2577 btrfs_set_super_log_root_level(root->fs_info->super_for_commit, 2578 btrfs_header_level(log_root_tree->node)); 2579 2580 log_root_tree->log_transid++; 2581 smp_mb(); 2582 2583 mutex_unlock(&log_root_tree->log_mutex); 2584 2585 /* 2586 * nobody else is going to jump in and write the the ctree 2587 * super here because the log_commit atomic below is protecting 2588 * us. We must be called with a transaction handle pinning 2589 * the running transaction open, so a full commit can't hop 2590 * in and cause problems either. 2591 */ 2592 ret = write_ctree_super(trans, root->fs_info->tree_root, 1); 2593 if (ret) { 2594 btrfs_abort_transaction(trans, root, ret); 2595 goto out_wake_log_root; 2596 } 2597 2598 mutex_lock(&root->log_mutex); 2599 if (root->last_log_commit < log_transid) 2600 root->last_log_commit = log_transid; 2601 mutex_unlock(&root->log_mutex); 2602 2603 out_wake_log_root: 2604 atomic_set(&log_root_tree->log_commit[index2], 0); 2605 smp_mb(); 2606 if (waitqueue_active(&log_root_tree->log_commit_wait[index2])) 2607 wake_up(&log_root_tree->log_commit_wait[index2]); 2608 out: 2609 atomic_set(&root->log_commit[index1], 0); 2610 smp_mb(); 2611 if (waitqueue_active(&root->log_commit_wait[index1])) 2612 wake_up(&root->log_commit_wait[index1]); 2613 return ret; 2614 } 2615 2616 static void free_log_tree(struct btrfs_trans_handle *trans, 2617 struct btrfs_root *log) 2618 { 2619 int ret; 2620 u64 start; 2621 u64 end; 2622 struct walk_control wc = { 2623 .free = 1, 2624 .process_func = process_one_buffer 2625 }; 2626 2627 ret = walk_log_tree(trans, log, &wc); 2628 /* I don't think this can happen but just in case */ 2629 if (ret) 2630 btrfs_abort_transaction(trans, log, ret); 2631 2632 while (1) { 2633 ret = find_first_extent_bit(&log->dirty_log_pages, 2634 0, &start, &end, EXTENT_DIRTY | EXTENT_NEW, 2635 NULL); 2636 if (ret) 2637 break; 2638 2639 clear_extent_bits(&log->dirty_log_pages, start, end, 2640 EXTENT_DIRTY | EXTENT_NEW, GFP_NOFS); 2641 } 2642 2643 /* 2644 * We may have short-circuited the log tree with the full commit logic 2645 * and left ordered extents on our list, so clear these out to keep us 2646 * from leaking inodes and memory. 2647 */ 2648 btrfs_free_logged_extents(log, 0); 2649 btrfs_free_logged_extents(log, 1); 2650 2651 free_extent_buffer(log->node); 2652 kfree(log); 2653 } 2654 2655 /* 2656 * free all the extents used by the tree log. This should be called 2657 * at commit time of the full transaction 2658 */ 2659 int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root) 2660 { 2661 if (root->log_root) { 2662 free_log_tree(trans, root->log_root); 2663 root->log_root = NULL; 2664 } 2665 return 0; 2666 } 2667 2668 int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans, 2669 struct btrfs_fs_info *fs_info) 2670 { 2671 if (fs_info->log_root_tree) { 2672 free_log_tree(trans, fs_info->log_root_tree); 2673 fs_info->log_root_tree = NULL; 2674 } 2675 return 0; 2676 } 2677 2678 /* 2679 * If both a file and directory are logged, and unlinks or renames are 2680 * mixed in, we have a few interesting corners: 2681 * 2682 * create file X in dir Y 2683 * link file X to X.link in dir Y 2684 * fsync file X 2685 * unlink file X but leave X.link 2686 * fsync dir Y 2687 * 2688 * After a crash we would expect only X.link to exist. But file X 2689 * didn't get fsync'd again so the log has back refs for X and X.link. 2690 * 2691 * We solve this by removing directory entries and inode backrefs from the 2692 * log when a file that was logged in the current transaction is 2693 * unlinked. Any later fsync will include the updated log entries, and 2694 * we'll be able to reconstruct the proper directory items from backrefs. 2695 * 2696 * This optimizations allows us to avoid relogging the entire inode 2697 * or the entire directory. 2698 */ 2699 int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans, 2700 struct btrfs_root *root, 2701 const char *name, int name_len, 2702 struct inode *dir, u64 index) 2703 { 2704 struct btrfs_root *log; 2705 struct btrfs_dir_item *di; 2706 struct btrfs_path *path; 2707 int ret; 2708 int err = 0; 2709 int bytes_del = 0; 2710 u64 dir_ino = btrfs_ino(dir); 2711 2712 if (BTRFS_I(dir)->logged_trans < trans->transid) 2713 return 0; 2714 2715 ret = join_running_log_trans(root); 2716 if (ret) 2717 return 0; 2718 2719 mutex_lock(&BTRFS_I(dir)->log_mutex); 2720 2721 log = root->log_root; 2722 path = btrfs_alloc_path(); 2723 if (!path) { 2724 err = -ENOMEM; 2725 goto out_unlock; 2726 } 2727 2728 di = btrfs_lookup_dir_item(trans, log, path, dir_ino, 2729 name, name_len, -1); 2730 if (IS_ERR(di)) { 2731 err = PTR_ERR(di); 2732 goto fail; 2733 } 2734 if (di) { 2735 ret = btrfs_delete_one_dir_name(trans, log, path, di); 2736 bytes_del += name_len; 2737 if (ret) { 2738 err = ret; 2739 goto fail; 2740 } 2741 } 2742 btrfs_release_path(path); 2743 di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino, 2744 index, name, name_len, -1); 2745 if (IS_ERR(di)) { 2746 err = PTR_ERR(di); 2747 goto fail; 2748 } 2749 if (di) { 2750 ret = btrfs_delete_one_dir_name(trans, log, path, di); 2751 bytes_del += name_len; 2752 if (ret) { 2753 err = ret; 2754 goto fail; 2755 } 2756 } 2757 2758 /* update the directory size in the log to reflect the names 2759 * we have removed 2760 */ 2761 if (bytes_del) { 2762 struct btrfs_key key; 2763 2764 key.objectid = dir_ino; 2765 key.offset = 0; 2766 key.type = BTRFS_INODE_ITEM_KEY; 2767 btrfs_release_path(path); 2768 2769 ret = btrfs_search_slot(trans, log, &key, path, 0, 1); 2770 if (ret < 0) { 2771 err = ret; 2772 goto fail; 2773 } 2774 if (ret == 0) { 2775 struct btrfs_inode_item *item; 2776 u64 i_size; 2777 2778 item = btrfs_item_ptr(path->nodes[0], path->slots[0], 2779 struct btrfs_inode_item); 2780 i_size = btrfs_inode_size(path->nodes[0], item); 2781 if (i_size > bytes_del) 2782 i_size -= bytes_del; 2783 else 2784 i_size = 0; 2785 btrfs_set_inode_size(path->nodes[0], item, i_size); 2786 btrfs_mark_buffer_dirty(path->nodes[0]); 2787 } else 2788 ret = 0; 2789 btrfs_release_path(path); 2790 } 2791 fail: 2792 btrfs_free_path(path); 2793 out_unlock: 2794 mutex_unlock(&BTRFS_I(dir)->log_mutex); 2795 if (ret == -ENOSPC) { 2796 root->fs_info->last_trans_log_full_commit = trans->transid; 2797 ret = 0; 2798 } else if (ret < 0) 2799 btrfs_abort_transaction(trans, root, ret); 2800 2801 btrfs_end_log_trans(root); 2802 2803 return err; 2804 } 2805 2806 /* see comments for btrfs_del_dir_entries_in_log */ 2807 int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans, 2808 struct btrfs_root *root, 2809 const char *name, int name_len, 2810 struct inode *inode, u64 dirid) 2811 { 2812 struct btrfs_root *log; 2813 u64 index; 2814 int ret; 2815 2816 if (BTRFS_I(inode)->logged_trans < trans->transid) 2817 return 0; 2818 2819 ret = join_running_log_trans(root); 2820 if (ret) 2821 return 0; 2822 log = root->log_root; 2823 mutex_lock(&BTRFS_I(inode)->log_mutex); 2824 2825 ret = btrfs_del_inode_ref(trans, log, name, name_len, btrfs_ino(inode), 2826 dirid, &index); 2827 mutex_unlock(&BTRFS_I(inode)->log_mutex); 2828 if (ret == -ENOSPC) { 2829 root->fs_info->last_trans_log_full_commit = trans->transid; 2830 ret = 0; 2831 } else if (ret < 0 && ret != -ENOENT) 2832 btrfs_abort_transaction(trans, root, ret); 2833 btrfs_end_log_trans(root); 2834 2835 return ret; 2836 } 2837 2838 /* 2839 * creates a range item in the log for 'dirid'. first_offset and 2840 * last_offset tell us which parts of the key space the log should 2841 * be considered authoritative for. 2842 */ 2843 static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans, 2844 struct btrfs_root *log, 2845 struct btrfs_path *path, 2846 int key_type, u64 dirid, 2847 u64 first_offset, u64 last_offset) 2848 { 2849 int ret; 2850 struct btrfs_key key; 2851 struct btrfs_dir_log_item *item; 2852 2853 key.objectid = dirid; 2854 key.offset = first_offset; 2855 if (key_type == BTRFS_DIR_ITEM_KEY) 2856 key.type = BTRFS_DIR_LOG_ITEM_KEY; 2857 else 2858 key.type = BTRFS_DIR_LOG_INDEX_KEY; 2859 ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*item)); 2860 if (ret) 2861 return ret; 2862 2863 item = btrfs_item_ptr(path->nodes[0], path->slots[0], 2864 struct btrfs_dir_log_item); 2865 btrfs_set_dir_log_end(path->nodes[0], item, last_offset); 2866 btrfs_mark_buffer_dirty(path->nodes[0]); 2867 btrfs_release_path(path); 2868 return 0; 2869 } 2870 2871 /* 2872 * log all the items included in the current transaction for a given 2873 * directory. This also creates the range items in the log tree required 2874 * to replay anything deleted before the fsync 2875 */ 2876 static noinline int log_dir_items(struct btrfs_trans_handle *trans, 2877 struct btrfs_root *root, struct inode *inode, 2878 struct btrfs_path *path, 2879 struct btrfs_path *dst_path, int key_type, 2880 u64 min_offset, u64 *last_offset_ret) 2881 { 2882 struct btrfs_key min_key; 2883 struct btrfs_root *log = root->log_root; 2884 struct extent_buffer *src; 2885 int err = 0; 2886 int ret; 2887 int i; 2888 int nritems; 2889 u64 first_offset = min_offset; 2890 u64 last_offset = (u64)-1; 2891 u64 ino = btrfs_ino(inode); 2892 2893 log = root->log_root; 2894 2895 min_key.objectid = ino; 2896 min_key.type = key_type; 2897 min_key.offset = min_offset; 2898 2899 path->keep_locks = 1; 2900 2901 ret = btrfs_search_forward(root, &min_key, path, trans->transid); 2902 2903 /* 2904 * we didn't find anything from this transaction, see if there 2905 * is anything at all 2906 */ 2907 if (ret != 0 || min_key.objectid != ino || min_key.type != key_type) { 2908 min_key.objectid = ino; 2909 min_key.type = key_type; 2910 min_key.offset = (u64)-1; 2911 btrfs_release_path(path); 2912 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0); 2913 if (ret < 0) { 2914 btrfs_release_path(path); 2915 return ret; 2916 } 2917 ret = btrfs_previous_item(root, path, ino, key_type); 2918 2919 /* if ret == 0 there are items for this type, 2920 * create a range to tell us the last key of this type. 2921 * otherwise, there are no items in this directory after 2922 * *min_offset, and we create a range to indicate that. 2923 */ 2924 if (ret == 0) { 2925 struct btrfs_key tmp; 2926 btrfs_item_key_to_cpu(path->nodes[0], &tmp, 2927 path->slots[0]); 2928 if (key_type == tmp.type) 2929 first_offset = max(min_offset, tmp.offset) + 1; 2930 } 2931 goto done; 2932 } 2933 2934 /* go backward to find any previous key */ 2935 ret = btrfs_previous_item(root, path, ino, key_type); 2936 if (ret == 0) { 2937 struct btrfs_key tmp; 2938 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]); 2939 if (key_type == tmp.type) { 2940 first_offset = tmp.offset; 2941 ret = overwrite_item(trans, log, dst_path, 2942 path->nodes[0], path->slots[0], 2943 &tmp); 2944 if (ret) { 2945 err = ret; 2946 goto done; 2947 } 2948 } 2949 } 2950 btrfs_release_path(path); 2951 2952 /* find the first key from this transaction again */ 2953 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0); 2954 if (WARN_ON(ret != 0)) 2955 goto done; 2956 2957 /* 2958 * we have a block from this transaction, log every item in it 2959 * from our directory 2960 */ 2961 while (1) { 2962 struct btrfs_key tmp; 2963 src = path->nodes[0]; 2964 nritems = btrfs_header_nritems(src); 2965 for (i = path->slots[0]; i < nritems; i++) { 2966 btrfs_item_key_to_cpu(src, &min_key, i); 2967 2968 if (min_key.objectid != ino || min_key.type != key_type) 2969 goto done; 2970 ret = overwrite_item(trans, log, dst_path, src, i, 2971 &min_key); 2972 if (ret) { 2973 err = ret; 2974 goto done; 2975 } 2976 } 2977 path->slots[0] = nritems; 2978 2979 /* 2980 * look ahead to the next item and see if it is also 2981 * from this directory and from this transaction 2982 */ 2983 ret = btrfs_next_leaf(root, path); 2984 if (ret == 1) { 2985 last_offset = (u64)-1; 2986 goto done; 2987 } 2988 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]); 2989 if (tmp.objectid != ino || tmp.type != key_type) { 2990 last_offset = (u64)-1; 2991 goto done; 2992 } 2993 if (btrfs_header_generation(path->nodes[0]) != trans->transid) { 2994 ret = overwrite_item(trans, log, dst_path, 2995 path->nodes[0], path->slots[0], 2996 &tmp); 2997 if (ret) 2998 err = ret; 2999 else 3000 last_offset = tmp.offset; 3001 goto done; 3002 } 3003 } 3004 done: 3005 btrfs_release_path(path); 3006 btrfs_release_path(dst_path); 3007 3008 if (err == 0) { 3009 *last_offset_ret = last_offset; 3010 /* 3011 * insert the log range keys to indicate where the log 3012 * is valid 3013 */ 3014 ret = insert_dir_log_key(trans, log, path, key_type, 3015 ino, first_offset, last_offset); 3016 if (ret) 3017 err = ret; 3018 } 3019 return err; 3020 } 3021 3022 /* 3023 * logging directories is very similar to logging inodes, We find all the items 3024 * from the current transaction and write them to the log. 3025 * 3026 * The recovery code scans the directory in the subvolume, and if it finds a 3027 * key in the range logged that is not present in the log tree, then it means 3028 * that dir entry was unlinked during the transaction. 3029 * 3030 * In order for that scan to work, we must include one key smaller than 3031 * the smallest logged by this transaction and one key larger than the largest 3032 * key logged by this transaction. 3033 */ 3034 static noinline int log_directory_changes(struct btrfs_trans_handle *trans, 3035 struct btrfs_root *root, struct inode *inode, 3036 struct btrfs_path *path, 3037 struct btrfs_path *dst_path) 3038 { 3039 u64 min_key; 3040 u64 max_key; 3041 int ret; 3042 int key_type = BTRFS_DIR_ITEM_KEY; 3043 3044 again: 3045 min_key = 0; 3046 max_key = 0; 3047 while (1) { 3048 ret = log_dir_items(trans, root, inode, path, 3049 dst_path, key_type, min_key, 3050 &max_key); 3051 if (ret) 3052 return ret; 3053 if (max_key == (u64)-1) 3054 break; 3055 min_key = max_key + 1; 3056 } 3057 3058 if (key_type == BTRFS_DIR_ITEM_KEY) { 3059 key_type = BTRFS_DIR_INDEX_KEY; 3060 goto again; 3061 } 3062 return 0; 3063 } 3064 3065 /* 3066 * a helper function to drop items from the log before we relog an 3067 * inode. max_key_type indicates the highest item type to remove. 3068 * This cannot be run for file data extents because it does not 3069 * free the extents they point to. 3070 */ 3071 static int drop_objectid_items(struct btrfs_trans_handle *trans, 3072 struct btrfs_root *log, 3073 struct btrfs_path *path, 3074 u64 objectid, int max_key_type) 3075 { 3076 int ret; 3077 struct btrfs_key key; 3078 struct btrfs_key found_key; 3079 int start_slot; 3080 3081 key.objectid = objectid; 3082 key.type = max_key_type; 3083 key.offset = (u64)-1; 3084 3085 while (1) { 3086 ret = btrfs_search_slot(trans, log, &key, path, -1, 1); 3087 BUG_ON(ret == 0); /* Logic error */ 3088 if (ret < 0) 3089 break; 3090 3091 if (path->slots[0] == 0) 3092 break; 3093 3094 path->slots[0]--; 3095 btrfs_item_key_to_cpu(path->nodes[0], &found_key, 3096 path->slots[0]); 3097 3098 if (found_key.objectid != objectid) 3099 break; 3100 3101 found_key.offset = 0; 3102 found_key.type = 0; 3103 ret = btrfs_bin_search(path->nodes[0], &found_key, 0, 3104 &start_slot); 3105 3106 ret = btrfs_del_items(trans, log, path, start_slot, 3107 path->slots[0] - start_slot + 1); 3108 /* 3109 * If start slot isn't 0 then we don't need to re-search, we've 3110 * found the last guy with the objectid in this tree. 3111 */ 3112 if (ret || start_slot != 0) 3113 break; 3114 btrfs_release_path(path); 3115 } 3116 btrfs_release_path(path); 3117 if (ret > 0) 3118 ret = 0; 3119 return ret; 3120 } 3121 3122 static void fill_inode_item(struct btrfs_trans_handle *trans, 3123 struct extent_buffer *leaf, 3124 struct btrfs_inode_item *item, 3125 struct inode *inode, int log_inode_only) 3126 { 3127 struct btrfs_map_token token; 3128 3129 btrfs_init_map_token(&token); 3130 3131 if (log_inode_only) { 3132 /* set the generation to zero so the recover code 3133 * can tell the difference between an logging 3134 * just to say 'this inode exists' and a logging 3135 * to say 'update this inode with these values' 3136 */ 3137 btrfs_set_token_inode_generation(leaf, item, 0, &token); 3138 btrfs_set_token_inode_size(leaf, item, 0, &token); 3139 } else { 3140 btrfs_set_token_inode_generation(leaf, item, 3141 BTRFS_I(inode)->generation, 3142 &token); 3143 btrfs_set_token_inode_size(leaf, item, inode->i_size, &token); 3144 } 3145 3146 btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token); 3147 btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token); 3148 btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token); 3149 btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token); 3150 3151 btrfs_set_token_timespec_sec(leaf, btrfs_inode_atime(item), 3152 inode->i_atime.tv_sec, &token); 3153 btrfs_set_token_timespec_nsec(leaf, btrfs_inode_atime(item), 3154 inode->i_atime.tv_nsec, &token); 3155 3156 btrfs_set_token_timespec_sec(leaf, btrfs_inode_mtime(item), 3157 inode->i_mtime.tv_sec, &token); 3158 btrfs_set_token_timespec_nsec(leaf, btrfs_inode_mtime(item), 3159 inode->i_mtime.tv_nsec, &token); 3160 3161 btrfs_set_token_timespec_sec(leaf, btrfs_inode_ctime(item), 3162 inode->i_ctime.tv_sec, &token); 3163 btrfs_set_token_timespec_nsec(leaf, btrfs_inode_ctime(item), 3164 inode->i_ctime.tv_nsec, &token); 3165 3166 btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode), 3167 &token); 3168 3169 btrfs_set_token_inode_sequence(leaf, item, inode->i_version, &token); 3170 btrfs_set_token_inode_transid(leaf, item, trans->transid, &token); 3171 btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token); 3172 btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token); 3173 btrfs_set_token_inode_block_group(leaf, item, 0, &token); 3174 } 3175 3176 static int log_inode_item(struct btrfs_trans_handle *trans, 3177 struct btrfs_root *log, struct btrfs_path *path, 3178 struct inode *inode) 3179 { 3180 struct btrfs_inode_item *inode_item; 3181 int ret; 3182 3183 ret = btrfs_insert_empty_item(trans, log, path, 3184 &BTRFS_I(inode)->location, 3185 sizeof(*inode_item)); 3186 if (ret && ret != -EEXIST) 3187 return ret; 3188 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0], 3189 struct btrfs_inode_item); 3190 fill_inode_item(trans, path->nodes[0], inode_item, inode, 0); 3191 btrfs_release_path(path); 3192 return 0; 3193 } 3194 3195 static noinline int copy_items(struct btrfs_trans_handle *trans, 3196 struct inode *inode, 3197 struct btrfs_path *dst_path, 3198 struct btrfs_path *src_path, u64 *last_extent, 3199 int start_slot, int nr, int inode_only) 3200 { 3201 unsigned long src_offset; 3202 unsigned long dst_offset; 3203 struct btrfs_root *log = BTRFS_I(inode)->root->log_root; 3204 struct btrfs_file_extent_item *extent; 3205 struct btrfs_inode_item *inode_item; 3206 struct extent_buffer *src = src_path->nodes[0]; 3207 struct btrfs_key first_key, last_key, key; 3208 int ret; 3209 struct btrfs_key *ins_keys; 3210 u32 *ins_sizes; 3211 char *ins_data; 3212 int i; 3213 struct list_head ordered_sums; 3214 int skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; 3215 bool has_extents = false; 3216 bool need_find_last_extent = (*last_extent == 0); 3217 bool done = false; 3218 3219 INIT_LIST_HEAD(&ordered_sums); 3220 3221 ins_data = kmalloc(nr * sizeof(struct btrfs_key) + 3222 nr * sizeof(u32), GFP_NOFS); 3223 if (!ins_data) 3224 return -ENOMEM; 3225 3226 first_key.objectid = (u64)-1; 3227 3228 ins_sizes = (u32 *)ins_data; 3229 ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32)); 3230 3231 for (i = 0; i < nr; i++) { 3232 ins_sizes[i] = btrfs_item_size_nr(src, i + start_slot); 3233 btrfs_item_key_to_cpu(src, ins_keys + i, i + start_slot); 3234 } 3235 ret = btrfs_insert_empty_items(trans, log, dst_path, 3236 ins_keys, ins_sizes, nr); 3237 if (ret) { 3238 kfree(ins_data); 3239 return ret; 3240 } 3241 3242 for (i = 0; i < nr; i++, dst_path->slots[0]++) { 3243 dst_offset = btrfs_item_ptr_offset(dst_path->nodes[0], 3244 dst_path->slots[0]); 3245 3246 src_offset = btrfs_item_ptr_offset(src, start_slot + i); 3247 3248 if ((i == (nr - 1))) 3249 last_key = ins_keys[i]; 3250 3251 if (ins_keys[i].type == BTRFS_INODE_ITEM_KEY) { 3252 inode_item = btrfs_item_ptr(dst_path->nodes[0], 3253 dst_path->slots[0], 3254 struct btrfs_inode_item); 3255 fill_inode_item(trans, dst_path->nodes[0], inode_item, 3256 inode, inode_only == LOG_INODE_EXISTS); 3257 } else { 3258 copy_extent_buffer(dst_path->nodes[0], src, dst_offset, 3259 src_offset, ins_sizes[i]); 3260 } 3261 3262 /* 3263 * We set need_find_last_extent here in case we know we were 3264 * processing other items and then walk into the first extent in 3265 * the inode. If we don't hit an extent then nothing changes, 3266 * we'll do the last search the next time around. 3267 */ 3268 if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY) { 3269 has_extents = true; 3270 if (need_find_last_extent && 3271 first_key.objectid == (u64)-1) 3272 first_key = ins_keys[i]; 3273 } else { 3274 need_find_last_extent = false; 3275 } 3276 3277 /* take a reference on file data extents so that truncates 3278 * or deletes of this inode don't have to relog the inode 3279 * again 3280 */ 3281 if (btrfs_key_type(ins_keys + i) == BTRFS_EXTENT_DATA_KEY && 3282 !skip_csum) { 3283 int found_type; 3284 extent = btrfs_item_ptr(src, start_slot + i, 3285 struct btrfs_file_extent_item); 3286 3287 if (btrfs_file_extent_generation(src, extent) < trans->transid) 3288 continue; 3289 3290 found_type = btrfs_file_extent_type(src, extent); 3291 if (found_type == BTRFS_FILE_EXTENT_REG) { 3292 u64 ds, dl, cs, cl; 3293 ds = btrfs_file_extent_disk_bytenr(src, 3294 extent); 3295 /* ds == 0 is a hole */ 3296 if (ds == 0) 3297 continue; 3298 3299 dl = btrfs_file_extent_disk_num_bytes(src, 3300 extent); 3301 cs = btrfs_file_extent_offset(src, extent); 3302 cl = btrfs_file_extent_num_bytes(src, 3303 extent); 3304 if (btrfs_file_extent_compression(src, 3305 extent)) { 3306 cs = 0; 3307 cl = dl; 3308 } 3309 3310 ret = btrfs_lookup_csums_range( 3311 log->fs_info->csum_root, 3312 ds + cs, ds + cs + cl - 1, 3313 &ordered_sums, 0); 3314 if (ret) { 3315 btrfs_release_path(dst_path); 3316 kfree(ins_data); 3317 return ret; 3318 } 3319 } 3320 } 3321 } 3322 3323 btrfs_mark_buffer_dirty(dst_path->nodes[0]); 3324 btrfs_release_path(dst_path); 3325 kfree(ins_data); 3326 3327 /* 3328 * we have to do this after the loop above to avoid changing the 3329 * log tree while trying to change the log tree. 3330 */ 3331 ret = 0; 3332 while (!list_empty(&ordered_sums)) { 3333 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next, 3334 struct btrfs_ordered_sum, 3335 list); 3336 if (!ret) 3337 ret = btrfs_csum_file_blocks(trans, log, sums); 3338 list_del(&sums->list); 3339 kfree(sums); 3340 } 3341 3342 if (!has_extents) 3343 return ret; 3344 3345 /* 3346 * Because we use btrfs_search_forward we could skip leaves that were 3347 * not modified and then assume *last_extent is valid when it really 3348 * isn't. So back up to the previous leaf and read the end of the last 3349 * extent before we go and fill in holes. 3350 */ 3351 if (need_find_last_extent) { 3352 u64 len; 3353 3354 ret = btrfs_prev_leaf(BTRFS_I(inode)->root, src_path); 3355 if (ret < 0) 3356 return ret; 3357 if (ret) 3358 goto fill_holes; 3359 if (src_path->slots[0]) 3360 src_path->slots[0]--; 3361 src = src_path->nodes[0]; 3362 btrfs_item_key_to_cpu(src, &key, src_path->slots[0]); 3363 if (key.objectid != btrfs_ino(inode) || 3364 key.type != BTRFS_EXTENT_DATA_KEY) 3365 goto fill_holes; 3366 extent = btrfs_item_ptr(src, src_path->slots[0], 3367 struct btrfs_file_extent_item); 3368 if (btrfs_file_extent_type(src, extent) == 3369 BTRFS_FILE_EXTENT_INLINE) { 3370 len = btrfs_file_extent_inline_len(src, 3371 src_path->slots[0], 3372 extent); 3373 *last_extent = ALIGN(key.offset + len, 3374 log->sectorsize); 3375 } else { 3376 len = btrfs_file_extent_num_bytes(src, extent); 3377 *last_extent = key.offset + len; 3378 } 3379 } 3380 fill_holes: 3381 /* So we did prev_leaf, now we need to move to the next leaf, but a few 3382 * things could have happened 3383 * 3384 * 1) A merge could have happened, so we could currently be on a leaf 3385 * that holds what we were copying in the first place. 3386 * 2) A split could have happened, and now not all of the items we want 3387 * are on the same leaf. 3388 * 3389 * So we need to adjust how we search for holes, we need to drop the 3390 * path and re-search for the first extent key we found, and then walk 3391 * forward until we hit the last one we copied. 3392 */ 3393 if (need_find_last_extent) { 3394 /* btrfs_prev_leaf could return 1 without releasing the path */ 3395 btrfs_release_path(src_path); 3396 ret = btrfs_search_slot(NULL, BTRFS_I(inode)->root, &first_key, 3397 src_path, 0, 0); 3398 if (ret < 0) 3399 return ret; 3400 ASSERT(ret == 0); 3401 src = src_path->nodes[0]; 3402 i = src_path->slots[0]; 3403 } else { 3404 i = start_slot; 3405 } 3406 3407 /* 3408 * Ok so here we need to go through and fill in any holes we may have 3409 * to make sure that holes are punched for those areas in case they had 3410 * extents previously. 3411 */ 3412 while (!done) { 3413 u64 offset, len; 3414 u64 extent_end; 3415 3416 if (i >= btrfs_header_nritems(src_path->nodes[0])) { 3417 ret = btrfs_next_leaf(BTRFS_I(inode)->root, src_path); 3418 if (ret < 0) 3419 return ret; 3420 ASSERT(ret == 0); 3421 src = src_path->nodes[0]; 3422 i = 0; 3423 } 3424 3425 btrfs_item_key_to_cpu(src, &key, i); 3426 if (!btrfs_comp_cpu_keys(&key, &last_key)) 3427 done = true; 3428 if (key.objectid != btrfs_ino(inode) || 3429 key.type != BTRFS_EXTENT_DATA_KEY) { 3430 i++; 3431 continue; 3432 } 3433 extent = btrfs_item_ptr(src, i, struct btrfs_file_extent_item); 3434 if (btrfs_file_extent_type(src, extent) == 3435 BTRFS_FILE_EXTENT_INLINE) { 3436 len = btrfs_file_extent_inline_len(src, i, extent); 3437 extent_end = ALIGN(key.offset + len, log->sectorsize); 3438 } else { 3439 len = btrfs_file_extent_num_bytes(src, extent); 3440 extent_end = key.offset + len; 3441 } 3442 i++; 3443 3444 if (*last_extent == key.offset) { 3445 *last_extent = extent_end; 3446 continue; 3447 } 3448 offset = *last_extent; 3449 len = key.offset - *last_extent; 3450 ret = btrfs_insert_file_extent(trans, log, btrfs_ino(inode), 3451 offset, 0, 0, len, 0, len, 0, 3452 0, 0); 3453 if (ret) 3454 break; 3455 *last_extent = offset + len; 3456 } 3457 /* 3458 * Need to let the callers know we dropped the path so they should 3459 * re-search. 3460 */ 3461 if (!ret && need_find_last_extent) 3462 ret = 1; 3463 return ret; 3464 } 3465 3466 static int extent_cmp(void *priv, struct list_head *a, struct list_head *b) 3467 { 3468 struct extent_map *em1, *em2; 3469 3470 em1 = list_entry(a, struct extent_map, list); 3471 em2 = list_entry(b, struct extent_map, list); 3472 3473 if (em1->start < em2->start) 3474 return -1; 3475 else if (em1->start > em2->start) 3476 return 1; 3477 return 0; 3478 } 3479 3480 static int log_one_extent(struct btrfs_trans_handle *trans, 3481 struct inode *inode, struct btrfs_root *root, 3482 struct extent_map *em, struct btrfs_path *path) 3483 { 3484 struct btrfs_root *log = root->log_root; 3485 struct btrfs_file_extent_item *fi; 3486 struct extent_buffer *leaf; 3487 struct btrfs_ordered_extent *ordered; 3488 struct list_head ordered_sums; 3489 struct btrfs_map_token token; 3490 struct btrfs_key key; 3491 u64 mod_start = em->mod_start; 3492 u64 mod_len = em->mod_len; 3493 u64 csum_offset; 3494 u64 csum_len; 3495 u64 extent_offset = em->start - em->orig_start; 3496 u64 block_len; 3497 int ret; 3498 int index = log->log_transid % 2; 3499 bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; 3500 int extent_inserted = 0; 3501 3502 INIT_LIST_HEAD(&ordered_sums); 3503 btrfs_init_map_token(&token); 3504 3505 ret = __btrfs_drop_extents(trans, log, inode, path, em->start, 3506 em->start + em->len, NULL, 0, 1, 3507 sizeof(*fi), &extent_inserted); 3508 if (ret) 3509 return ret; 3510 3511 if (!extent_inserted) { 3512 key.objectid = btrfs_ino(inode); 3513 key.type = BTRFS_EXTENT_DATA_KEY; 3514 key.offset = em->start; 3515 3516 ret = btrfs_insert_empty_item(trans, log, path, &key, 3517 sizeof(*fi)); 3518 if (ret) 3519 return ret; 3520 } 3521 leaf = path->nodes[0]; 3522 fi = btrfs_item_ptr(leaf, path->slots[0], 3523 struct btrfs_file_extent_item); 3524 3525 btrfs_set_token_file_extent_generation(leaf, fi, em->generation, 3526 &token); 3527 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) { 3528 skip_csum = true; 3529 btrfs_set_token_file_extent_type(leaf, fi, 3530 BTRFS_FILE_EXTENT_PREALLOC, 3531 &token); 3532 } else { 3533 btrfs_set_token_file_extent_type(leaf, fi, 3534 BTRFS_FILE_EXTENT_REG, 3535 &token); 3536 if (em->block_start == EXTENT_MAP_HOLE) 3537 skip_csum = true; 3538 } 3539 3540 block_len = max(em->block_len, em->orig_block_len); 3541 if (em->compress_type != BTRFS_COMPRESS_NONE) { 3542 btrfs_set_token_file_extent_disk_bytenr(leaf, fi, 3543 em->block_start, 3544 &token); 3545 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len, 3546 &token); 3547 } else if (em->block_start < EXTENT_MAP_LAST_BYTE) { 3548 btrfs_set_token_file_extent_disk_bytenr(leaf, fi, 3549 em->block_start - 3550 extent_offset, &token); 3551 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len, 3552 &token); 3553 } else { 3554 btrfs_set_token_file_extent_disk_bytenr(leaf, fi, 0, &token); 3555 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, 0, 3556 &token); 3557 } 3558 3559 btrfs_set_token_file_extent_offset(leaf, fi, 3560 em->start - em->orig_start, 3561 &token); 3562 btrfs_set_token_file_extent_num_bytes(leaf, fi, em->len, &token); 3563 btrfs_set_token_file_extent_ram_bytes(leaf, fi, em->ram_bytes, &token); 3564 btrfs_set_token_file_extent_compression(leaf, fi, em->compress_type, 3565 &token); 3566 btrfs_set_token_file_extent_encryption(leaf, fi, 0, &token); 3567 btrfs_set_token_file_extent_other_encoding(leaf, fi, 0, &token); 3568 btrfs_mark_buffer_dirty(leaf); 3569 3570 btrfs_release_path(path); 3571 if (ret) { 3572 return ret; 3573 } 3574 3575 if (skip_csum) 3576 return 0; 3577 3578 /* 3579 * First check and see if our csums are on our outstanding ordered 3580 * extents. 3581 */ 3582 again: 3583 spin_lock_irq(&log->log_extents_lock[index]); 3584 list_for_each_entry(ordered, &log->logged_list[index], log_list) { 3585 struct btrfs_ordered_sum *sum; 3586 3587 if (!mod_len) 3588 break; 3589 3590 if (ordered->inode != inode) 3591 continue; 3592 3593 if (ordered->file_offset + ordered->len <= mod_start || 3594 mod_start + mod_len <= ordered->file_offset) 3595 continue; 3596 3597 /* 3598 * We are going to copy all the csums on this ordered extent, so 3599 * go ahead and adjust mod_start and mod_len in case this 3600 * ordered extent has already been logged. 3601 */ 3602 if (ordered->file_offset > mod_start) { 3603 if (ordered->file_offset + ordered->len >= 3604 mod_start + mod_len) 3605 mod_len = ordered->file_offset - mod_start; 3606 /* 3607 * If we have this case 3608 * 3609 * |--------- logged extent ---------| 3610 * |----- ordered extent ----| 3611 * 3612 * Just don't mess with mod_start and mod_len, we'll 3613 * just end up logging more csums than we need and it 3614 * will be ok. 3615 */ 3616 } else { 3617 if (ordered->file_offset + ordered->len < 3618 mod_start + mod_len) { 3619 mod_len = (mod_start + mod_len) - 3620 (ordered->file_offset + ordered->len); 3621 mod_start = ordered->file_offset + 3622 ordered->len; 3623 } else { 3624 mod_len = 0; 3625 } 3626 } 3627 3628 /* 3629 * To keep us from looping for the above case of an ordered 3630 * extent that falls inside of the logged extent. 3631 */ 3632 if (test_and_set_bit(BTRFS_ORDERED_LOGGED_CSUM, 3633 &ordered->flags)) 3634 continue; 3635 atomic_inc(&ordered->refs); 3636 spin_unlock_irq(&log->log_extents_lock[index]); 3637 /* 3638 * we've dropped the lock, we must either break or 3639 * start over after this. 3640 */ 3641 3642 if (ordered->csum_bytes_left) { 3643 btrfs_start_ordered_extent(inode, ordered, 0); 3644 wait_event(ordered->wait, 3645 ordered->csum_bytes_left == 0); 3646 } 3647 3648 list_for_each_entry(sum, &ordered->list, list) { 3649 ret = btrfs_csum_file_blocks(trans, log, sum); 3650 if (ret) { 3651 btrfs_put_ordered_extent(ordered); 3652 goto unlocked; 3653 } 3654 } 3655 btrfs_put_ordered_extent(ordered); 3656 goto again; 3657 3658 } 3659 spin_unlock_irq(&log->log_extents_lock[index]); 3660 unlocked: 3661 3662 if (!mod_len || ret) 3663 return ret; 3664 3665 if (em->compress_type) { 3666 csum_offset = 0; 3667 csum_len = block_len; 3668 } else { 3669 csum_offset = mod_start - em->start; 3670 csum_len = mod_len; 3671 } 3672 3673 /* block start is already adjusted for the file extent offset. */ 3674 ret = btrfs_lookup_csums_range(log->fs_info->csum_root, 3675 em->block_start + csum_offset, 3676 em->block_start + csum_offset + 3677 csum_len - 1, &ordered_sums, 0); 3678 if (ret) 3679 return ret; 3680 3681 while (!list_empty(&ordered_sums)) { 3682 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next, 3683 struct btrfs_ordered_sum, 3684 list); 3685 if (!ret) 3686 ret = btrfs_csum_file_blocks(trans, log, sums); 3687 list_del(&sums->list); 3688 kfree(sums); 3689 } 3690 3691 return ret; 3692 } 3693 3694 static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans, 3695 struct btrfs_root *root, 3696 struct inode *inode, 3697 struct btrfs_path *path) 3698 { 3699 struct extent_map *em, *n; 3700 struct list_head extents; 3701 struct extent_map_tree *tree = &BTRFS_I(inode)->extent_tree; 3702 u64 test_gen; 3703 int ret = 0; 3704 int num = 0; 3705 3706 INIT_LIST_HEAD(&extents); 3707 3708 write_lock(&tree->lock); 3709 test_gen = root->fs_info->last_trans_committed; 3710 3711 list_for_each_entry_safe(em, n, &tree->modified_extents, list) { 3712 list_del_init(&em->list); 3713 3714 /* 3715 * Just an arbitrary number, this can be really CPU intensive 3716 * once we start getting a lot of extents, and really once we 3717 * have a bunch of extents we just want to commit since it will 3718 * be faster. 3719 */ 3720 if (++num > 32768) { 3721 list_del_init(&tree->modified_extents); 3722 ret = -EFBIG; 3723 goto process; 3724 } 3725 3726 if (em->generation <= test_gen) 3727 continue; 3728 /* Need a ref to keep it from getting evicted from cache */ 3729 atomic_inc(&em->refs); 3730 set_bit(EXTENT_FLAG_LOGGING, &em->flags); 3731 list_add_tail(&em->list, &extents); 3732 num++; 3733 } 3734 3735 list_sort(NULL, &extents, extent_cmp); 3736 3737 process: 3738 while (!list_empty(&extents)) { 3739 em = list_entry(extents.next, struct extent_map, list); 3740 3741 list_del_init(&em->list); 3742 3743 /* 3744 * If we had an error we just need to delete everybody from our 3745 * private list. 3746 */ 3747 if (ret) { 3748 clear_em_logging(tree, em); 3749 free_extent_map(em); 3750 continue; 3751 } 3752 3753 write_unlock(&tree->lock); 3754 3755 ret = log_one_extent(trans, inode, root, em, path); 3756 write_lock(&tree->lock); 3757 clear_em_logging(tree, em); 3758 free_extent_map(em); 3759 } 3760 WARN_ON(!list_empty(&extents)); 3761 write_unlock(&tree->lock); 3762 3763 btrfs_release_path(path); 3764 return ret; 3765 } 3766 3767 /* log a single inode in the tree log. 3768 * At least one parent directory for this inode must exist in the tree 3769 * or be logged already. 3770 * 3771 * Any items from this inode changed by the current transaction are copied 3772 * to the log tree. An extra reference is taken on any extents in this 3773 * file, allowing us to avoid a whole pile of corner cases around logging 3774 * blocks that have been removed from the tree. 3775 * 3776 * See LOG_INODE_ALL and related defines for a description of what inode_only 3777 * does. 3778 * 3779 * This handles both files and directories. 3780 */ 3781 static int btrfs_log_inode(struct btrfs_trans_handle *trans, 3782 struct btrfs_root *root, struct inode *inode, 3783 int inode_only) 3784 { 3785 struct btrfs_path *path; 3786 struct btrfs_path *dst_path; 3787 struct btrfs_key min_key; 3788 struct btrfs_key max_key; 3789 struct btrfs_root *log = root->log_root; 3790 struct extent_buffer *src = NULL; 3791 u64 last_extent = 0; 3792 int err = 0; 3793 int ret; 3794 int nritems; 3795 int ins_start_slot = 0; 3796 int ins_nr; 3797 bool fast_search = false; 3798 u64 ino = btrfs_ino(inode); 3799 3800 path = btrfs_alloc_path(); 3801 if (!path) 3802 return -ENOMEM; 3803 dst_path = btrfs_alloc_path(); 3804 if (!dst_path) { 3805 btrfs_free_path(path); 3806 return -ENOMEM; 3807 } 3808 3809 min_key.objectid = ino; 3810 min_key.type = BTRFS_INODE_ITEM_KEY; 3811 min_key.offset = 0; 3812 3813 max_key.objectid = ino; 3814 3815 3816 /* today the code can only do partial logging of directories */ 3817 if (S_ISDIR(inode->i_mode) || 3818 (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, 3819 &BTRFS_I(inode)->runtime_flags) && 3820 inode_only == LOG_INODE_EXISTS)) 3821 max_key.type = BTRFS_XATTR_ITEM_KEY; 3822 else 3823 max_key.type = (u8)-1; 3824 max_key.offset = (u64)-1; 3825 3826 /* Only run delayed items if we are a dir or a new file */ 3827 if (S_ISDIR(inode->i_mode) || 3828 BTRFS_I(inode)->generation > root->fs_info->last_trans_committed) { 3829 ret = btrfs_commit_inode_delayed_items(trans, inode); 3830 if (ret) { 3831 btrfs_free_path(path); 3832 btrfs_free_path(dst_path); 3833 return ret; 3834 } 3835 } 3836 3837 mutex_lock(&BTRFS_I(inode)->log_mutex); 3838 3839 btrfs_get_logged_extents(log, inode); 3840 3841 /* 3842 * a brute force approach to making sure we get the most uptodate 3843 * copies of everything. 3844 */ 3845 if (S_ISDIR(inode->i_mode)) { 3846 int max_key_type = BTRFS_DIR_LOG_INDEX_KEY; 3847 3848 if (inode_only == LOG_INODE_EXISTS) 3849 max_key_type = BTRFS_XATTR_ITEM_KEY; 3850 ret = drop_objectid_items(trans, log, path, ino, max_key_type); 3851 } else { 3852 if (test_and_clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC, 3853 &BTRFS_I(inode)->runtime_flags)) { 3854 clear_bit(BTRFS_INODE_COPY_EVERYTHING, 3855 &BTRFS_I(inode)->runtime_flags); 3856 ret = btrfs_truncate_inode_items(trans, log, 3857 inode, 0, 0); 3858 } else if (test_and_clear_bit(BTRFS_INODE_COPY_EVERYTHING, 3859 &BTRFS_I(inode)->runtime_flags) || 3860 inode_only == LOG_INODE_EXISTS) { 3861 if (inode_only == LOG_INODE_ALL) 3862 fast_search = true; 3863 max_key.type = BTRFS_XATTR_ITEM_KEY; 3864 ret = drop_objectid_items(trans, log, path, ino, 3865 max_key.type); 3866 } else { 3867 if (inode_only == LOG_INODE_ALL) 3868 fast_search = true; 3869 ret = log_inode_item(trans, log, dst_path, inode); 3870 if (ret) { 3871 err = ret; 3872 goto out_unlock; 3873 } 3874 goto log_extents; 3875 } 3876 3877 } 3878 if (ret) { 3879 err = ret; 3880 goto out_unlock; 3881 } 3882 path->keep_locks = 1; 3883 3884 while (1) { 3885 ins_nr = 0; 3886 ret = btrfs_search_forward(root, &min_key, 3887 path, trans->transid); 3888 if (ret != 0) 3889 break; 3890 again: 3891 /* note, ins_nr might be > 0 here, cleanup outside the loop */ 3892 if (min_key.objectid != ino) 3893 break; 3894 if (min_key.type > max_key.type) 3895 break; 3896 3897 src = path->nodes[0]; 3898 if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) { 3899 ins_nr++; 3900 goto next_slot; 3901 } else if (!ins_nr) { 3902 ins_start_slot = path->slots[0]; 3903 ins_nr = 1; 3904 goto next_slot; 3905 } 3906 3907 ret = copy_items(trans, inode, dst_path, path, &last_extent, 3908 ins_start_slot, ins_nr, inode_only); 3909 if (ret < 0) { 3910 err = ret; 3911 goto out_unlock; 3912 } if (ret) { 3913 ins_nr = 0; 3914 btrfs_release_path(path); 3915 continue; 3916 } 3917 ins_nr = 1; 3918 ins_start_slot = path->slots[0]; 3919 next_slot: 3920 3921 nritems = btrfs_header_nritems(path->nodes[0]); 3922 path->slots[0]++; 3923 if (path->slots[0] < nritems) { 3924 btrfs_item_key_to_cpu(path->nodes[0], &min_key, 3925 path->slots[0]); 3926 goto again; 3927 } 3928 if (ins_nr) { 3929 ret = copy_items(trans, inode, dst_path, path, 3930 &last_extent, ins_start_slot, 3931 ins_nr, inode_only); 3932 if (ret < 0) { 3933 err = ret; 3934 goto out_unlock; 3935 } 3936 ret = 0; 3937 ins_nr = 0; 3938 } 3939 btrfs_release_path(path); 3940 3941 if (min_key.offset < (u64)-1) { 3942 min_key.offset++; 3943 } else if (min_key.type < max_key.type) { 3944 min_key.type++; 3945 min_key.offset = 0; 3946 } else { 3947 break; 3948 } 3949 } 3950 if (ins_nr) { 3951 ret = copy_items(trans, inode, dst_path, path, &last_extent, 3952 ins_start_slot, ins_nr, inode_only); 3953 if (ret < 0) { 3954 err = ret; 3955 goto out_unlock; 3956 } 3957 ret = 0; 3958 ins_nr = 0; 3959 } 3960 3961 log_extents: 3962 btrfs_release_path(path); 3963 btrfs_release_path(dst_path); 3964 if (fast_search) { 3965 ret = btrfs_log_changed_extents(trans, root, inode, dst_path); 3966 if (ret) { 3967 err = ret; 3968 goto out_unlock; 3969 } 3970 } else if (inode_only == LOG_INODE_ALL) { 3971 struct extent_map_tree *tree = &BTRFS_I(inode)->extent_tree; 3972 struct extent_map *em, *n; 3973 3974 write_lock(&tree->lock); 3975 list_for_each_entry_safe(em, n, &tree->modified_extents, list) 3976 list_del_init(&em->list); 3977 write_unlock(&tree->lock); 3978 } 3979 3980 if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->i_mode)) { 3981 ret = log_directory_changes(trans, root, inode, path, dst_path); 3982 if (ret) { 3983 err = ret; 3984 goto out_unlock; 3985 } 3986 } 3987 BTRFS_I(inode)->logged_trans = trans->transid; 3988 BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->last_sub_trans; 3989 out_unlock: 3990 if (err) 3991 btrfs_free_logged_extents(log, log->log_transid); 3992 mutex_unlock(&BTRFS_I(inode)->log_mutex); 3993 3994 btrfs_free_path(path); 3995 btrfs_free_path(dst_path); 3996 return err; 3997 } 3998 3999 /* 4000 * follow the dentry parent pointers up the chain and see if any 4001 * of the directories in it require a full commit before they can 4002 * be logged. Returns zero if nothing special needs to be done or 1 if 4003 * a full commit is required. 4004 */ 4005 static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans, 4006 struct inode *inode, 4007 struct dentry *parent, 4008 struct super_block *sb, 4009 u64 last_committed) 4010 { 4011 int ret = 0; 4012 struct btrfs_root *root; 4013 struct dentry *old_parent = NULL; 4014 struct inode *orig_inode = inode; 4015 4016 /* 4017 * for regular files, if its inode is already on disk, we don't 4018 * have to worry about the parents at all. This is because 4019 * we can use the last_unlink_trans field to record renames 4020 * and other fun in this file. 4021 */ 4022 if (S_ISREG(inode->i_mode) && 4023 BTRFS_I(inode)->generation <= last_committed && 4024 BTRFS_I(inode)->last_unlink_trans <= last_committed) 4025 goto out; 4026 4027 if (!S_ISDIR(inode->i_mode)) { 4028 if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb) 4029 goto out; 4030 inode = parent->d_inode; 4031 } 4032 4033 while (1) { 4034 /* 4035 * If we are logging a directory then we start with our inode, 4036 * not our parents inode, so we need to skipp setting the 4037 * logged_trans so that further down in the log code we don't 4038 * think this inode has already been logged. 4039 */ 4040 if (inode != orig_inode) 4041 BTRFS_I(inode)->logged_trans = trans->transid; 4042 smp_mb(); 4043 4044 if (BTRFS_I(inode)->last_unlink_trans > last_committed) { 4045 root = BTRFS_I(inode)->root; 4046 4047 /* 4048 * make sure any commits to the log are forced 4049 * to be full commits 4050 */ 4051 root->fs_info->last_trans_log_full_commit = 4052 trans->transid; 4053 ret = 1; 4054 break; 4055 } 4056 4057 if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb) 4058 break; 4059 4060 if (IS_ROOT(parent)) 4061 break; 4062 4063 parent = dget_parent(parent); 4064 dput(old_parent); 4065 old_parent = parent; 4066 inode = parent->d_inode; 4067 4068 } 4069 dput(old_parent); 4070 out: 4071 return ret; 4072 } 4073 4074 /* 4075 * helper function around btrfs_log_inode to make sure newly created 4076 * parent directories also end up in the log. A minimal inode and backref 4077 * only logging is done of any parent directories that are older than 4078 * the last committed transaction 4079 */ 4080 static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans, 4081 struct btrfs_root *root, struct inode *inode, 4082 struct dentry *parent, int exists_only) 4083 { 4084 int inode_only = exists_only ? LOG_INODE_EXISTS : LOG_INODE_ALL; 4085 struct super_block *sb; 4086 struct dentry *old_parent = NULL; 4087 int ret = 0; 4088 u64 last_committed = root->fs_info->last_trans_committed; 4089 4090 sb = inode->i_sb; 4091 4092 if (btrfs_test_opt(root, NOTREELOG)) { 4093 ret = 1; 4094 goto end_no_trans; 4095 } 4096 4097 if (root->fs_info->last_trans_log_full_commit > 4098 root->fs_info->last_trans_committed) { 4099 ret = 1; 4100 goto end_no_trans; 4101 } 4102 4103 if (root != BTRFS_I(inode)->root || 4104 btrfs_root_refs(&root->root_item) == 0) { 4105 ret = 1; 4106 goto end_no_trans; 4107 } 4108 4109 ret = check_parent_dirs_for_sync(trans, inode, parent, 4110 sb, last_committed); 4111 if (ret) 4112 goto end_no_trans; 4113 4114 if (btrfs_inode_in_log(inode, trans->transid)) { 4115 ret = BTRFS_NO_LOG_SYNC; 4116 goto end_no_trans; 4117 } 4118 4119 ret = start_log_trans(trans, root); 4120 if (ret) 4121 goto end_trans; 4122 4123 ret = btrfs_log_inode(trans, root, inode, inode_only); 4124 if (ret) 4125 goto end_trans; 4126 4127 /* 4128 * for regular files, if its inode is already on disk, we don't 4129 * have to worry about the parents at all. This is because 4130 * we can use the last_unlink_trans field to record renames 4131 * and other fun in this file. 4132 */ 4133 if (S_ISREG(inode->i_mode) && 4134 BTRFS_I(inode)->generation <= last_committed && 4135 BTRFS_I(inode)->last_unlink_trans <= last_committed) { 4136 ret = 0; 4137 goto end_trans; 4138 } 4139 4140 inode_only = LOG_INODE_EXISTS; 4141 while (1) { 4142 if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb) 4143 break; 4144 4145 inode = parent->d_inode; 4146 if (root != BTRFS_I(inode)->root) 4147 break; 4148 4149 if (BTRFS_I(inode)->generation > 4150 root->fs_info->last_trans_committed) { 4151 ret = btrfs_log_inode(trans, root, inode, inode_only); 4152 if (ret) 4153 goto end_trans; 4154 } 4155 if (IS_ROOT(parent)) 4156 break; 4157 4158 parent = dget_parent(parent); 4159 dput(old_parent); 4160 old_parent = parent; 4161 } 4162 ret = 0; 4163 end_trans: 4164 dput(old_parent); 4165 if (ret < 0) { 4166 root->fs_info->last_trans_log_full_commit = trans->transid; 4167 ret = 1; 4168 } 4169 btrfs_end_log_trans(root); 4170 end_no_trans: 4171 return ret; 4172 } 4173 4174 /* 4175 * it is not safe to log dentry if the chunk root has added new 4176 * chunks. This returns 0 if the dentry was logged, and 1 otherwise. 4177 * If this returns 1, you must commit the transaction to safely get your 4178 * data on disk. 4179 */ 4180 int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans, 4181 struct btrfs_root *root, struct dentry *dentry) 4182 { 4183 struct dentry *parent = dget_parent(dentry); 4184 int ret; 4185 4186 ret = btrfs_log_inode_parent(trans, root, dentry->d_inode, parent, 0); 4187 dput(parent); 4188 4189 return ret; 4190 } 4191 4192 /* 4193 * should be called during mount to recover any replay any log trees 4194 * from the FS 4195 */ 4196 int btrfs_recover_log_trees(struct btrfs_root *log_root_tree) 4197 { 4198 int ret; 4199 struct btrfs_path *path; 4200 struct btrfs_trans_handle *trans; 4201 struct btrfs_key key; 4202 struct btrfs_key found_key; 4203 struct btrfs_key tmp_key; 4204 struct btrfs_root *log; 4205 struct btrfs_fs_info *fs_info = log_root_tree->fs_info; 4206 struct walk_control wc = { 4207 .process_func = process_one_buffer, 4208 .stage = 0, 4209 }; 4210 4211 path = btrfs_alloc_path(); 4212 if (!path) 4213 return -ENOMEM; 4214 4215 fs_info->log_root_recovering = 1; 4216 4217 trans = btrfs_start_transaction(fs_info->tree_root, 0); 4218 if (IS_ERR(trans)) { 4219 ret = PTR_ERR(trans); 4220 goto error; 4221 } 4222 4223 wc.trans = trans; 4224 wc.pin = 1; 4225 4226 ret = walk_log_tree(trans, log_root_tree, &wc); 4227 if (ret) { 4228 btrfs_error(fs_info, ret, "Failed to pin buffers while " 4229 "recovering log root tree."); 4230 goto error; 4231 } 4232 4233 again: 4234 key.objectid = BTRFS_TREE_LOG_OBJECTID; 4235 key.offset = (u64)-1; 4236 btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY); 4237 4238 while (1) { 4239 ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0); 4240 4241 if (ret < 0) { 4242 btrfs_error(fs_info, ret, 4243 "Couldn't find tree log root."); 4244 goto error; 4245 } 4246 if (ret > 0) { 4247 if (path->slots[0] == 0) 4248 break; 4249 path->slots[0]--; 4250 } 4251 btrfs_item_key_to_cpu(path->nodes[0], &found_key, 4252 path->slots[0]); 4253 btrfs_release_path(path); 4254 if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID) 4255 break; 4256 4257 log = btrfs_read_fs_root(log_root_tree, &found_key); 4258 if (IS_ERR(log)) { 4259 ret = PTR_ERR(log); 4260 btrfs_error(fs_info, ret, 4261 "Couldn't read tree log root."); 4262 goto error; 4263 } 4264 4265 tmp_key.objectid = found_key.offset; 4266 tmp_key.type = BTRFS_ROOT_ITEM_KEY; 4267 tmp_key.offset = (u64)-1; 4268 4269 wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key); 4270 if (IS_ERR(wc.replay_dest)) { 4271 ret = PTR_ERR(wc.replay_dest); 4272 free_extent_buffer(log->node); 4273 free_extent_buffer(log->commit_root); 4274 kfree(log); 4275 btrfs_error(fs_info, ret, "Couldn't read target root " 4276 "for tree log recovery."); 4277 goto error; 4278 } 4279 4280 wc.replay_dest->log_root = log; 4281 btrfs_record_root_in_trans(trans, wc.replay_dest); 4282 ret = walk_log_tree(trans, log, &wc); 4283 4284 if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) { 4285 ret = fixup_inode_link_counts(trans, wc.replay_dest, 4286 path); 4287 } 4288 4289 key.offset = found_key.offset - 1; 4290 wc.replay_dest->log_root = NULL; 4291 free_extent_buffer(log->node); 4292 free_extent_buffer(log->commit_root); 4293 kfree(log); 4294 4295 if (ret) 4296 goto error; 4297 4298 if (found_key.offset == 0) 4299 break; 4300 } 4301 btrfs_release_path(path); 4302 4303 /* step one is to pin it all, step two is to replay just inodes */ 4304 if (wc.pin) { 4305 wc.pin = 0; 4306 wc.process_func = replay_one_buffer; 4307 wc.stage = LOG_WALK_REPLAY_INODES; 4308 goto again; 4309 } 4310 /* step three is to replay everything */ 4311 if (wc.stage < LOG_WALK_REPLAY_ALL) { 4312 wc.stage++; 4313 goto again; 4314 } 4315 4316 btrfs_free_path(path); 4317 4318 /* step 4: commit the transaction, which also unpins the blocks */ 4319 ret = btrfs_commit_transaction(trans, fs_info->tree_root); 4320 if (ret) 4321 return ret; 4322 4323 free_extent_buffer(log_root_tree->node); 4324 log_root_tree->log_root = NULL; 4325 fs_info->log_root_recovering = 0; 4326 kfree(log_root_tree); 4327 4328 return 0; 4329 error: 4330 if (wc.trans) 4331 btrfs_end_transaction(wc.trans, fs_info->tree_root); 4332 btrfs_free_path(path); 4333 return ret; 4334 } 4335 4336 /* 4337 * there are some corner cases where we want to force a full 4338 * commit instead of allowing a directory to be logged. 4339 * 4340 * They revolve around files there were unlinked from the directory, and 4341 * this function updates the parent directory so that a full commit is 4342 * properly done if it is fsync'd later after the unlinks are done. 4343 */ 4344 void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans, 4345 struct inode *dir, struct inode *inode, 4346 int for_rename) 4347 { 4348 /* 4349 * when we're logging a file, if it hasn't been renamed 4350 * or unlinked, and its inode is fully committed on disk, 4351 * we don't have to worry about walking up the directory chain 4352 * to log its parents. 4353 * 4354 * So, we use the last_unlink_trans field to put this transid 4355 * into the file. When the file is logged we check it and 4356 * don't log the parents if the file is fully on disk. 4357 */ 4358 if (S_ISREG(inode->i_mode)) 4359 BTRFS_I(inode)->last_unlink_trans = trans->transid; 4360 4361 /* 4362 * if this directory was already logged any new 4363 * names for this file/dir will get recorded 4364 */ 4365 smp_mb(); 4366 if (BTRFS_I(dir)->logged_trans == trans->transid) 4367 return; 4368 4369 /* 4370 * if the inode we're about to unlink was logged, 4371 * the log will be properly updated for any new names 4372 */ 4373 if (BTRFS_I(inode)->logged_trans == trans->transid) 4374 return; 4375 4376 /* 4377 * when renaming files across directories, if the directory 4378 * there we're unlinking from gets fsync'd later on, there's 4379 * no way to find the destination directory later and fsync it 4380 * properly. So, we have to be conservative and force commits 4381 * so the new name gets discovered. 4382 */ 4383 if (for_rename) 4384 goto record; 4385 4386 /* we can safely do the unlink without any special recording */ 4387 return; 4388 4389 record: 4390 BTRFS_I(dir)->last_unlink_trans = trans->transid; 4391 } 4392 4393 /* 4394 * Call this after adding a new name for a file and it will properly 4395 * update the log to reflect the new name. 4396 * 4397 * It will return zero if all goes well, and it will return 1 if a 4398 * full transaction commit is required. 4399 */ 4400 int btrfs_log_new_name(struct btrfs_trans_handle *trans, 4401 struct inode *inode, struct inode *old_dir, 4402 struct dentry *parent) 4403 { 4404 struct btrfs_root * root = BTRFS_I(inode)->root; 4405 4406 /* 4407 * this will force the logging code to walk the dentry chain 4408 * up for the file 4409 */ 4410 if (S_ISREG(inode->i_mode)) 4411 BTRFS_I(inode)->last_unlink_trans = trans->transid; 4412 4413 /* 4414 * if this inode hasn't been logged and directory we're renaming it 4415 * from hasn't been logged, we don't need to log it 4416 */ 4417 if (BTRFS_I(inode)->logged_trans <= 4418 root->fs_info->last_trans_committed && 4419 (!old_dir || BTRFS_I(old_dir)->logged_trans <= 4420 root->fs_info->last_trans_committed)) 4421 return 0; 4422 4423 return btrfs_log_inode_parent(trans, root, inode, parent, 1); 4424 } 4425 4426