/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include "ctree.h"
#include "transaction.h"
#include "disk-io.h"
#include "locking.h"
#include "print-tree.h"
#include "compat.h"
#include "tree-log.h"

/* magic values for the inode_only field in btrfs_log_inode:
 *
 * LOG_INODE_ALL means to log everything
 * LOG_INODE_EXISTS means to log just enough to recreate the inode
 * during log replay
 */
#define LOG_INODE_ALL 0
#define LOG_INODE_EXISTS 1

/*
 * directory trouble cases
 *
 * 1) on rename or unlink, if the inode being unlinked isn't in the fsync
 * log, we must force a full commit before doing an fsync of the directory
 * where the unlink was done.
 * ---> record transid of last unlink/rename per directory
 *
 * mkdir foo/some_dir
 * normal commit
 * rename foo/some_dir foo2/some_dir
 * mkdir foo/some_dir
 * fsync foo/some_dir/some_file
 *
 * The fsync above will unlink the original some_dir without recording
 * it in its new location (foo2).  After a crash, some_dir will be gone
 * unless the fsync of some_file forces a full commit
 *
 * 2) we must log any new names for any file or dir that is in the fsync
 * log. ---> check inode while renaming/linking.
 *
 * 2a) we must log any new names for any file or dir during rename
 * when the directory they are being removed from was logged.
 * ---> check inode and old parent dir during rename
 *
 * 2a is actually the more important variant.  Without the extra logging
 * a crash might unlink the old name without recreating the new one
 *
 * 3) after a crash, we must go through any directories with a link count
 * of zero and redo the rm -rf
 *
 * mkdir f1/foo
 * normal commit
 * rm -rf f1/foo
 * fsync(f1)
 *
 * The directory f1 was fully removed from the FS, but fsync was never
 * called on f1, only its parent dir.  After a crash the rm -rf must
 * be replayed.  This must be able to recurse down the entire
 * directory tree.  The inode link count fixup code takes care of the
 * ugly details.
 */

/*
 * stages for the tree walking.  The first stage (0) is to only pin
 * down the blocks we find.  The second stage (1) is to make sure that
 * all the inodes we find in the log are created in the subvolume.
 *
 * The last stage is to deal with directories and links and extents
 * and all the other fun semantics
 */
#define LOG_WALK_PIN_ONLY 0
#define LOG_WALK_REPLAY_INODES 1
#define LOG_WALK_REPLAY_ALL 2
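/*
 * Illustrative sketch, not code from this file: log recovery at mount
 * time drives the stages above by walking every log tree once per
 * stage, roughly like
 *
 *	struct walk_control wc = { .process_func = process_one_buffer };
 *
 *	wc.pin = 1;				(LOG_WALK_PIN_ONLY)
 *	walk_log_tree(trans, log, &wc);
 *
 *	wc.pin = 0;
 *	wc.process_func = replay_one_buffer;
 *	wc.stage = LOG_WALK_REPLAY_INODES;	(create all logged inodes)
 *	walk_log_tree(trans, log, &wc);
 *
 *	wc.stage = LOG_WALK_REPLAY_ALL;		(everything else)
 *	walk_log_tree(trans, log, &wc);
 */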
static int btrfs_log_inode(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, struct inode *inode,
			   int inode_only);
static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path, u64 objectid);
static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_root *log,
				       struct btrfs_path *path,
				       u64 dirid, int del_all);

/*
 * tree logging is a special write ahead log used to make sure that
 * fsyncs and O_SYNCs can happen without doing full tree commits.
 *
 * Full tree commits are expensive because they require commonly
 * modified blocks to be recowed, creating many dirty pages in the
 * extent tree and a 4x-6x higher write load than ext3.
 *
 * Instead of doing a tree commit on every fsync, we use the
 * key ranges and transaction ids to find items for a given file or directory
 * that have changed in this transaction.  Those items are copied into
 * a special tree (one per subvolume root), that tree is written to disk
 * and then the fsync is considered complete.
 *
 * After a crash, items are copied out of the log-tree back into the
 * subvolume tree.  Any file data extents found are recorded in the extent
 * allocation tree, and the log-tree is freed.
 *
 * The log tree is read three times: once to pin down all the extents it is
 * using in ram, once to create all the inodes logged in the tree
 * and once to do all the other items.
 */
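/*
 * Illustrative call flow for the fast path described above; the entry
 * points live in file.c, only the log side is in this file:
 *
 *	btrfs_sync_file()
 *	  btrfs_log_dentry_safe()	copy changed items into the log
 *	  btrfs_sync_log()		write the log tree to disk
 *
 * btrfs_sync_log() returning non-zero means the caller must fall back
 * to a full btrfs_commit_transaction().
 */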
/*
 * start a sub transaction and setup the log tree
 * this increments the log tree writer count to make the people
 * syncing the tree wait for us to finish
 */
static int start_log_trans(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root)
{
	int ret;
	int err = 0;

	mutex_lock(&root->log_mutex);
	if (root->log_root) {
		if (!root->log_start_pid) {
			root->log_start_pid = current->pid;
			root->log_multiple_pids = false;
		} else if (root->log_start_pid != current->pid) {
			root->log_multiple_pids = true;
		}

		root->log_batch++;
		atomic_inc(&root->log_writers);
		mutex_unlock(&root->log_mutex);
		return 0;
	}
	root->log_multiple_pids = false;
	root->log_start_pid = current->pid;
	mutex_lock(&root->fs_info->tree_log_mutex);
	if (!root->fs_info->log_root_tree) {
		ret = btrfs_init_log_root_tree(trans, root->fs_info);
		if (ret)
			err = ret;
	}
	if (err == 0 && !root->log_root) {
		ret = btrfs_add_log_tree(trans, root);
		if (ret)
			err = ret;
	}
	mutex_unlock(&root->fs_info->tree_log_mutex);
	root->log_batch++;
	atomic_inc(&root->log_writers);
	mutex_unlock(&root->log_mutex);
	return err;
}

/*
 * returns 0 if there was a log transaction running and we were able
 * to join, or returns -ENOENT if there are no transactions
 * in progress
 */
static int join_running_log_trans(struct btrfs_root *root)
{
	int ret = -ENOENT;

	smp_mb();
	if (!root->log_root)
		return -ENOENT;

	mutex_lock(&root->log_mutex);
	if (root->log_root) {
		ret = 0;
		atomic_inc(&root->log_writers);
	}
	mutex_unlock(&root->log_mutex);
	return ret;
}

/*
 * This either makes the current running log transaction wait
 * until you call btrfs_end_log_trans() or it makes any future
 * log transactions wait until you call btrfs_end_log_trans()
 */
int btrfs_pin_log_trans(struct btrfs_root *root)
{
	mutex_lock(&root->log_mutex);
	atomic_inc(&root->log_writers);
	mutex_unlock(&root->log_mutex);
	return 0;
}

/*
 * indicate we're done making changes to the log tree
 * and wake up anyone waiting to do a sync
 */
int btrfs_end_log_trans(struct btrfs_root *root)
{
	if (atomic_dec_and_test(&root->log_writers)) {
		smp_mb();
		if (waitqueue_active(&root->log_writer_wait))
			wake_up(&root->log_writer_wait);
	}
	return 0;
}
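/*
 * Sketch of how the pin/end pair above is used (the caller is
 * btrfs_rename() in inode.c; shown loosely, not verbatim):
 *
 *	btrfs_pin_log_trans(root);
 *	... move the name in the subvolume tree ...
 *	btrfs_log_new_name(trans, old_inode, old_dir, parent);
 *	btrfs_end_log_trans(root);
 *
 * Pinning keeps a log commit from running while names are in flux, so
 * a crash can't land in the middle of the rename.
 */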
/*
 * the walk control struct is used to pass state down the chain when
 * processing the log tree.  The stage field tells us which part
 * of the log tree processing we are currently doing.  The others
 * are state fields used for that specific part
 */
struct walk_control {
	/* should we free the extent on disk when done?  This is used
	 * at transaction commit time while freeing a log tree
	 */
	int free;

	/* should we write out the extent buffer?  This is used
	 * while flushing the log tree to disk during a sync
	 */
	int write;

	/* should we wait for the extent buffer io to finish?  Also used
	 * while flushing the log tree to disk for a sync
	 */
	int wait;

	/* pin only walk, we record which extents on disk belong to the
	 * log trees
	 */
	int pin;

	/* what stage of the replay code we're currently in */
	int stage;

	/* the root we are currently replaying */
	struct btrfs_root *replay_dest;

	/* the trans handle for the current replay */
	struct btrfs_trans_handle *trans;

	/* the function that gets used to process blocks we find in the
	 * tree.  Note the extent_buffer might not be up to date when it is
	 * passed in, and it must be checked or read if you need the data
	 * inside it
	 */
	int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb,
			    struct walk_control *wc, u64 gen);
};

/*
 * process_func used to pin down extents, write them or wait on them
 */
static int process_one_buffer(struct btrfs_root *log,
			      struct extent_buffer *eb,
			      struct walk_control *wc, u64 gen)
{
	if (wc->pin)
		btrfs_pin_extent(log->fs_info->extent_root,
				 eb->start, eb->len, 0);

	if (btrfs_buffer_uptodate(eb, gen)) {
		if (wc->write)
			btrfs_write_tree_block(eb);
		if (wc->wait)
			btrfs_wait_tree_block_writeback(eb);
	}
	return 0;
}
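/*
 * Example setups for the struct above (sketches of callers later in
 * this file): freeing a log tree at transaction commit time looks like
 *
 *	struct walk_control wc = {
 *		.free = 1,
 *		.process_func = process_one_buffer
 *	};
 *	walk_log_tree(trans, log, &wc);
 *
 * and a flush pass would set .write = 1, then .wait = 1, with the same
 * process_func, as the field comments describe.
 */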
/*
 * Item overwrite used by replay and tree logging.  eb, slot and key all refer
 * to the src data we are copying out.
 *
 * root is the tree we are copying into, and path is a scratch
 * path for use in this function (it should be released on entry and
 * will be released on exit).
 *
 * If the key is already in the destination tree the existing item is
 * overwritten.  If the existing item isn't big enough, it is extended.
 * If it is too large, it is truncated.
 *
 * If the key isn't in the destination yet, a new item is inserted.
 */
static noinline int overwrite_item(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct extent_buffer *eb, int slot,
				   struct btrfs_key *key)
{
	int ret;
	u32 item_size;
	u64 saved_i_size = 0;
	int save_old_i_size = 0;
	unsigned long src_ptr;
	unsigned long dst_ptr;
	int overwrite_root = 0;

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
		overwrite_root = 1;

	item_size = btrfs_item_size_nr(eb, slot);
	src_ptr = btrfs_item_ptr_offset(eb, slot);

	/* look for the key in the destination tree */
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret == 0) {
		char *src_copy;
		char *dst_copy;
		u32 dst_size = btrfs_item_size_nr(path->nodes[0],
						  path->slots[0]);
		if (dst_size != item_size)
			goto insert;

		if (item_size == 0) {
			btrfs_release_path(root, path);
			return 0;
		}
		dst_copy = kmalloc(item_size, GFP_NOFS);
		src_copy = kmalloc(item_size, GFP_NOFS);
		if (!dst_copy || !src_copy) {
			btrfs_release_path(root, path);
			kfree(dst_copy);
			kfree(src_copy);
			return -ENOMEM;
		}

		read_extent_buffer(eb, src_copy, src_ptr, item_size);

		dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
		read_extent_buffer(path->nodes[0], dst_copy, dst_ptr,
				   item_size);
		ret = memcmp(dst_copy, src_copy, item_size);

		kfree(dst_copy);
		kfree(src_copy);
		/*
		 * they have the same contents, just return, this saves
		 * us from cowing blocks in the destination tree and doing
		 * extra writes that may not have been done by a previous
		 * sync
		 */
		if (ret == 0) {
			btrfs_release_path(root, path);
			return 0;
		}

	}
insert:
	btrfs_release_path(root, path);
	/* try to insert the key into the destination tree */
	ret = btrfs_insert_empty_item(trans, root, path,
				      key, item_size);

	/* make sure any existing item is the correct size */
	if (ret == -EEXIST) {
		u32 found_size;
		found_size = btrfs_item_size_nr(path->nodes[0],
						path->slots[0]);
		if (found_size > item_size) {
			btrfs_truncate_item(trans, root, path, item_size, 1);
		} else if (found_size < item_size) {
			ret = btrfs_extend_item(trans, root, path,
						item_size - found_size);
			BUG_ON(ret);
		}
	} else if (ret) {
		return ret;
	}
	dst_ptr = btrfs_item_ptr_offset(path->nodes[0],
					path->slots[0]);

	/* don't overwrite an existing inode if the generation number
	 * was logged as zero.  This is done when the tree logging code
	 * is just logging an inode to make sure it exists after recovery.
	 *
	 * Also, don't overwrite i_size on directories during replay.
	 * log replay inserts and removes directory items based on the
	 * state of the tree found in the subvolume, and i_size is modified
	 * as it goes
	 */
	if (key->type == BTRFS_INODE_ITEM_KEY && ret == -EEXIST) {
		struct btrfs_inode_item *src_item;
		struct btrfs_inode_item *dst_item;

		src_item = (struct btrfs_inode_item *)src_ptr;
		dst_item = (struct btrfs_inode_item *)dst_ptr;

		if (btrfs_inode_generation(eb, src_item) == 0)
			goto no_copy;

		if (overwrite_root &&
		    S_ISDIR(btrfs_inode_mode(eb, src_item)) &&
		    S_ISDIR(btrfs_inode_mode(path->nodes[0], dst_item))) {
			save_old_i_size = 1;
			saved_i_size = btrfs_inode_size(path->nodes[0],
							dst_item);
		}
	}

	copy_extent_buffer(path->nodes[0], eb, dst_ptr,
			   src_ptr, item_size);

	if (save_old_i_size) {
		struct btrfs_inode_item *dst_item;
		dst_item = (struct btrfs_inode_item *)dst_ptr;
		btrfs_set_inode_size(path->nodes[0], dst_item, saved_i_size);
	}

	/* make sure the generation is filled in */
	if (key->type == BTRFS_INODE_ITEM_KEY) {
		struct btrfs_inode_item *dst_item;
		dst_item = (struct btrfs_inode_item *)dst_ptr;
		if (btrfs_inode_generation(path->nodes[0], dst_item) == 0) {
			btrfs_set_inode_generation(path->nodes[0], dst_item,
						   trans->transid);
		}
	}
no_copy:
	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_release_path(root, path);
	return 0;
}
/*
 * simple helper to read an inode off the disk from a given root
 * This can only be called for subvolume roots and not for the log
 */
static noinline struct inode *read_one_inode(struct btrfs_root *root,
					     u64 objectid)
{
	struct btrfs_key key;
	struct inode *inode;

	key.objectid = objectid;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;
	inode = btrfs_iget(root->fs_info->sb, &key, root, NULL);
	if (IS_ERR(inode)) {
		inode = NULL;
	} else if (is_bad_inode(inode)) {
		iput(inode);
		inode = NULL;
	}
	return inode;
}
/* replays a single extent in 'eb' at 'slot' with 'key' into the
 * subvolume 'root'.  path is released on entry and should be released
 * on exit.
 *
 * extents in the log tree have not been allocated out of the extent
 * tree yet.  So, this completes the allocation, taking a reference
 * as required if the extent already exists or creating a new extent
 * if it isn't in the extent allocation tree yet.
 *
 * The extent is inserted into the file, dropping any existing extents
 * from the file that overlap the new one.
 */
static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct extent_buffer *eb, int slot,
				      struct btrfs_key *key)
{
	int found_type;
	u64 mask = root->sectorsize - 1;
	u64 extent_end;
	u64 alloc_hint;
	u64 start = key->offset;
	u64 saved_nbytes;
	struct btrfs_file_extent_item *item;
	struct inode *inode = NULL;
	unsigned long size;
	int ret = 0;

	item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
	found_type = btrfs_file_extent_type(eb, item);

	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC)
		extent_end = start + btrfs_file_extent_num_bytes(eb, item);
	else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		size = btrfs_file_extent_inline_len(eb, item);
		extent_end = (start + size + mask) & ~mask;
	} else {
		ret = 0;
		goto out;
	}

	inode = read_one_inode(root, key->objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	/*
	 * first check to see if we already have this extent in the
	 * file.  This must be done before the btrfs_drop_extents run
	 * so we don't try to drop this extent.
	 */
	ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino,
				       start, 0);

	if (ret == 0 &&
	    (found_type == BTRFS_FILE_EXTENT_REG ||
	     found_type == BTRFS_FILE_EXTENT_PREALLOC)) {
		struct btrfs_file_extent_item cmp1;
		struct btrfs_file_extent_item cmp2;
		struct btrfs_file_extent_item *existing;
		struct extent_buffer *leaf;

		leaf = path->nodes[0];
		existing = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_file_extent_item);

		read_extent_buffer(eb, &cmp1, (unsigned long)item,
				   sizeof(cmp1));
		read_extent_buffer(leaf, &cmp2, (unsigned long)existing,
				   sizeof(cmp2));

		/*
		 * we already have a pointer to this exact extent,
		 * we don't have to do anything
		 */
		if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) {
			btrfs_release_path(root, path);
			goto out;
		}
	}
	btrfs_release_path(root, path);

	saved_nbytes = inode_get_bytes(inode);
	/* drop any overlapping extents */
	ret = btrfs_drop_extents(trans, inode, start, extent_end,
				 &alloc_hint, 1);
	BUG_ON(ret);

	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		u64 offset;
		unsigned long dest_offset;
		struct btrfs_key ins;

		ret = btrfs_insert_empty_item(trans, root, path, key,
					      sizeof(*item));
		BUG_ON(ret);
		dest_offset = btrfs_item_ptr_offset(path->nodes[0],
						    path->slots[0]);
		copy_extent_buffer(path->nodes[0], eb, dest_offset,
				   (unsigned long)item, sizeof(*item));

		ins.objectid = btrfs_file_extent_disk_bytenr(eb, item);
		ins.offset = btrfs_file_extent_disk_num_bytes(eb, item);
		ins.type = BTRFS_EXTENT_ITEM_KEY;
		offset = key->offset - btrfs_file_extent_offset(eb, item);

		if (ins.objectid > 0) {
			u64 csum_start;
			u64 csum_end;
			LIST_HEAD(ordered_sums);
			/*
			 * is this extent already allocated in the extent
			 * allocation tree?  If so, just add a reference
			 */
			ret = btrfs_lookup_extent(root, ins.objectid,
						  ins.offset);
			if (ret == 0) {
				ret = btrfs_inc_extent_ref(trans, root,
						ins.objectid, ins.offset,
						0, root->root_key.objectid,
						key->objectid, offset);
			} else {
				/*
				 * insert the extent pointer in the extent
				 * allocation tree
				 */
				ret = btrfs_alloc_logged_file_extent(trans,
						root, root->root_key.objectid,
						key->objectid, offset, &ins);
				BUG_ON(ret);
			}
			btrfs_release_path(root, path);

			if (btrfs_file_extent_compression(eb, item)) {
				csum_start = ins.objectid;
				csum_end = csum_start + ins.offset;
			} else {
				csum_start = ins.objectid +
					btrfs_file_extent_offset(eb, item);
				csum_end = csum_start +
					btrfs_file_extent_num_bytes(eb, item);
			}

			ret = btrfs_lookup_csums_range(root->log_root,
						csum_start, csum_end - 1,
						&ordered_sums);
			BUG_ON(ret);
			while (!list_empty(&ordered_sums)) {
				struct btrfs_ordered_sum *sums;
				sums = list_entry(ordered_sums.next,
						  struct btrfs_ordered_sum,
						  list);
				ret = btrfs_csum_file_blocks(trans,
						root->fs_info->csum_root,
						sums);
				BUG_ON(ret);
				list_del(&sums->list);
				kfree(sums);
			}
		} else {
			btrfs_release_path(root, path);
		}
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		/* inline extents are easy, we just overwrite them */
		ret = overwrite_item(trans, root, path, eb, slot, key);
		BUG_ON(ret);
	}

	inode_set_bytes(inode, saved_nbytes);
	btrfs_update_inode(trans, root, inode);
out:
	if (inode)
		iput(inode);
	return ret;
}
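/*
 * For reference, the geometry replay_one_extent() works with, assuming
 * a regular (non-inline) extent:
 *
 *	key = (inode objectid, BTRFS_EXTENT_DATA_KEY, file offset)
 *	disk extent = [disk_bytenr, disk_bytenr + disk_num_bytes)
 *	bytes used  = [disk_bytenr + extent_offset,
 *		       disk_bytenr + extent_offset + num_bytes)
 *
 * which is why the backref offset above is computed as
 * key->offset - btrfs_file_extent_offset(), and why the csum range for
 * compressed extents covers the whole disk extent.
 */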
/*
 * when cleaning up conflicts between the directory names in the
 * subvolume, directory names in the log and directory names in the
 * inode back references, we may have to unlink inodes from directories.
 *
 * This is a helper function to do the unlink of a specific directory
 * item
 */
static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct inode *dir,
				      struct btrfs_dir_item *di)
{
	struct inode *inode;
	char *name;
	int name_len;
	struct extent_buffer *leaf;
	struct btrfs_key location;
	int ret;

	leaf = path->nodes[0];

	btrfs_dir_item_key_to_cpu(leaf, di, &location);
	name_len = btrfs_dir_name_len(leaf, di);
	name = kmalloc(name_len, GFP_NOFS);
	if (!name)
		return -ENOMEM;
	read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
	btrfs_release_path(root, path);

	inode = read_one_inode(root, location.objectid);
	BUG_ON(!inode);

	ret = link_to_fixup_dir(trans, root, path, location.objectid);
	BUG_ON(ret);

	ret = btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
	BUG_ON(ret);
	kfree(name);

	iput(inode);
	return ret;
}
/*
 * helper function to see if a given name and sequence number found
 * in an inode back reference are already in a directory and correctly
 * point to this inode
 */
static noinline int inode_in_dir(struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 dirid, u64 objectid, u64 index,
				 const char *name, int name_len)
{
	struct btrfs_dir_item *di;
	struct btrfs_key location;
	int match = 0;

	di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
					 index, name, name_len, 0);
	if (di && !IS_ERR(di)) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
		if (location.objectid != objectid)
			goto out;
	} else
		goto out;
	btrfs_release_path(root, path);

	di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
	if (di && !IS_ERR(di)) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
		if (location.objectid != objectid)
			goto out;
	} else
		goto out;
	match = 1;
out:
	btrfs_release_path(root, path);
	return match;
}
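/*
 * The two lookups above cover the two ways a directory entry is keyed
 * on disk:
 *
 *	(dirid, BTRFS_DIR_INDEX_KEY, sequence index)	one name per item
 *	(dirid, BTRFS_DIR_ITEM_KEY, hash of name)	may hold several
 *							names whose hashes
 *							collide
 *
 * A name only counts as present if both items exist and point at the
 * same inode.
 */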
/*
 * helper function to check a log tree for a named back reference in
 * an inode.  This is used to decide if a back reference that is
 * found in the subvolume conflicts with what we find in the log.
 *
 * inode backreferences may have multiple refs in a single item,
 * during replay we process one reference at a time, and we don't
 * want to delete valid links to a file from the subvolume if that
 * link is also in the log.
 */
static noinline int backref_in_log(struct btrfs_root *log,
				   struct btrfs_key *key,
				   char *name, int namelen)
{
	struct btrfs_path *path;
	struct btrfs_inode_ref *ref;
	unsigned long ptr;
	unsigned long ptr_end;
	unsigned long name_ptr;
	int found_name_len;
	int item_size;
	int ret;
	int match = 0;

	path = btrfs_alloc_path();
	ret = btrfs_search_slot(NULL, log, key, path, 0, 0);
	if (ret != 0)
		goto out;

	item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
	ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
	ptr_end = ptr + item_size;
	while (ptr < ptr_end) {
		ref = (struct btrfs_inode_ref *)ptr;
		found_name_len = btrfs_inode_ref_name_len(path->nodes[0], ref);
		if (found_name_len == namelen) {
			name_ptr = (unsigned long)(ref + 1);
			ret = memcmp_extent_buffer(path->nodes[0], name,
						   name_ptr, namelen);
			if (ret == 0) {
				match = 1;
				goto out;
			}
		}
		ptr = (unsigned long)(ref + 1) + found_name_len;
	}
out:
	btrfs_free_path(path);
	return match;
}
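/*
 * Layout backref_in_log() parses, for reference: an inode ref item is
 * keyed (inode objectid, BTRFS_INODE_REF_KEY, parent dir objectid) and
 * packs one or more entries back to back:
 *
 *	struct btrfs_inode_ref { index, name_len }, then name bytes
 *	struct btrfs_inode_ref { index, name_len }, then name bytes
 *	...
 *
 * so all links from one directory to one inode share a single item.
 */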
/*
 * replay one inode back reference item found in the log tree.
 * eb, slot and key refer to the buffer and key found in the log tree.
 * root is the destination we are replaying into, and path is for temp
 * use by this function.  (it should be released on return).
 */
static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_root *log,
				  struct btrfs_path *path,
				  struct extent_buffer *eb, int slot,
				  struct btrfs_key *key)
{
	struct inode *dir;
	int ret;
	struct btrfs_inode_ref *ref;
	struct btrfs_dir_item *di;
	struct inode *inode;
	char *name;
	int namelen;
	unsigned long ref_ptr;
	unsigned long ref_end;

	/*
	 * it is possible that we didn't log all the parent directories
	 * for a given inode.  If we don't find the dir, just don't
	 * copy the back ref in.  The link count fixup code will take
	 * care of the rest
	 */
	dir = read_one_inode(root, key->offset);
	if (!dir)
		return -ENOENT;

	inode = read_one_inode(root, key->objectid);
	BUG_ON(!inode);

	ref_ptr = btrfs_item_ptr_offset(eb, slot);
	ref_end = ref_ptr + btrfs_item_size_nr(eb, slot);

again:
	ref = (struct btrfs_inode_ref *)ref_ptr;

	namelen = btrfs_inode_ref_name_len(eb, ref);
	name = kmalloc(namelen, GFP_NOFS);
	BUG_ON(!name);

	read_extent_buffer(eb, name, (unsigned long)(ref + 1), namelen);

	/* if we already have a perfect match, we're done */
	if (inode_in_dir(root, path, dir->i_ino, inode->i_ino,
			 btrfs_inode_ref_index(eb, ref),
			 name, namelen)) {
		goto out;
	}

	/*
	 * look for a conflicting back reference in the metadata.
	 * if we find one we have to unlink that name of the file
	 * before we add our new link.  Later on, we overwrite any
	 * existing back reference, and we don't want to create
	 * dangling pointers in the directory.
	 */
conflict_again:
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret == 0) {
		char *victim_name;
		int victim_name_len;
		struct btrfs_inode_ref *victim_ref;
		unsigned long ptr;
		unsigned long ptr_end;
		struct extent_buffer *leaf = path->nodes[0];

		/* are we trying to overwrite a back ref for the root directory
		 * if so, just jump out, we're done
		 */
		if (key->objectid == key->offset)
			goto out_nowrite;

		/* check all the names in this back reference to see
		 * if they are in the log.  if so, we allow them to stay
		 * otherwise they must be unlinked as a conflict
		 */
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		ptr_end = ptr + btrfs_item_size_nr(leaf, path->slots[0]);
		while (ptr < ptr_end) {
			victim_ref = (struct btrfs_inode_ref *)ptr;
			victim_name_len = btrfs_inode_ref_name_len(leaf,
								   victim_ref);
			victim_name = kmalloc(victim_name_len, GFP_NOFS);
			BUG_ON(!victim_name);

			read_extent_buffer(leaf, victim_name,
					   (unsigned long)(victim_ref + 1),
					   victim_name_len);

			if (!backref_in_log(log, key, victim_name,
					    victim_name_len)) {
				btrfs_inc_nlink(inode);
				btrfs_release_path(root, path);

				ret = btrfs_unlink_inode(trans, root, dir,
							 inode, victim_name,
							 victim_name_len);
				kfree(victim_name);
				btrfs_release_path(root, path);
				goto conflict_again;
			}
			kfree(victim_name);
			ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
		}
		BUG_ON(ret);
	}
	btrfs_release_path(root, path);

	/* look for a conflicting sequence number */
	di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino,
					 btrfs_inode_ref_index(eb, ref),
					 name, namelen, 0);
	if (di && !IS_ERR(di)) {
		ret = drop_one_dir_item(trans, root, path, dir, di);
		BUG_ON(ret);
	}
	btrfs_release_path(root, path);

	/* look for a conflicting name */
	di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino,
				   name, namelen, 0);
	if (di && !IS_ERR(di)) {
		ret = drop_one_dir_item(trans, root, path, dir, di);
		BUG_ON(ret);
	}
	btrfs_release_path(root, path);

	/* insert our name */
	ret = btrfs_add_link(trans, dir, inode, name, namelen, 0,
			     btrfs_inode_ref_index(eb, ref));
	BUG_ON(ret);

	btrfs_update_inode(trans, root, inode);

out:
	ref_ptr = (unsigned long)(ref + 1) + namelen;
	kfree(name);
	if (ref_ptr < ref_end)
		goto again;

	/* finally write the back reference in the inode */
	ret = overwrite_item(trans, root, path, eb, slot, key);
	BUG_ON(ret);

out_nowrite:
	btrfs_release_path(root, path);
	iput(dir);
	iput(inode);
	return 0;
}

static int insert_orphan_item(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root, u64 offset)
{
	int ret;
	ret = btrfs_find_orphan_item(root, offset);
	if (ret > 0)
		ret = btrfs_insert_orphan_item(trans, root, offset);
	return ret;
}
/*
 * There are a few corners where the link count of the file can't
 * be properly maintained during replay.  So, instead of adding
 * lots of complexity to the log code, we just scan the backrefs
 * for any file that has been through replay.
 *
 * The scan will update the link count on the inode to reflect the
 * number of back refs found.  If it goes down to zero, the iput
 * will free the inode.
 */
static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct inode *inode)
{
	struct btrfs_path *path;
	int ret;
	struct btrfs_key key;
	u64 nlink = 0;
	unsigned long ptr;
	unsigned long ptr_end;
	int name_len;

	key.objectid = inode->i_ino;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = (u64)-1;

	path = btrfs_alloc_path();

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			break;
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &key,
				      path->slots[0]);
		if (key.objectid != inode->i_ino ||
		    key.type != BTRFS_INODE_REF_KEY)
			break;
		ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
		ptr_end = ptr + btrfs_item_size_nr(path->nodes[0],
						   path->slots[0]);
		while (ptr < ptr_end) {
			struct btrfs_inode_ref *ref;

			ref = (struct btrfs_inode_ref *)ptr;
			name_len = btrfs_inode_ref_name_len(path->nodes[0],
							    ref);
			ptr = (unsigned long)(ref + 1) + name_len;
			nlink++;
		}

		if (key.offset == 0)
			break;
		key.offset--;
		btrfs_release_path(root, path);
	}
	btrfs_release_path(root, path);
	if (nlink != inode->i_nlink) {
		inode->i_nlink = nlink;
		btrfs_update_inode(trans, root, inode);
	}
	BTRFS_I(inode)->index_cnt = (u64)-1;

	if (inode->i_nlink == 0) {
		if (S_ISDIR(inode->i_mode)) {
			ret = replay_dir_deletes(trans, root, NULL, path,
						 inode->i_ino, 1);
			BUG_ON(ret);
		}
		ret = insert_orphan_item(trans, root, inode->i_ino);
		BUG_ON(ret);
	}
	btrfs_free_path(path);

	return 0;
}
static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    struct btrfs_path *path)
{
	int ret;
	struct btrfs_key key;
	struct inode *inode;

	key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = (u64)-1;
	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0)
			break;

		if (ret == 1) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}

		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid != BTRFS_TREE_LOG_FIXUP_OBJECTID ||
		    key.type != BTRFS_ORPHAN_ITEM_KEY)
			break;

		ret = btrfs_del_item(trans, root, path);
		BUG_ON(ret);

		btrfs_release_path(root, path);
		inode = read_one_inode(root, key.offset);
		BUG_ON(!inode);

		ret = fixup_inode_link_count(trans, root, inode);
		BUG_ON(ret);

		iput(inode);

		/*
		 * fixup on a directory may create new entries,
		 * make sure we always look for the highest possible
		 * offset
		 */
		key.offset = (u64)-1;
	}
	btrfs_release_path(root, path);
	return 0;
}

/*
 * record a given inode in the fixup dir so we can check its link
 * count when replay is done.  The link count is incremented here
 * so the inode won't go away until we check it
 */
static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      u64 objectid)
{
	struct btrfs_key key;
	int ret = 0;
	struct inode *inode;

	inode = read_one_inode(root, objectid);
	BUG_ON(!inode);

	key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
	btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY);
	key.offset = objectid;

	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);

	btrfs_release_path(root, path);
	if (ret == 0) {
		btrfs_inc_nlink(inode);
		btrfs_update_inode(trans, root, inode);
	} else if (ret == -EEXIST) {
		ret = 0;
	} else {
		BUG();
	}
	iput(inode);

	return ret;
}
/*
 * when replaying the log for a directory, we only insert names
 * for inodes that actually exist.  This means an fsync on a directory
 * does not implicitly fsync all the new files in it
 */
static noinline int insert_one_name(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    u64 dirid, u64 index,
				    char *name, int name_len, u8 type,
				    struct btrfs_key *location)
{
	struct inode *inode;
	struct inode *dir;
	int ret;

	inode = read_one_inode(root, location->objectid);
	if (!inode)
		return -ENOENT;

	dir = read_one_inode(root, dirid);
	if (!dir) {
		iput(inode);
		return -EIO;
	}
	ret = btrfs_add_link(trans, dir, inode, name, name_len, 1, index);

	/* FIXME, put inode into FIXUP list */

	iput(inode);
	iput(dir);
	return ret;
}

/*
 * take a single entry in a log directory item and replay it into
 * the subvolume.
 *
 * if a conflicting item exists in the subdirectory already,
 * the inode it points to is unlinked and put into the link count
 * fix up tree.
 *
 * If a name from the log points to a file or directory that does
 * not exist in the FS, it is skipped.  fsyncs on directories
 * do not force down inodes inside that directory, just changes to the
 * names or unlinks in a directory.
 */
static noinline int replay_one_name(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct extent_buffer *eb,
				    struct btrfs_dir_item *di,
				    struct btrfs_key *key)
{
	char *name;
	int name_len;
	struct btrfs_dir_item *dst_di;
	struct btrfs_key found_key;
	struct btrfs_key log_key;
	struct inode *dir;
	u8 log_type;
	int exists;
	int ret;

	dir = read_one_inode(root, key->objectid);
	BUG_ON(!dir);

	name_len = btrfs_dir_name_len(eb, di);
	name = kmalloc(name_len, GFP_NOFS);
	log_type = btrfs_dir_type(eb, di);
	read_extent_buffer(eb, name, (unsigned long)(di + 1),
			   name_len);

	btrfs_dir_item_key_to_cpu(eb, di, &log_key);
	exists = btrfs_lookup_inode(trans, root, path, &log_key, 0);
	if (exists == 0)
		exists = 1;
	else
		exists = 0;
	btrfs_release_path(root, path);

	if (key->type == BTRFS_DIR_ITEM_KEY) {
		dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
					       name, name_len, 1);
	} else if (key->type == BTRFS_DIR_INDEX_KEY) {
		dst_di = btrfs_lookup_dir_index_item(trans, root, path,
						     key->objectid,
						     key->offset, name,
						     name_len, 1);
	} else {
		BUG();
	}
	if (!dst_di || IS_ERR(dst_di)) {
		/* we need a sequence number to insert, so we only
		 * do inserts for the BTRFS_DIR_INDEX_KEY types
		 */
		if (key->type != BTRFS_DIR_INDEX_KEY)
			goto out;
		goto insert;
	}

	btrfs_dir_item_key_to_cpu(path->nodes[0], dst_di, &found_key);
	/* the existing item matches the logged item */
	if (found_key.objectid == log_key.objectid &&
	    found_key.type == log_key.type &&
	    found_key.offset == log_key.offset &&
	    btrfs_dir_type(path->nodes[0], dst_di) == log_type) {
		goto out;
	}

	/*
	 * don't drop the conflicting directory entry if the inode
	 * for the new entry doesn't exist
	 */
	if (!exists)
		goto out;

	ret = drop_one_dir_item(trans, root, path, dir, dst_di);
	BUG_ON(ret);

	if (key->type == BTRFS_DIR_INDEX_KEY)
		goto insert;
out:
	btrfs_release_path(root, path);
	kfree(name);
	iput(dir);
	return 0;

insert:
	btrfs_release_path(root, path);
	ret = insert_one_name(trans, root, path, key->objectid, key->offset,
			      name, name_len, log_type, &log_key);

	BUG_ON(ret && ret != -ENOENT);
	goto out;
}
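/*
 * Summary of replay_one_name() outcomes, for reference:
 *
 *	logged entry already present and identical -> nothing to do
 *	conflicting entry, target inode exists     -> drop_one_dir_item(),
 *						      then insert if the key
 *						      carries a DIR_INDEX
 *						      sequence number
 *	entry missing, key is a DIR_INDEX          -> insert_one_name()
 *	entry missing, key is a DIR_ITEM           -> skipped; the matching
 *						      DIR_INDEX key is the
 *						      one with the sequence
 *						      number we need
 */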
/*
 * find all the names in a directory item and reconcile them into
 * the subvolume.  Only BTRFS_DIR_ITEM_KEY types will have more than
 * one name in a directory item, but the same code gets used for
 * both directory index types
 */
static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
					struct btrfs_root *root,
					struct btrfs_path *path,
					struct extent_buffer *eb, int slot,
					struct btrfs_key *key)
{
	int ret;
	u32 item_size = btrfs_item_size_nr(eb, slot);
	struct btrfs_dir_item *di;
	int name_len;
	unsigned long ptr;
	unsigned long ptr_end;

	ptr = btrfs_item_ptr_offset(eb, slot);
	ptr_end = ptr + item_size;
	while (ptr < ptr_end) {
		di = (struct btrfs_dir_item *)ptr;
		name_len = btrfs_dir_name_len(eb, di);
		ret = replay_one_name(trans, root, path, eb, di, key);
		BUG_ON(ret);
		ptr = (unsigned long)(di + 1);
		ptr += name_len;
	}
	return 0;
}

/*
 * directory replay has two parts.  There are the standard directory
 * items in the log copied from the subvolume, and range items
 * created in the log while the subvolume was logged.
 *
 * The range items tell us which parts of the key space the log
 * is authoritative for.  During replay, if a key in the subvolume
 * directory is in a logged range item, but not actually in the log
 * that means it was deleted from the directory before the fsync
 * and should be removed.
 */
static noinline int find_dir_range(struct btrfs_root *root,
				   struct btrfs_path *path,
				   u64 dirid, int key_type,
				   u64 *start_ret, u64 *end_ret)
{
	struct btrfs_key key;
	u64 found_end;
	struct btrfs_dir_log_item *item;
	int ret;
	int nritems;

	if (*start_ret == (u64)-1)
		return 1;

	key.objectid = dirid;
	key.type = key_type;
	key.offset = *start_ret;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		if (path->slots[0] == 0)
			goto out;
		path->slots[0]--;
	}
	if (ret != 0)
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	if (key.type != key_type || key.objectid != dirid) {
		ret = 1;
		goto next;
	}
	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_dir_log_item);
	found_end = btrfs_dir_log_end(path->nodes[0], item);

	if (*start_ret >= key.offset && *start_ret <= found_end) {
		ret = 0;
		*start_ret = key.offset;
		*end_ret = found_end;
		goto out;
	}
	ret = 1;
next:
	/* check the next slot in the tree to see if it is a valid item */
	nritems = btrfs_header_nritems(path->nodes[0]);
	if (path->slots[0] >= nritems) {
		ret = btrfs_next_leaf(root, path);
		if (ret)
			goto out;
	} else {
		path->slots[0]++;
	}

	btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);

	if (key.type != key_type || key.objectid != dirid) {
		ret = 1;
		goto out;
	}
	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_dir_log_item);
	found_end = btrfs_dir_log_end(path->nodes[0], item);
	*start_ret = key.offset;
	*end_ret = found_end;
	ret = 0;
out:
	btrfs_release_path(root, path);
	return ret;
}
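/*
 * Example of the range items find_dir_range() reads (illustrative
 * numbers): logging dir 257 while index entries 3..7 exist leaves
 *
 *	key  = (257, BTRFS_DIR_LOG_INDEX_KEY, 3)
 *	item = struct btrfs_dir_log_item { .end = 7 }
 *
 * meaning the log is authoritative for index offsets [3, 7]; any
 * subvolume entry in that range that is missing from the log was
 * deleted before the fsync and must be removed during replay.
 */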
/*
 * this looks for a given directory item in the log.  If the directory
 * item is not in the log, the item is removed and the inode it points
 * to is unlinked
 */
static noinline int check_item_in_log(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_root *log,
				      struct btrfs_path *path,
				      struct btrfs_path *log_path,
				      struct inode *dir,
				      struct btrfs_key *dir_key)
{
	int ret;
	struct extent_buffer *eb;
	int slot;
	u32 item_size;
	struct btrfs_dir_item *di;
	struct btrfs_dir_item *log_di;
	int name_len;
	unsigned long ptr;
	unsigned long ptr_end;
	char *name;
	struct inode *inode;
	struct btrfs_key location;

again:
	eb = path->nodes[0];
	slot = path->slots[0];
	item_size = btrfs_item_size_nr(eb, slot);
	ptr = btrfs_item_ptr_offset(eb, slot);
	ptr_end = ptr + item_size;
	while (ptr < ptr_end) {
		di = (struct btrfs_dir_item *)ptr;
		name_len = btrfs_dir_name_len(eb, di);
		name = kmalloc(name_len, GFP_NOFS);
		if (!name) {
			ret = -ENOMEM;
			goto out;
		}
		read_extent_buffer(eb, name, (unsigned long)(di + 1),
				   name_len);
		log_di = NULL;
		if (log && dir_key->type == BTRFS_DIR_ITEM_KEY) {
			log_di = btrfs_lookup_dir_item(trans, log, log_path,
						       dir_key->objectid,
						       name, name_len, 0);
		} else if (log && dir_key->type == BTRFS_DIR_INDEX_KEY) {
			log_di = btrfs_lookup_dir_index_item(trans, log,
							     log_path,
							     dir_key->objectid,
							     dir_key->offset,
							     name, name_len, 0);
		}
		if (!log_di || IS_ERR(log_di)) {
			btrfs_dir_item_key_to_cpu(eb, di, &location);
			btrfs_release_path(root, path);
			btrfs_release_path(log, log_path);
			inode = read_one_inode(root, location.objectid);
			BUG_ON(!inode);

			ret = link_to_fixup_dir(trans, root,
						path, location.objectid);
			BUG_ON(ret);
			btrfs_inc_nlink(inode);
			ret = btrfs_unlink_inode(trans, root, dir, inode,
						 name, name_len);
			BUG_ON(ret);
			kfree(name);
			iput(inode);

			/* there might still be more names under this key
			 * check and repeat if required
			 */
			ret = btrfs_search_slot(NULL, root, dir_key, path,
						0, 0);
			if (ret == 0)
				goto again;
			ret = 0;
			goto out;
		}
		btrfs_release_path(log, log_path);
		kfree(name);

		ptr = (unsigned long)(di + 1);
		ptr += name_len;
	}
	ret = 0;
out:
	btrfs_release_path(root, path);
	btrfs_release_path(log, log_path);
	return ret;
}
/*
 * deletion replay happens before we copy any new directory items
 * out of the log or out of backreferences from inodes.  It
 * scans the log to find ranges of keys that log is authoritative for,
 * and then scans the directory to find items in those ranges that are
 * not present in the log.
 *
 * Anything we don't find in the log is unlinked and removed from the
 * directory.
 */
static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_root *log,
				       struct btrfs_path *path,
				       u64 dirid, int del_all)
{
	u64 range_start;
	u64 range_end;
	int key_type = BTRFS_DIR_LOG_ITEM_KEY;
	int ret = 0;
	struct btrfs_key dir_key;
	struct btrfs_key found_key;
	struct btrfs_path *log_path;
	struct inode *dir;

	dir_key.objectid = dirid;
	dir_key.type = BTRFS_DIR_ITEM_KEY;
	log_path = btrfs_alloc_path();
	if (!log_path)
		return -ENOMEM;

	dir = read_one_inode(root, dirid);
	/* it isn't an error if the inode isn't there, that can happen
	 * because we replay the deletes before we copy in the inode item
	 * from the log
	 */
	if (!dir) {
		btrfs_free_path(log_path);
		return 0;
	}
again:
	range_start = 0;
	range_end = 0;
	while (1) {
		if (del_all)
			range_end = (u64)-1;
		else {
			ret = find_dir_range(log, path, dirid, key_type,
					     &range_start, &range_end);
			if (ret != 0)
				break;
		}

		dir_key.offset = range_start;
		while (1) {
			int nritems;
			ret = btrfs_search_slot(NULL, root, &dir_key, path,
						0, 0);
			if (ret < 0)
				goto out;

			nritems = btrfs_header_nritems(path->nodes[0]);
			if (path->slots[0] >= nritems) {
				ret = btrfs_next_leaf(root, path);
				if (ret)
					break;
			}
			btrfs_item_key_to_cpu(path->nodes[0], &found_key,
					      path->slots[0]);
			if (found_key.objectid != dirid ||
			    found_key.type != dir_key.type)
				goto next_type;

			if (found_key.offset > range_end)
				break;

			ret = check_item_in_log(trans, root, log, path,
						log_path, dir,
						&found_key);
			BUG_ON(ret);
			if (found_key.offset == (u64)-1)
				break;
			dir_key.offset = found_key.offset + 1;
		}
		btrfs_release_path(root, path);
		if (range_end == (u64)-1)
			break;
		range_start = range_end + 1;
	}

next_type:
	ret = 0;
	if (key_type == BTRFS_DIR_LOG_ITEM_KEY) {
		key_type = BTRFS_DIR_LOG_INDEX_KEY;
		dir_key.type = BTRFS_DIR_INDEX_KEY;
		btrfs_release_path(root, path);
		goto again;
	}
out:
	btrfs_release_path(root, path);
	btrfs_free_path(log_path);
	iput(dir);
	return ret;
}
/*
 * the process_func used to replay items from the log tree.  This
 * gets called in two different stages.  The first stage just looks
 * for inodes and makes sure they are all copied into the subvolume.
 *
 * The second stage copies all the other item types from the log into
 * the subvolume.  The two stage approach is slower, but gets rid of
 * lots of complexity around inodes referencing other inodes that exist
 * only in the log (references come from either directory items or inode
 * back refs).
 */
static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
			     struct walk_control *wc, u64 gen)
{
	int nritems;
	struct btrfs_path *path;
	struct btrfs_root *root = wc->replay_dest;
	struct btrfs_key key;
	int level;
	int i;
	int ret;

	btrfs_read_buffer(eb, gen);

	level = btrfs_header_level(eb);

	if (level != 0)
		return 0;

	path = btrfs_alloc_path();
	BUG_ON(!path);

	nritems = btrfs_header_nritems(eb);
	for (i = 0; i < nritems; i++) {
		btrfs_item_key_to_cpu(eb, &key, i);

		/* inode keys are done during the first stage */
		if (key.type == BTRFS_INODE_ITEM_KEY &&
		    wc->stage == LOG_WALK_REPLAY_INODES) {
			struct btrfs_inode_item *inode_item;
			u32 mode;

			inode_item = btrfs_item_ptr(eb, i,
					    struct btrfs_inode_item);
			mode = btrfs_inode_mode(eb, inode_item);
			if (S_ISDIR(mode)) {
				ret = replay_dir_deletes(wc->trans,
					 root, log, path, key.objectid, 0);
				BUG_ON(ret);
			}
			ret = overwrite_item(wc->trans, root, path,
					     eb, i, &key);
			BUG_ON(ret);

			/* for regular files, make sure the corresponding
			 * orphan item exists.  extents past the new EOF
			 * will be truncated later by orphan cleanup.
			 */
			if (S_ISREG(mode)) {
				ret = insert_orphan_item(wc->trans, root,
							 key.objectid);
				BUG_ON(ret);
			}

			ret = link_to_fixup_dir(wc->trans, root,
						path, key.objectid);
			BUG_ON(ret);
		}
		if (wc->stage < LOG_WALK_REPLAY_ALL)
			continue;

		/* these keys are simply copied */
		if (key.type == BTRFS_XATTR_ITEM_KEY) {
			ret = overwrite_item(wc->trans, root, path,
					     eb, i, &key);
			BUG_ON(ret);
		} else if (key.type == BTRFS_INODE_REF_KEY) {
			ret = add_inode_ref(wc->trans, root, log, path,
					    eb, i, &key);
			BUG_ON(ret && ret != -ENOENT);
		} else if (key.type == BTRFS_EXTENT_DATA_KEY) {
			ret = replay_one_extent(wc->trans, root, path,
						eb, i, &key);
			BUG_ON(ret);
		} else if (key.type == BTRFS_DIR_ITEM_KEY ||
			   key.type == BTRFS_DIR_INDEX_KEY) {
			ret = replay_one_dir_item(wc->trans, root, path,
						  eb, i, &key);
			BUG_ON(ret);
		}
	}
	btrfs_free_path(path);
	return 0;
}
static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_path *path, int *level,
				       struct walk_control *wc)
{
	u64 root_owner;
	u64 bytenr;
	u64 ptr_gen;
	struct extent_buffer *next;
	struct extent_buffer *cur;
	struct extent_buffer *parent;
	u32 blocksize;
	int ret = 0;

	WARN_ON(*level < 0);
	WARN_ON(*level >= BTRFS_MAX_LEVEL);

	while (*level > 0) {
		WARN_ON(*level < 0);
		WARN_ON(*level >= BTRFS_MAX_LEVEL);
		cur = path->nodes[*level];

		WARN_ON(btrfs_header_level(cur) != *level);

		if (path->slots[*level] >=
		    btrfs_header_nritems(cur))
			break;

		bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
		ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
		blocksize = btrfs_level_size(root, *level - 1);

		parent = path->nodes[*level];
		root_owner = btrfs_header_owner(parent);

		next = btrfs_find_create_tree_block(root, bytenr, blocksize);

		if (*level == 1) {
			wc->process_func(root, next, wc, ptr_gen);

			path->slots[*level]++;
			if (wc->free) {
				btrfs_read_buffer(next, ptr_gen);

				btrfs_tree_lock(next);
				clean_tree_block(trans, root, next);
				btrfs_set_lock_blocking(next);
				btrfs_wait_tree_block_writeback(next);
				btrfs_tree_unlock(next);

				WARN_ON(root_owner !=
					BTRFS_TREE_LOG_OBJECTID);
				ret = btrfs_free_reserved_extent(root,
							 bytenr, blocksize);
				BUG_ON(ret);
			}
			free_extent_buffer(next);
			continue;
		}
		btrfs_read_buffer(next, ptr_gen);

		WARN_ON(*level <= 0);
		if (path->nodes[*level-1])
			free_extent_buffer(path->nodes[*level-1]);
		path->nodes[*level-1] = next;
		*level = btrfs_header_level(next);
		path->slots[*level] = 0;
		cond_resched();
	}
	WARN_ON(*level < 0);
	WARN_ON(*level >= BTRFS_MAX_LEVEL);

	path->slots[*level] = btrfs_header_nritems(path->nodes[*level]);

	cond_resched();
	return 0;
}
static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct btrfs_path *path, int *level,
				     struct walk_control *wc)
{
	u64 root_owner;
	int i;
	int slot;
	int ret;

	for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) {
		slot = path->slots[i];
		if (slot + 1 < btrfs_header_nritems(path->nodes[i])) {
			path->slots[i]++;
			*level = i;
			WARN_ON(*level == 0);
			return 0;
		} else {
			struct extent_buffer *parent;
			if (path->nodes[*level] == root->node)
				parent = path->nodes[*level];
			else
				parent = path->nodes[*level + 1];

			root_owner = btrfs_header_owner(parent);
			wc->process_func(root, path->nodes[*level], wc,
				 btrfs_header_generation(path->nodes[*level]));
			if (wc->free) {
				struct extent_buffer *next;

				next = path->nodes[*level];

				btrfs_tree_lock(next);
				clean_tree_block(trans, root, next);
				btrfs_set_lock_blocking(next);
				btrfs_wait_tree_block_writeback(next);
				btrfs_tree_unlock(next);

				WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID);
				ret = btrfs_free_reserved_extent(root,
						path->nodes[*level]->start,
						path->nodes[*level]->len);
				BUG_ON(ret);
			}
			free_extent_buffer(path->nodes[*level]);
			path->nodes[*level] = NULL;
			*level = i + 1;
		}
	}
	return 1;
}

/*
 * drop the reference count on the tree rooted at 'snap'.  This traverses
 * the tree freeing any blocks that have a ref count of zero after being
 * decremented.
 */
static int walk_log_tree(struct btrfs_trans_handle *trans,
			 struct btrfs_root *log, struct walk_control *wc)
{
	int ret = 0;
	int wret;
	int level;
	struct btrfs_path *path;
	int i;
	int orig_level;

	path = btrfs_alloc_path();
	BUG_ON(!path);

	level = btrfs_header_level(log->node);
	orig_level = level;
	path->nodes[level] = log->node;
	extent_buffer_get(log->node);
	path->slots[level] = 0;

	while (1) {
		wret = walk_down_log_tree(trans, log, path, &level, wc);
		if (wret > 0)
			break;
		if (wret < 0)
			ret = wret;

		wret = walk_up_log_tree(trans, log, path, &level, wc);
		if (wret > 0)
			break;
		if (wret < 0)
			ret = wret;
	}

	/* was the root node processed? if not, catch it here */
	if (path->nodes[orig_level]) {
		wc->process_func(log, path->nodes[orig_level], wc,
			 btrfs_header_generation(path->nodes[orig_level]));
		if (wc->free) {
			struct extent_buffer *next;

			next = path->nodes[orig_level];

			btrfs_tree_lock(next);
			clean_tree_block(trans, log, next);
			btrfs_set_lock_blocking(next);
			btrfs_wait_tree_block_writeback(next);
			btrfs_tree_unlock(next);

			WARN_ON(log->root_key.objectid !=
				BTRFS_TREE_LOG_OBJECTID);
			ret = btrfs_free_reserved_extent(log, next->start,
							 next->len);
			BUG_ON(ret);
		}
	}

	for (i = 0; i <= orig_level; i++) {
		if (path->nodes[i]) {
			free_extent_buffer(path->nodes[i]);
			path->nodes[i] = NULL;
		}
	}
	btrfs_free_path(path);
	return ret;
}

/*
 * helper function to update the item for a given subvolume's log root
 * in the tree of log roots
 */
static int update_log_root(struct btrfs_trans_handle *trans,
			   struct btrfs_root *log)
{
	int ret;

	if (log->log_transid == 1) {
		/* insert root item on the first sync */
		ret = btrfs_insert_root(trans, log->fs_info->log_root_tree,
					&log->root_key, &log->root_item);
	} else {
		ret = btrfs_update_root(trans, log->fs_info->log_root_tree,
					&log->root_key, &log->root_item);
	}
	return ret;
}

static int wait_log_commit(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, unsigned long transid)
{
	DEFINE_WAIT(wait);
	int index = transid % 2;

	/*
	 * we only allow two pending log transactions at a time,
	 * so we know that if ours is more than 2 older than the
	 * current transaction, we're done
	 */
	do {
		prepare_to_wait(&root->log_commit_wait[index],
				&wait, TASK_UNINTERRUPTIBLE);
		mutex_unlock(&root->log_mutex);

		if (root->fs_info->last_trans_log_full_commit !=
		    trans->transid && root->log_transid < transid + 2 &&
		    atomic_read(&root->log_commit[index]))
			schedule();

		finish_wait(&root->log_commit_wait[index], &wait);
		mutex_lock(&root->log_mutex);
	} while (root->log_transid < transid + 2 &&
		 atomic_read(&root->log_commit[index]));
	return 0;
}

static int wait_for_writer(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root)
{
	DEFINE_WAIT(wait);
	while (atomic_read(&root->log_writers)) {
		prepare_to_wait(&root->log_writer_wait,
				&wait, TASK_UNINTERRUPTIBLE);
		mutex_unlock(&root->log_mutex);
		if (root->fs_info->last_trans_log_full_commit !=
		    trans->transid && atomic_read(&root->log_writers))
			schedule();
		mutex_lock(&root->log_mutex);
		finish_wait(&root->log_writer_wait, &wait);
	}
	return 0;
}
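/*
 * The commit path below uses two flip-flop slots (log_transid % 2), so
 * at most two log commits can be in flight at once:
 *
 *	transid N	-> slot 0: log_commit[0], log_commit_wait[0]
 *	transid N + 1	-> slot 1: log_commit[1], log_commit_wait[1]
 *	transid N + 2	-> slot 0 again, once N has finished
 *
 * wait_log_commit() above relies on this: once root->log_transid has
 * moved past transid + 1, the commit being waited on must be done.
 */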
/*
 * btrfs_sync_log sends a given tree log down to the disk and
 * updates the super blocks to record it.  When this call is done,
 * you know that any inodes previously logged are safely on disk only
 * if it returns 0.
 *
 * Any other return value means you need to call btrfs_commit_transaction.
 * Some of the edge cases for fsyncing directories that have had unlinks
 * or renames done in the past mean that sometimes the only safe
 * fsync is to commit the whole FS.  When btrfs_sync_log returns -EAGAIN,
 * that has happened.
 */
int btrfs_sync_log(struct btrfs_trans_handle *trans,
		   struct btrfs_root *root)
{
	int index1;
	int index2;
	int mark;
	int ret;
	struct btrfs_root *log = root->log_root;
	struct btrfs_root *log_root_tree = root->fs_info->log_root_tree;
	unsigned long log_transid = 0;

	mutex_lock(&root->log_mutex);
	index1 = root->log_transid % 2;
	if (atomic_read(&root->log_commit[index1])) {
		wait_log_commit(trans, root, root->log_transid);
		mutex_unlock(&root->log_mutex);
		return 0;
	}
	atomic_set(&root->log_commit[index1], 1);

	/* wait for previous tree log sync to complete */
	if (atomic_read(&root->log_commit[(index1 + 1) % 2]))
		wait_log_commit(trans, root, root->log_transid - 1);

	while (1) {
		unsigned long batch = root->log_batch;
		if (root->log_multiple_pids) {
			mutex_unlock(&root->log_mutex);
			schedule_timeout_uninterruptible(1);
			mutex_lock(&root->log_mutex);
		}
		wait_for_writer(trans, root);
		if (batch == root->log_batch)
			break;
	}

	/* bail out if we need to do a full commit */
	if (root->fs_info->last_trans_log_full_commit == trans->transid) {
		ret = -EAGAIN;
		mutex_unlock(&root->log_mutex);
		goto out;
	}

	log_transid = root->log_transid;
	if (log_transid % 2 == 0)
		mark = EXTENT_DIRTY;
	else
		mark = EXTENT_NEW;

	/* we start IO on all the marked extents here, but we don't actually
	 * wait for them until later.
	 */
	ret = btrfs_write_marked_extents(log, &log->dirty_log_pages, mark);
	BUG_ON(ret);

	btrfs_set_root_node(&log->root_item, log->node);

	root->log_batch = 0;
	root->log_transid++;
	log->log_transid = root->log_transid;
	root->log_start_pid = 0;
	smp_mb();
	/*
	 * IO has been started, blocks of the log tree have WRITTEN flag set
	 * in their headers.  New modifications of the log will be written to
	 * new positions, so it's safe to allow log writers to go in.
	 */
	mutex_unlock(&root->log_mutex);

	mutex_lock(&log_root_tree->log_mutex);
	log_root_tree->log_batch++;
	atomic_inc(&log_root_tree->log_writers);
	mutex_unlock(&log_root_tree->log_mutex);

	ret = update_log_root(trans, log);

	mutex_lock(&log_root_tree->log_mutex);
	if (atomic_dec_and_test(&log_root_tree->log_writers)) {
		smp_mb();
		if (waitqueue_active(&log_root_tree->log_writer_wait))
			wake_up(&log_root_tree->log_writer_wait);
	}

	if (ret) {
		BUG_ON(ret != -ENOSPC);
		root->fs_info->last_trans_log_full_commit = trans->transid;
		btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
		mutex_unlock(&log_root_tree->log_mutex);
		ret = -EAGAIN;
		goto out;
	}

	index2 = log_root_tree->log_transid % 2;
	if (atomic_read(&log_root_tree->log_commit[index2])) {
		btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
		wait_log_commit(trans, log_root_tree,
				log_root_tree->log_transid);
		mutex_unlock(&log_root_tree->log_mutex);
		ret = 0;
		goto out;
	}
	atomic_set(&log_root_tree->log_commit[index2], 1);

	if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2])) {
		wait_log_commit(trans, log_root_tree,
				log_root_tree->log_transid - 1);
	}

	wait_for_writer(trans, log_root_tree);

	/*
	 * now that we've moved on to the tree of log tree roots,
	 * check the full commit flag again
	 */
	if (root->fs_info->last_trans_log_full_commit == trans->transid) {
		btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
		mutex_unlock(&log_root_tree->log_mutex);
		ret = -EAGAIN;
		goto out_wake_log_root;
	}

	ret = btrfs_write_and_wait_marked_extents(log_root_tree,
				&log_root_tree->dirty_log_pages,
				EXTENT_DIRTY | EXTENT_NEW);
	BUG_ON(ret);
	btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);

	btrfs_set_super_log_root(&root->fs_info->super_for_commit,
				 log_root_tree->node->start);
	btrfs_set_super_log_root_level(&root->fs_info->super_for_commit,
				btrfs_header_level(log_root_tree->node));

	log_root_tree->log_batch = 0;
	log_root_tree->log_transid++;
	smp_mb();

	mutex_unlock(&log_root_tree->log_mutex);

	/*
	 * nobody else is going to jump in and write the ctree
	 * super here because the log_commit atomic below is protecting
	 * us.  We must be called with a transaction handle pinning
	 * the running transaction open, so a full commit can't hop
	 * in and cause problems either.
	 */
	write_ctree_super(trans, root->fs_info->tree_root, 1);
	ret = 0;

	mutex_lock(&root->log_mutex);
	if (root->last_log_commit < log_transid)
		root->last_log_commit = log_transid;
	mutex_unlock(&root->log_mutex);

out_wake_log_root:
	atomic_set(&log_root_tree->log_commit[index2], 0);
	smp_mb();
	if (waitqueue_active(&log_root_tree->log_commit_wait[index2]))
		wake_up(&log_root_tree->log_commit_wait[index2]);
out:
	atomic_set(&root->log_commit[index1], 0);
	smp_mb();
	if (waitqueue_active(&root->log_commit_wait[index1]))
		wake_up(&root->log_commit_wait[index1]);
	return ret;
}
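/*
 * Illustrative caller pattern (a sketch only; the real caller is the
 * fsync path, which already holds an open transaction handle):
 *
 *	ret = btrfs_sync_log(trans, root);
 *	if (ret)
 *		ret = btrfs_commit_transaction(trans, root);
 *
 * -EAGAIN here means the log could not safely express the change, and
 * only a full transaction commit makes the data durable.
 */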
static void free_log_tree(struct btrfs_trans_handle *trans,
			  struct btrfs_root *log)
{
	int ret;
	u64 start;
	u64 end;
	struct walk_control wc = {
		.free = 1,
		.process_func = process_one_buffer
	};

	ret = walk_log_tree(trans, log, &wc);
	BUG_ON(ret);

	while (1) {
		ret = find_first_extent_bit(&log->dirty_log_pages,
				0, &start, &end, EXTENT_DIRTY | EXTENT_NEW);
		if (ret)
			break;

		clear_extent_bits(&log->dirty_log_pages, start, end,
				  EXTENT_DIRTY | EXTENT_NEW, GFP_NOFS);
	}

	free_extent_buffer(log->node);
	kfree(log);
}

/*
 * free all the extents used by the tree log.  This should be called
 * at commit time of the full transaction
 */
int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root)
{
	if (root->log_root) {
		free_log_tree(trans, root->log_root);
		root->log_root = NULL;
	}
	return 0;
}

int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans,
			     struct btrfs_fs_info *fs_info)
{
	if (fs_info->log_root_tree) {
		free_log_tree(trans, fs_info->log_root_tree);
		fs_info->log_root_tree = NULL;
	}
	return 0;
}
/*
 * If both a file and directory are logged, and unlinks or renames are
 * mixed in, we have a few interesting corners:
 *
 * create file X in dir Y
 * link file X to X.link in dir Y
 * fsync file X
 * unlink file X but leave X.link
 * fsync dir Y
 *
 * After a crash we would expect only X.link to exist.  But file X
 * didn't get fsync'd again so the log has back refs for X and X.link.
 *
 * We solve this by removing directory entries and inode backrefs from the
 * log when a file that was logged in the current transaction is
 * unlinked.  Any later fsync will include the updated log entries, and
 * we'll be able to reconstruct the proper directory items from backrefs.
 *
 * This optimization allows us to avoid relogging the entire inode
 * or the entire directory.
 */
int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 const char *name, int name_len,
				 struct inode *dir, u64 index)
{
	struct btrfs_root *log;
	struct btrfs_dir_item *di;
	struct btrfs_path *path;
	int ret;
	int err = 0;
	int bytes_del = 0;

	if (BTRFS_I(dir)->logged_trans < trans->transid)
		return 0;

	ret = join_running_log_trans(root);
	if (ret)
		return 0;

	mutex_lock(&BTRFS_I(dir)->log_mutex);

	log = root->log_root;
	path = btrfs_alloc_path();
	di = btrfs_lookup_dir_item(trans, log, path, dir->i_ino,
				   name, name_len, -1);
	if (IS_ERR(di)) {
		err = PTR_ERR(di);
		goto fail;
	}
	if (di) {
		ret = btrfs_delete_one_dir_name(trans, log, path, di);
		bytes_del += name_len;
		BUG_ON(ret);
	}
	btrfs_release_path(log, path);
	di = btrfs_lookup_dir_index_item(trans, log, path, dir->i_ino,
					 index, name, name_len, -1);
	if (IS_ERR(di)) {
		err = PTR_ERR(di);
		goto fail;
	}
	if (di) {
		ret = btrfs_delete_one_dir_name(trans, log, path, di);
		bytes_del += name_len;
		BUG_ON(ret);
	}

	/* update the directory size in the log to reflect the names
	 * we have removed
	 */
	if (bytes_del) {
		struct btrfs_key key;

		key.objectid = dir->i_ino;
		key.offset = 0;
		key.type = BTRFS_INODE_ITEM_KEY;
		btrfs_release_path(log, path);

		ret = btrfs_search_slot(trans, log, &key, path, 0, 1);
		if (ret < 0) {
			err = ret;
			goto fail;
		}
		if (ret == 0) {
			struct btrfs_inode_item *item;
			u64 i_size;

			item = btrfs_item_ptr(path->nodes[0], path->slots[0],
					      struct btrfs_inode_item);
			i_size = btrfs_inode_size(path->nodes[0], item);
			if (i_size > bytes_del)
				i_size -= bytes_del;
			else
				i_size = 0;
			btrfs_set_inode_size(path->nodes[0], item, i_size);
			btrfs_mark_buffer_dirty(path->nodes[0]);
		} else
			ret = 0;
		btrfs_release_path(log, path);
	}
fail:
	btrfs_free_path(path);
	mutex_unlock(&BTRFS_I(dir)->log_mutex);
	if (ret == -ENOSPC) {
		root->fs_info->last_trans_log_full_commit = trans->transid;
		ret = 0;
	}
	btrfs_end_log_trans(root);

	return err;
}

/* see comments for btrfs_del_dir_entries_in_log */
int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       const char *name, int name_len,
			       struct inode *inode, u64 dirid)
{
	struct btrfs_root *log;
	u64 index;
	int ret;

	if (BTRFS_I(inode)->logged_trans < trans->transid)
		return 0;

	ret = join_running_log_trans(root);
	if (ret)
		return 0;
	log = root->log_root;
	mutex_lock(&BTRFS_I(inode)->log_mutex);

	ret = btrfs_del_inode_ref(trans, log, name, name_len, inode->i_ino,
				  dirid, &index);
	mutex_unlock(&BTRFS_I(inode)->log_mutex);
	if (ret == -ENOSPC) {
		root->fs_info->last_trans_log_full_commit = trans->transid;
		ret = 0;
	}
	btrfs_end_log_trans(root);

	return ret;
}
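/*
 * Sketch of how the unlink path is expected to drive the two helpers
 * above (illustrative only; variable names are assumed, modeled on
 * the directory-entry removal code in the unlink path):
 *
 *	btrfs_del_inode_ref_in_log(trans, root, name, name_len,
 *				   inode, dir->i_ino);
 *	btrfs_del_dir_entries_in_log(trans, root, name, name_len,
 *				     dir, index);
 */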
/*
 * creates a range item in the log for 'dirid'.  first_offset and
 * last_offset tell us which parts of the key space the log should
 * be considered authoritative for.
 */
static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans,
				       struct btrfs_root *log,
				       struct btrfs_path *path,
				       int key_type, u64 dirid,
				       u64 first_offset, u64 last_offset)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_dir_log_item *item;

	key.objectid = dirid;
	key.offset = first_offset;
	if (key_type == BTRFS_DIR_ITEM_KEY)
		key.type = BTRFS_DIR_LOG_ITEM_KEY;
	else
		key.type = BTRFS_DIR_LOG_INDEX_KEY;
	ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*item));
	if (ret)
		return ret;

	item = btrfs_item_ptr(path->nodes[0], path->slots[0],
			      struct btrfs_dir_log_item);
	btrfs_set_dir_log_end(path->nodes[0], item, last_offset);
	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_release_path(log, path);
	return 0;
}
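/*
 * Worked example (illustrative): if this transaction logged a
 * directory's index keys 5 through 9, a BTRFS_DIR_LOG_INDEX_KEY item
 * with key.offset == 5 and dir_log_end == 9 is inserted.  During
 * replay, an index key in [5, 9] found in the subvolume but missing
 * from the log is known to have been unlinked and is deleted.
 */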
/*
 * log all the items included in the current transaction for a given
 * directory.  This also creates the range items in the log tree required
 * to replay anything deleted before the fsync
 */
static noinline int log_dir_items(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct inode *inode,
			  struct btrfs_path *path,
			  struct btrfs_path *dst_path, int key_type,
			  u64 min_offset, u64 *last_offset_ret)
{
	struct btrfs_key min_key;
	struct btrfs_key max_key;
	struct btrfs_root *log = root->log_root;
	struct extent_buffer *src;
	int err = 0;
	int ret;
	int i;
	int nritems;
	u64 first_offset = min_offset;
	u64 last_offset = (u64)-1;

	max_key.objectid = inode->i_ino;
	max_key.offset = (u64)-1;
	max_key.type = key_type;

	min_key.objectid = inode->i_ino;
	min_key.type = key_type;
	min_key.offset = min_offset;

	path->keep_locks = 1;

	ret = btrfs_search_forward(root, &min_key, &max_key,
				   path, 0, trans->transid);

	/*
	 * we didn't find anything from this transaction, see if there
	 * is anything at all
	 */
	if (ret != 0 || min_key.objectid != inode->i_ino ||
	    min_key.type != key_type) {
		min_key.objectid = inode->i_ino;
		min_key.type = key_type;
		min_key.offset = (u64)-1;
		btrfs_release_path(root, path);
		ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
		if (ret < 0) {
			btrfs_release_path(root, path);
			return ret;
		}
		ret = btrfs_previous_item(root, path, inode->i_ino, key_type);

		/* if ret == 0 there are items for this type,
		 * create a range to tell us the last key of this type.
		 * otherwise, there are no items in this directory after
		 * *min_offset, and we create a range to indicate that.
		 */
		if (ret == 0) {
			struct btrfs_key tmp;
			btrfs_item_key_to_cpu(path->nodes[0], &tmp,
					      path->slots[0]);
			if (key_type == tmp.type)
				first_offset = max(min_offset, tmp.offset) + 1;
		}
		goto done;
	}

	/* go backward to find any previous key */
	ret = btrfs_previous_item(root, path, inode->i_ino, key_type);
	if (ret == 0) {
		struct btrfs_key tmp;
		btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
		if (key_type == tmp.type) {
			first_offset = tmp.offset;
			ret = overwrite_item(trans, log, dst_path,
					     path->nodes[0], path->slots[0],
					     &tmp);
			if (ret) {
				err = ret;
				goto done;
			}
		}
	}
	btrfs_release_path(root, path);

	/* find the first key from this transaction again */
	ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
	if (ret != 0) {
		WARN_ON(1);
		goto done;
	}

	/*
	 * we have a block from this transaction, log every item in it
	 * from our directory
	 */
	while (1) {
		struct btrfs_key tmp;
		src = path->nodes[0];
		nritems = btrfs_header_nritems(src);
		for (i = path->slots[0]; i < nritems; i++) {
			btrfs_item_key_to_cpu(src, &min_key, i);

			if (min_key.objectid != inode->i_ino ||
			    min_key.type != key_type)
				goto done;
			ret = overwrite_item(trans, log, dst_path, src, i,
					     &min_key);
			if (ret) {
				err = ret;
				goto done;
			}
		}
		path->slots[0] = nritems;

		/*
		 * look ahead to the next item and see if it is also
		 * from this directory and from this transaction
		 */
		ret = btrfs_next_leaf(root, path);
		if (ret == 1) {
			last_offset = (u64)-1;
			goto done;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]);
		if (tmp.objectid != inode->i_ino || tmp.type != key_type) {
			last_offset = (u64)-1;
			goto done;
		}
		if (btrfs_header_generation(path->nodes[0]) != trans->transid) {
			ret = overwrite_item(trans, log, dst_path,
					     path->nodes[0], path->slots[0],
					     &tmp);
			if (ret)
				err = ret;
			else
				last_offset = tmp.offset;
			goto done;
		}
	}
done:
	btrfs_release_path(root, path);
	btrfs_release_path(log, dst_path);

	if (err == 0) {
		*last_offset_ret = last_offset;
		/*
		 * insert the log range keys to indicate where the log
		 * is valid
		 */
		ret = insert_dir_log_key(trans, log, path, key_type,
					 inode->i_ino, first_offset,
					 last_offset);
		if (ret)
			err = ret;
	}
	return err;
}
/*
 * logging directories is very similar to logging inodes.  We find all
 * the items from the current transaction and write them to the log.
 *
 * The recovery code scans the directory in the subvolume, and if it finds a
 * key in the range logged that is not present in the log tree, then it means
 * that dir entry was unlinked during the transaction.
 *
 * In order for that scan to work, we must include one key smaller than
 * the smallest logged by this transaction and one key larger than the largest
 * key logged by this transaction.
 */
static noinline int log_directory_changes(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct inode *inode,
			  struct btrfs_path *path,
			  struct btrfs_path *dst_path)
{
	u64 min_key;
	u64 max_key;
	int ret;
	int key_type = BTRFS_DIR_ITEM_KEY;

again:
	min_key = 0;
	max_key = 0;
	while (1) {
		ret = log_dir_items(trans, root, inode, path,
				    dst_path, key_type, min_key,
				    &max_key);
		if (ret)
			return ret;
		if (max_key == (u64)-1)
			break;
		min_key = max_key + 1;
	}

	if (key_type == BTRFS_DIR_ITEM_KEY) {
		key_type = BTRFS_DIR_INDEX_KEY;
		goto again;
	}
	return 0;
}

/*
 * a helper function to drop items from the log before we relog an
 * inode.  max_key_type indicates the highest item type to remove.
 * This cannot be run for file data extents because it does not
 * free the extents they point to.
 */
static int drop_objectid_items(struct btrfs_trans_handle *trans,
			       struct btrfs_root *log,
			       struct btrfs_path *path,
			       u64 objectid, int max_key_type)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_key found_key;

	key.objectid = objectid;
	key.type = max_key_type;
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(trans, log, &key, path, -1, 1);
		BUG_ON(ret == 0);
		if (ret < 0)
			break;

		if (path->slots[0] == 0)
			break;

		path->slots[0]--;
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);

		if (found_key.objectid != objectid)
			break;

		ret = btrfs_del_item(trans, log, path);
		BUG_ON(ret);
		btrfs_release_path(log, path);
	}
	btrfs_release_path(log, path);
	return ret;
}
static noinline int copy_items(struct btrfs_trans_handle *trans,
			       struct btrfs_root *log,
			       struct btrfs_path *dst_path,
			       struct extent_buffer *src,
			       int start_slot, int nr, int inode_only)
{
	unsigned long src_offset;
	unsigned long dst_offset;
	struct btrfs_file_extent_item *extent;
	struct btrfs_inode_item *inode_item;
	int ret;
	struct btrfs_key *ins_keys;
	u32 *ins_sizes;
	char *ins_data;
	int i;
	struct list_head ordered_sums;

	INIT_LIST_HEAD(&ordered_sums);

	ins_data = kmalloc(nr * sizeof(struct btrfs_key) +
			   nr * sizeof(u32), GFP_NOFS);
	ins_sizes = (u32 *)ins_data;
	ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32));

	for (i = 0; i < nr; i++) {
		ins_sizes[i] = btrfs_item_size_nr(src, i + start_slot);
		btrfs_item_key_to_cpu(src, ins_keys + i, i + start_slot);
	}
	ret = btrfs_insert_empty_items(trans, log, dst_path,
				       ins_keys, ins_sizes, nr);
	if (ret) {
		kfree(ins_data);
		return ret;
	}

	for (i = 0; i < nr; i++, dst_path->slots[0]++) {
		dst_offset = btrfs_item_ptr_offset(dst_path->nodes[0],
						   dst_path->slots[0]);

		src_offset = btrfs_item_ptr_offset(src, start_slot + i);

		copy_extent_buffer(dst_path->nodes[0], src, dst_offset,
				   src_offset, ins_sizes[i]);

		if (inode_only == LOG_INODE_EXISTS &&
		    ins_keys[i].type == BTRFS_INODE_ITEM_KEY) {
			inode_item = btrfs_item_ptr(dst_path->nodes[0],
						    dst_path->slots[0],
						    struct btrfs_inode_item);
			btrfs_set_inode_size(dst_path->nodes[0], inode_item, 0);

			/* set the generation to zero so the recovery code
			 * can tell the difference between logging an inode
			 * just to say 'this inode exists' and logging it to
			 * say 'update this inode with these values'
			 */
			btrfs_set_inode_generation(dst_path->nodes[0],
						   inode_item, 0);
		}
		/* take a reference on file data extents so that truncates
		 * or deletes of this inode don't have to relog the inode
		 * again
		 */
		if (btrfs_key_type(ins_keys + i) == BTRFS_EXTENT_DATA_KEY) {
			int found_type;
			extent = btrfs_item_ptr(src, start_slot + i,
						struct btrfs_file_extent_item);

			found_type = btrfs_file_extent_type(src, extent);
			if (found_type == BTRFS_FILE_EXTENT_REG ||
			    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
				u64 ds, dl, cs, cl;
				ds = btrfs_file_extent_disk_bytenr(src,
								   extent);
				/* ds == 0 is a hole */
				if (ds == 0)
					continue;

				dl = btrfs_file_extent_disk_num_bytes(src,
								      extent);
				cs = btrfs_file_extent_offset(src, extent);
				cl = btrfs_file_extent_num_bytes(src,
								 extent);
				if (btrfs_file_extent_compression(src,
								  extent)) {
					cs = 0;
					cl = dl;
				}

				ret = btrfs_lookup_csums_range(
						log->fs_info->csum_root,
						ds + cs, ds + cs + cl - 1,
						&ordered_sums);
				BUG_ON(ret);
			}
		}
	}

	btrfs_mark_buffer_dirty(dst_path->nodes[0]);
	btrfs_release_path(log, dst_path);
	kfree(ins_data);

	/*
	 * we have to do this after the loop above to avoid changing the
	 * log tree while trying to change the log tree.
	 */
	ret = 0;
	while (!list_empty(&ordered_sums)) {
		struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
						   struct btrfs_ordered_sum,
						   list);
		if (!ret)
			ret = btrfs_csum_file_blocks(trans, log, sums);
		list_del(&sums->list);
		kfree(sums);
	}
	return ret;
}
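/*
 * Worked example for the checksum copy above (illustrative): for a
 * compressed extent, cs is forced to 0 and cl to the on-disk size, so
 * csums are copied for the entire extent [ds, ds + dl); a regular
 * extent only copies csums for the referenced range
 * [ds + cs, ds + cs + cl).
 */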
/* log a single inode in the tree log.
 * At least one parent directory for this inode must exist in the tree
 * or be logged already.
 *
 * Any items from this inode changed by the current transaction are copied
 * to the log tree.  An extra reference is taken on any extents in this
 * file, allowing us to avoid a whole pile of corner cases around logging
 * blocks that have been removed from the tree.
 *
 * See LOG_INODE_ALL and related defines for a description of what inode_only
 * does.
 *
 * This handles both files and directories.
 */
static int btrfs_log_inode(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, struct inode *inode,
			   int inode_only)
{
	struct btrfs_path *path;
	struct btrfs_path *dst_path;
	struct btrfs_key min_key;
	struct btrfs_key max_key;
	struct btrfs_root *log = root->log_root;
	struct extent_buffer *src = NULL;
	int err = 0;
	int ret;
	int nritems;
	int ins_start_slot = 0;
	int ins_nr;

	path = btrfs_alloc_path();
	dst_path = btrfs_alloc_path();

	min_key.objectid = inode->i_ino;
	min_key.type = BTRFS_INODE_ITEM_KEY;
	min_key.offset = 0;

	max_key.objectid = inode->i_ino;

	/* today the code can only do partial logging of directories */
	if (!S_ISDIR(inode->i_mode))
		inode_only = LOG_INODE_ALL;

	if (inode_only == LOG_INODE_EXISTS || S_ISDIR(inode->i_mode))
		max_key.type = BTRFS_XATTR_ITEM_KEY;
	else
		max_key.type = (u8)-1;
	max_key.offset = (u64)-1;

	mutex_lock(&BTRFS_I(inode)->log_mutex);

	/*
	 * a brute force approach to making sure we get the most uptodate
	 * copies of everything.
	 */
	if (S_ISDIR(inode->i_mode)) {
		int max_key_type = BTRFS_DIR_LOG_INDEX_KEY;

		if (inode_only == LOG_INODE_EXISTS)
			max_key_type = BTRFS_XATTR_ITEM_KEY;
		ret = drop_objectid_items(trans, log, path,
					  inode->i_ino, max_key_type);
	} else {
		ret = btrfs_truncate_inode_items(trans, log, inode, 0, 0);
	}
	if (ret) {
		err = ret;
		goto out_unlock;
	}
	path->keep_locks = 1;

	while (1) {
		ins_nr = 0;
		ret = btrfs_search_forward(root, &min_key, &max_key,
					   path, 0, trans->transid);
		if (ret != 0)
			break;
again:
		/* note, ins_nr might be > 0 here, cleanup outside the loop */
		if (min_key.objectid != inode->i_ino)
			break;
		if (min_key.type > max_key.type)
			break;

		src = path->nodes[0];
		if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) {
			ins_nr++;
			goto next_slot;
		} else if (!ins_nr) {
			ins_start_slot = path->slots[0];
			ins_nr = 1;
			goto next_slot;
		}

		ret = copy_items(trans, log, dst_path, src, ins_start_slot,
				 ins_nr, inode_only);
		if (ret) {
			err = ret;
			goto out_unlock;
		}
		ins_nr = 1;
		ins_start_slot = path->slots[0];
next_slot:

		nritems = btrfs_header_nritems(path->nodes[0]);
		path->slots[0]++;
		if (path->slots[0] < nritems) {
			btrfs_item_key_to_cpu(path->nodes[0], &min_key,
					      path->slots[0]);
			goto again;
		}
		if (ins_nr) {
			ret = copy_items(trans, log, dst_path, src,
					 ins_start_slot,
					 ins_nr, inode_only);
			if (ret) {
				err = ret;
				goto out_unlock;
			}
			ins_nr = 0;
		}
		btrfs_release_path(root, path);

		if (min_key.offset < (u64)-1)
			min_key.offset++;
		else if (min_key.type < (u8)-1)
			min_key.type++;
		else if (min_key.objectid < (u64)-1)
			min_key.objectid++;
		else
			break;
	}
	if (ins_nr) {
		ret = copy_items(trans, log, dst_path, src,
				 ins_start_slot,
				 ins_nr, inode_only);
		if (ret) {
			err = ret;
			goto out_unlock;
		}
		ins_nr = 0;
	}
	WARN_ON(ins_nr);
	if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->i_mode)) {
		btrfs_release_path(root, path);
		btrfs_release_path(log, dst_path);
		ret = log_directory_changes(trans, root, inode, path,
					    dst_path);
		if (ret) {
			err = ret;
			goto out_unlock;
		}
	}
	BTRFS_I(inode)->logged_trans = trans->transid;
out_unlock:
	mutex_unlock(&BTRFS_I(inode)->log_mutex);

	btrfs_free_path(path);
	btrfs_free_path(dst_path);
	return err;
}
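/*
 * Illustrative only: an fsync of a regular file logs the file itself
 * in full, while any parent directories that still need to be made
 * visible are logged with LOG_INODE_EXISTS, as the parent walk in
 * btrfs_log_inode_parent() below does:
 *
 *	ret = btrfs_log_inode(trans, root, inode, LOG_INODE_ALL);
 *	...
 *	ret = btrfs_log_inode(trans, root, parent_inode, LOG_INODE_EXISTS);
 */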
/*
 * follow the dentry parent pointers up the chain and see if any
 * of the directories in it require a full commit before they can
 * be logged.  Returns zero if nothing special needs to be done or 1 if
 * a full commit is required.
 */
static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans,
					       struct inode *inode,
					       struct dentry *parent,
					       struct super_block *sb,
					       u64 last_committed)
{
	int ret = 0;
	struct btrfs_root *root;
	struct dentry *old_parent = NULL;

	/*
	 * for regular files, if its inode is already on disk, we don't
	 * have to worry about the parents at all.  This is because
	 * we can use the last_unlink_trans field to record renames
	 * and other fun in this file.
	 */
	if (S_ISREG(inode->i_mode) &&
	    BTRFS_I(inode)->generation <= last_committed &&
	    BTRFS_I(inode)->last_unlink_trans <= last_committed)
		goto out;

	if (!S_ISDIR(inode->i_mode)) {
		if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb)
			goto out;
		inode = parent->d_inode;
	}

	while (1) {
		BTRFS_I(inode)->logged_trans = trans->transid;
		smp_mb();

		if (BTRFS_I(inode)->last_unlink_trans > last_committed) {
			root = BTRFS_I(inode)->root;

			/*
			 * make sure any commits to the log are forced
			 * to be full commits
			 */
			root->fs_info->last_trans_log_full_commit =
				trans->transid;
			ret = 1;
			break;
		}

		if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb)
			break;

		if (IS_ROOT(parent))
			break;

		parent = dget_parent(parent);
		dput(old_parent);
		old_parent = parent;
		inode = parent->d_inode;
	}
	dput(old_parent);
out:
	return ret;
}

static int inode_in_log(struct btrfs_trans_handle *trans,
			struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;

	mutex_lock(&root->log_mutex);
	if (BTRFS_I(inode)->logged_trans == trans->transid &&
	    BTRFS_I(inode)->last_sub_trans <= root->last_log_commit)
		ret = 1;
	mutex_unlock(&root->log_mutex);
	return ret;
}
/*
 * helper function around btrfs_log_inode to make sure newly created
 * parent directories also end up in the log.  Only a minimal amount of
 * inode and backref logging is done for any parent directories that are
 * older than the last committed transaction
 */
int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, struct inode *inode,
			   struct dentry *parent, int exists_only)
{
	int inode_only = exists_only ? LOG_INODE_EXISTS : LOG_INODE_ALL;
	struct super_block *sb;
	struct dentry *old_parent = NULL;
	int ret = 0;
	u64 last_committed = root->fs_info->last_trans_committed;

	sb = inode->i_sb;

	if (btrfs_test_opt(root, NOTREELOG)) {
		ret = 1;
		goto end_no_trans;
	}

	if (root->fs_info->last_trans_log_full_commit >
	    root->fs_info->last_trans_committed) {
		ret = 1;
		goto end_no_trans;
	}

	if (root != BTRFS_I(inode)->root ||
	    btrfs_root_refs(&root->root_item) == 0) {
		ret = 1;
		goto end_no_trans;
	}

	ret = check_parent_dirs_for_sync(trans, inode, parent,
					 sb, last_committed);
	if (ret)
		goto end_no_trans;

	if (inode_in_log(trans, inode)) {
		ret = BTRFS_NO_LOG_SYNC;
		goto end_no_trans;
	}

	ret = start_log_trans(trans, root);
	if (ret)
		goto end_trans;

	ret = btrfs_log_inode(trans, root, inode, inode_only);
	if (ret)
		goto end_trans;

	/*
	 * for regular files, if its inode is already on disk, we don't
	 * have to worry about the parents at all.  This is because
	 * we can use the last_unlink_trans field to record renames
	 * and other fun in this file.
	 */
	if (S_ISREG(inode->i_mode) &&
	    BTRFS_I(inode)->generation <= last_committed &&
	    BTRFS_I(inode)->last_unlink_trans <= last_committed) {
		ret = 0;
		goto end_trans;
	}

	inode_only = LOG_INODE_EXISTS;
	while (1) {
		if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb)
			break;

		inode = parent->d_inode;
		if (root != BTRFS_I(inode)->root)
			break;

		if (BTRFS_I(inode)->generation >
		    root->fs_info->last_trans_committed) {
			ret = btrfs_log_inode(trans, root, inode, inode_only);
			if (ret)
				goto end_trans;
		}
		if (IS_ROOT(parent))
			break;

		parent = dget_parent(parent);
		dput(old_parent);
		old_parent = parent;
	}
	ret = 0;
end_trans:
	dput(old_parent);
	if (ret < 0) {
		BUG_ON(ret != -ENOSPC);
		root->fs_info->last_trans_log_full_commit = trans->transid;
		ret = 1;
	}
	btrfs_end_log_trans(root);
end_no_trans:
	return ret;
}
/*
 * it is not safe to log a dentry if the chunk root has added new
 * chunks.  This returns 0 if the dentry was logged, and 1 otherwise.
 * If this returns 1, you must commit the transaction to safely get your
 * data on disk.
 */
int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct dentry *dentry)
{
	struct dentry *parent = dget_parent(dentry);
	int ret;

	ret = btrfs_log_inode_parent(trans, root, dentry->d_inode, parent, 0);
	dput(parent);

	return ret;
}
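/*
 * Sketch of the expected fsync sequence (illustrative only; modeled
 * on the file fsync path, which owns the transaction handle):
 *
 *	ret = btrfs_log_dentry_safe(trans, root, dentry);
 *	if (ret == 0) {
 *		ret = btrfs_sync_log(trans, root);
 *		if (ret == 0)
 *			ret = btrfs_end_transaction(trans, root);
 *		else
 *			ret = btrfs_commit_transaction(trans, root);
 *	} else if (ret != BTRFS_NO_LOG_SYNC) {
 *		ret = btrfs_commit_transaction(trans, root);
 *	} else {
 *		ret = btrfs_end_transaction(trans, root);
 *	}
 */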
/*
 * should be called during mount to recover and replay any log trees
 * from the FS
 */
int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_trans_handle *trans;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_key tmp_key;
	struct btrfs_root *log;
	struct btrfs_fs_info *fs_info = log_root_tree->fs_info;
	struct walk_control wc = {
		.process_func = process_one_buffer,
		.stage = 0,
	};

	fs_info->log_root_recovering = 1;
	path = btrfs_alloc_path();
	BUG_ON(!path);

	trans = btrfs_start_transaction(fs_info->tree_root, 0);

	wc.trans = trans;
	wc.pin = 1;

	walk_log_tree(trans, log_root_tree, &wc);

again:
	key.objectid = BTRFS_TREE_LOG_OBJECTID;
	key.offset = (u64)-1;
	btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);

	while (1) {
		ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0);
		if (ret < 0)
			break;
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		btrfs_release_path(log_root_tree, path);
		if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID)
			break;

		log = btrfs_read_fs_root_no_radix(log_root_tree,
						  &found_key);
		BUG_ON(!log);

		tmp_key.objectid = found_key.offset;
		tmp_key.type = BTRFS_ROOT_ITEM_KEY;
		tmp_key.offset = (u64)-1;

		wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key);
		BUG_ON(!wc.replay_dest);

		wc.replay_dest->log_root = log;
		btrfs_record_root_in_trans(trans, wc.replay_dest);
		ret = walk_log_tree(trans, log, &wc);
		BUG_ON(ret);

		if (wc.stage == LOG_WALK_REPLAY_ALL) {
			ret = fixup_inode_link_counts(trans, wc.replay_dest,
						      path);
			BUG_ON(ret);
		}

		key.offset = found_key.offset - 1;
		wc.replay_dest->log_root = NULL;
		free_extent_buffer(log->node);
		free_extent_buffer(log->commit_root);
		kfree(log);

		if (found_key.offset == 0)
			break;
	}
	btrfs_release_path(log_root_tree, path);

	/* step one is to pin it all, step two is to replay just inodes */
	if (wc.pin) {
		wc.pin = 0;
		wc.process_func = replay_one_buffer;
		wc.stage = LOG_WALK_REPLAY_INODES;
		goto again;
	}
	/* step three is to replay everything */
	if (wc.stage < LOG_WALK_REPLAY_ALL) {
		wc.stage++;
		goto again;
	}

	btrfs_free_path(path);

	free_extent_buffer(log_root_tree->node);
	log_root_tree->log_root = NULL;
	fs_info->log_root_recovering = 0;

	/* step 4: commit the transaction, which also unpins the blocks */
	btrfs_commit_transaction(trans, fs_info->tree_root);

	kfree(log_root_tree);
	return 0;
}
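/*
 * Illustrative mount-time sequence (a sketch only; 'disk_super' and
 * 'log_tree_root' are assumed names for the mount path's variables):
 *
 *	if (btrfs_super_log_root(disk_super) != 0) {
 *		... read the log root tree rooted at
 *		    btrfs_super_log_root(disk_super) ...
 *		ret = btrfs_recover_log_trees(log_tree_root);
 *	}
 */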
/*
 * there are some corner cases where we want to force a full
 * commit instead of allowing a directory to be logged.
 *
 * They revolve around files that were unlinked from the directory, and
 * this function updates the parent directory so that a full commit is
 * properly done if it is fsync'd later after the unlinks are done.
 */
void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
			     struct inode *dir, struct inode *inode,
			     int for_rename)
{
	/*
	 * when we're logging a file, if it hasn't been renamed
	 * or unlinked, and its inode is fully committed on disk,
	 * we don't have to worry about walking up the directory chain
	 * to log its parents.
	 *
	 * So, we use the last_unlink_trans field to put this transid
	 * into the file.  When the file is logged we check it and
	 * don't log the parents if the file is fully on disk.
	 */
	if (S_ISREG(inode->i_mode))
		BTRFS_I(inode)->last_unlink_trans = trans->transid;

	/*
	 * if this directory was already logged any new
	 * names for this file/dir will get recorded
	 */
	smp_mb();
	if (BTRFS_I(dir)->logged_trans == trans->transid)
		return;

	/*
	 * if the inode we're about to unlink was logged,
	 * the log will be properly updated for any new names
	 */
	if (BTRFS_I(inode)->logged_trans == trans->transid)
		return;

	/*
	 * when renaming files across directories, if the directory
	 * we're unlinking from gets fsync'd later on, there's no way
	 * to find the destination directory later and fsync it
	 * properly.  So, we have to be conservative and force commits
	 * so the new name gets discovered.
	 */
	if (for_rename)
		goto record;

	/* we can safely do the unlink without any special recording */
	return;

record:
	BTRFS_I(dir)->last_unlink_trans = trans->transid;
}

/*
 * Call this after adding a new name for a file and it will properly
 * update the log to reflect the new name.
 *
 * It will return zero if all goes well, and it will return 1 if a
 * full transaction commit is required.
 */
int btrfs_log_new_name(struct btrfs_trans_handle *trans,
		       struct inode *inode, struct inode *old_dir,
		       struct dentry *parent)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;

	/*
	 * this will force the logging code to walk the dentry chain
	 * up for the file
	 */
	if (S_ISREG(inode->i_mode))
		BTRFS_I(inode)->last_unlink_trans = trans->transid;

	/*
	 * if this inode hasn't been logged and the directory we're
	 * renaming it from hasn't been logged, we don't need to log it
	 */
	if (BTRFS_I(inode)->logged_trans <=
	    root->fs_info->last_trans_committed &&
	    (!old_dir || BTRFS_I(old_dir)->logged_trans <=
		    root->fs_info->last_trans_committed))
		return 0;

	return btrfs_log_inode_parent(trans, root, inode, parent, 1);
}
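/*
 * Sketch of the rename path's use of the two helpers above
 * (illustrative only; modeled on the rename code, with variable
 * names assumed):
 *
 *	btrfs_record_unlink_dir(trans, old_dir, old_inode, 1);
 *	... swap the directory entries over ...
 *	btrfs_log_new_name(trans, old_inode, old_dir, parent);
 */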