// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/list_sort.h>
#include <linux/iversion.h>
#include "misc.h"
#include "ctree.h"
#include "tree-log.h"
#include "disk-io.h"
#include "locking.h"
#include "print-tree.h"
#include "backref.h"
#include "compression.h"
#include "qgroup.h"
#include "block-group.h"
#include "space-info.h"
#include "zoned.h"

/* magic values for the inode_only field in btrfs_log_inode:
 *
 * LOG_INODE_ALL means to log everything
 * LOG_INODE_EXISTS means to log just enough to recreate the inode
 * during log replay
 */
enum {
	LOG_INODE_ALL,
	LOG_INODE_EXISTS,
	LOG_OTHER_INODE,
	LOG_OTHER_INODE_ALL,
};

/*
 * directory trouble cases
 *
 * 1) on rename or unlink, if the inode being unlinked isn't in the fsync
 * log, we must force a full commit before doing an fsync of the directory
 * where the unlink was done.
 * ---> record transid of last unlink/rename per directory
 *
 * mkdir foo/some_dir
 * normal commit
 * rename foo/some_dir foo2/some_dir
 * mkdir foo/some_dir
 * fsync foo/some_dir/some_file
 *
 * The fsync above will unlink the original some_dir without recording
 * it in its new location (foo2).  After a crash, some_dir will be gone
 * unless the fsync of some_file forces a full commit
 *
 * 2) we must log any new names for any file or dir that is in the fsync
 * log. ---> check inode while renaming/linking.
 *
 * 2a) we must log any new names for any file or dir during rename
 * when the directory they are being removed from was logged.
 * ---> check inode and old parent dir during rename
 *
 * 2a is actually the more important variant.  Without the extra logging
 * a crash might unlink the old name without recreating the new one
 *
 * 3) after a crash, we must go through any directories with a link count
 * of zero and redo the rm -rf
 *
 * mkdir f1/foo
 * normal commit
 * rm -rf f1/foo
 * fsync(f1)
 *
 * The directory f1 was fully removed from the FS, but fsync was never
 * called on f1, only its parent dir.  After a crash the rm -rf must
 * be replayed.  This must be able to recurse down the entire
 * directory tree.  The inode link count fixup code takes care of the
 * ugly details.
 */
/*
 * stages for the tree walking.  The first
 * stage (0) is to only pin down the blocks we find
 * the second stage (1) is to make sure that all the inodes
 * we find in the log are created in the subvolume.
 *
 * The next stage replays directory index items, and the last stage
 * deals with directories and links and extents and all the other
 * fun semantics
 */
enum {
	LOG_WALK_PIN_ONLY,
	LOG_WALK_REPLAY_INODES,
	LOG_WALK_REPLAY_DIR_INDEX,
	LOG_WALK_REPLAY_ALL,
};

static int btrfs_log_inode(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, struct btrfs_inode *inode,
			   int inode_only,
			   struct btrfs_log_ctx *ctx);
static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path, u64 objectid);
static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_root *log,
				       struct btrfs_path *path,
				       u64 dirid, int del_all);
static void wait_log_commit(struct btrfs_root *root, int transid);

/*
 * tree logging is a special write ahead log used to make sure that
 * fsyncs and O_SYNCs can happen without doing full tree commits.
 *
 * Full tree commits are expensive because they require commonly
 * modified blocks to be recowed, creating many dirty pages in the
 * extent tree and a 4x-6x higher write load than ext3.
 *
 * Instead of doing a tree commit on every fsync, we use the
 * key ranges and transaction ids to find items for a given file or directory
 * that have changed in this transaction.  Those items are copied into
 * a special tree (one per subvolume root), that tree is written to disk
 * and then the fsync is considered complete.
 *
 * After a crash, items are copied out of the log-tree back into the
 * subvolume tree.  Any file data extents found are recorded in the extent
 * allocation tree, and the log-tree is freed.
 *
 * The log tree is read three times: once to pin down all the extents it is
 * using in ram, once to create all the inodes logged in the tree and once
 * to do all the other items.
 */

/*
 * start a sub transaction and setup the log tree
 * this increments the log tree writer count to make the people
 * syncing the tree wait for us to finish
 */
static int start_log_trans(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_log_ctx *ctx)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_root *tree_root = fs_info->tree_root;
	const bool zoned = btrfs_is_zoned(fs_info);
	int ret = 0;
	bool created = false;

	/*
	 * First check if the log root tree was already created.  If not,
	 * create it before locking the root's log_mutex, just to keep lockdep
	 * happy.
	 */
	if (!test_bit(BTRFS_ROOT_HAS_LOG_TREE, &tree_root->state)) {
		mutex_lock(&tree_root->log_mutex);
		if (!fs_info->log_root_tree) {
			ret = btrfs_init_log_root_tree(trans, fs_info);
			if (!ret) {
				set_bit(BTRFS_ROOT_HAS_LOG_TREE, &tree_root->state);
				created = true;
			}
		}
		mutex_unlock(&tree_root->log_mutex);
		if (ret)
			return ret;
	}

	mutex_lock(&root->log_mutex);

again:
	if (root->log_root) {
		int index = (root->log_transid + 1) % 2;

		if (btrfs_need_log_full_commit(trans)) {
			ret = -EAGAIN;
			goto out;
		}
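
		/*
		 * On zoned filesystems the log must be written sequentially,
		 * so if a commit of the previous log transaction is still
		 * running, wait for it to finish before joining this one.
		 */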
		if (zoned && atomic_read(&root->log_commit[index])) {
			wait_log_commit(root, root->log_transid - 1);
			goto again;
		}

		if (!root->log_start_pid) {
			clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
			root->log_start_pid = current->pid;
		} else if (root->log_start_pid != current->pid) {
			set_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
		}
	} else {
		/*
		 * This means fs_info->log_root_tree was already created
		 * for some other FS trees.  Do the full commit not to mix
		 * nodes from multiple log transactions to do sequential
		 * writing.
		 */
		if (zoned && !created) {
			ret = -EAGAIN;
			goto out;
		}

		ret = btrfs_add_log_tree(trans, root);
		if (ret)
			goto out;

		set_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state);
		clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
		root->log_start_pid = current->pid;
	}

	atomic_inc(&root->log_writers);
	if (ctx && !ctx->logging_new_name) {
		int index = root->log_transid % 2;

		list_add_tail(&ctx->list, &root->log_ctxs[index]);
		ctx->log_transid = root->log_transid;
	}

out:
	mutex_unlock(&root->log_mutex);
	return ret;
}

/*
 * returns 0 if there was a log transaction running and we were able
 * to join, or returns -ENOENT if there are no transactions
 * in progress
 */
static int join_running_log_trans(struct btrfs_root *root)
{
	const bool zoned = btrfs_is_zoned(root->fs_info);
	int ret = -ENOENT;

	if (!test_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state))
		return ret;

	mutex_lock(&root->log_mutex);
again:
	if (root->log_root) {
		int index = (root->log_transid + 1) % 2;

		ret = 0;
		if (zoned && atomic_read(&root->log_commit[index])) {
			wait_log_commit(root, root->log_transid - 1);
			goto again;
		}
		atomic_inc(&root->log_writers);
	}
	mutex_unlock(&root->log_mutex);
	return ret;
}

/*
 * This either makes the current running log transaction wait
 * until you call btrfs_end_log_trans() or it makes any future
 * log transactions wait until you call btrfs_end_log_trans()
 */
void btrfs_pin_log_trans(struct btrfs_root *root)
{
	atomic_inc(&root->log_writers);
}

/*
 * indicate we're done making changes to the log tree
 * and wake up anyone waiting to do a sync
 */
void btrfs_end_log_trans(struct btrfs_root *root)
{
	if (atomic_dec_and_test(&root->log_writers)) {
		/* atomic_dec_and_test implies a barrier */
		cond_wake_up_nomb(&root->log_writer_wait);
	}
}
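
/*
 * Helpers to start writeback of a single tree block (extent buffer) and to
 * wait for that writeback to finish.
 */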
static int btrfs_write_tree_block(struct extent_buffer *buf)
{
	return filemap_fdatawrite_range(buf->pages[0]->mapping, buf->start,
					buf->start + buf->len - 1);
}

static void btrfs_wait_tree_block_writeback(struct extent_buffer *buf)
{
	filemap_fdatawait_range(buf->pages[0]->mapping,
				buf->start, buf->start + buf->len - 1);
}

/*
 * the walk control struct is used to pass state down the chain when
 * processing the log tree.  The stage field tells us which part
 * of the log tree processing we are currently doing.  The others
 * are state fields used for that specific part
 */
struct walk_control {
	/* should we free the extent on disk when done?  This is used
	 * at transaction commit time while freeing a log tree
	 */
	int free;

	/* should we write out the extent buffer?  This is used
	 * while flushing the log tree to disk during a sync
	 */
	int write;

	/* should we wait for the extent buffer io to finish?  Also used
	 * while flushing the log tree to disk for a sync
	 */
	int wait;

	/* pin only walk, we record which extents on disk belong to the
	 * log trees
	 */
	int pin;

	/* what stage of the replay code we're currently in */
	int stage;

	/*
	 * Ignore any items from the inode currently being processed.  Needs
	 * to be set every time we find a BTRFS_INODE_ITEM_KEY and we are in
	 * the LOG_WALK_REPLAY_INODES stage.
	 */
	bool ignore_cur_inode;

	/* the root we are currently replaying */
	struct btrfs_root *replay_dest;

	/* the trans handle for the current replay */
	struct btrfs_trans_handle *trans;

	/* the function that gets used to process blocks we find in the
	 * tree.  Note the extent_buffer might not be up to date when it is
	 * passed in, and it must be checked or read if you need the data
	 * inside it
	 */
	int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb,
			    struct walk_control *wc, u64 gen, int level);
};

/*
 * process_func used to pin down extents, write them or wait on them
 */
static int process_one_buffer(struct btrfs_root *log,
			      struct extent_buffer *eb,
			      struct walk_control *wc, u64 gen, int level)
{
	struct btrfs_fs_info *fs_info = log->fs_info;
	int ret = 0;

	/*
	 * If this fs is mixed then we need to be able to process the leaves to
	 * pin down any logged extents, so we have to read the block.
	 */
	if (btrfs_fs_incompat(fs_info, MIXED_GROUPS)) {
		ret = btrfs_read_buffer(eb, gen, level, NULL);
		if (ret)
			return ret;
	}

	if (wc->pin)
		ret = btrfs_pin_extent_for_log_replay(wc->trans, eb->start,
						      eb->len);

	if (!ret && btrfs_buffer_uptodate(eb, gen, 0)) {
		if (wc->pin && btrfs_header_level(eb) == 0)
			ret = btrfs_exclude_logged_extents(eb);
		if (wc->write)
			btrfs_write_tree_block(eb);
		if (wc->wait)
			btrfs_wait_tree_block_writeback(eb);
	}
	return ret;
}

/*
 * Item overwrite used by replay and tree logging.  eb, slot and key all refer
 * to the src data we are copying out.
 *
 * root is the tree we are copying into, and path is a scratch
 * path for use in this function (it should be released on entry and
 * will be released on exit).
 *
 * If the key is already in the destination tree the existing item is
 * overwritten.  If the existing item isn't big enough, it is extended.
 * If it is too large, it is truncated.
 *
 * If the key isn't in the destination yet, a new item is inserted.
 */
static noinline int overwrite_item(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_path *path,
				   struct extent_buffer *eb, int slot,
				   struct btrfs_key *key)
{
	int ret;
	u32 item_size;
	u64 saved_i_size = 0;
	int save_old_i_size = 0;
	unsigned long src_ptr;
	unsigned long dst_ptr;
	int overwrite_root = 0;
	bool inode_item = key->type == BTRFS_INODE_ITEM_KEY;

	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
		overwrite_root = 1;

	item_size = btrfs_item_size_nr(eb, slot);
	src_ptr = btrfs_item_ptr_offset(eb, slot);

	/* look for the key in the destination tree */
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret < 0)
		return ret;

	if (ret == 0) {
		char *src_copy;
		char *dst_copy;
		u32 dst_size = btrfs_item_size_nr(path->nodes[0],
						  path->slots[0]);
		if (dst_size != item_size)
			goto insert;

		if (item_size == 0) {
			btrfs_release_path(path);
			return 0;
		}
		dst_copy = kmalloc(item_size, GFP_NOFS);
		src_copy = kmalloc(item_size, GFP_NOFS);
		if (!dst_copy || !src_copy) {
			btrfs_release_path(path);
			kfree(dst_copy);
			kfree(src_copy);
			return -ENOMEM;
		}

		read_extent_buffer(eb, src_copy, src_ptr, item_size);

		dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
		read_extent_buffer(path->nodes[0], dst_copy, dst_ptr,
				   item_size);
		ret = memcmp(dst_copy, src_copy, item_size);

		kfree(dst_copy);
		kfree(src_copy);
		/*
		 * they have the same contents, just return, this saves
		 * us from cowing blocks in the destination tree and doing
		 * extra writes that may not have been done by a previous
		 * sync
		 */
		if (ret == 0) {
			btrfs_release_path(path);
			return 0;
		}

		/*
		 * We need to load the old nbytes into the inode so when we
		 * replay the extents we've logged we get the right nbytes.
		 */
		if (inode_item) {
			struct btrfs_inode_item *item;
			u64 nbytes;
			u32 mode;

			item = btrfs_item_ptr(path->nodes[0], path->slots[0],
					      struct btrfs_inode_item);
			nbytes = btrfs_inode_nbytes(path->nodes[0], item);
			item = btrfs_item_ptr(eb, slot,
					      struct btrfs_inode_item);
			btrfs_set_inode_nbytes(eb, item, nbytes);

			/*
			 * If this is a directory we need to reset the i_size to
			 * 0 so that we can set it up properly when replaying
			 * the rest of the items in this log.
			 */
			mode = btrfs_inode_mode(eb, item);
			if (S_ISDIR(mode))
				btrfs_set_inode_size(eb, item, 0);
		}
	} else if (inode_item) {
		struct btrfs_inode_item *item;
		u32 mode;

		/*
		 * New inode, set nbytes to 0 so that the nbytes comes out
		 * properly when we replay the extents.
		 */
		item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item);
		btrfs_set_inode_nbytes(eb, item, 0);

		/*
		 * If this is a directory we need to reset the i_size to 0 so
		 * that we can set it up properly when replaying the rest of
		 * the items in this log.
		 */
		mode = btrfs_inode_mode(eb, item);
		if (S_ISDIR(mode))
			btrfs_set_inode_size(eb, item, 0);
	}
insert:
	btrfs_release_path(path);
	/* try to insert the key into the destination tree */
	path->skip_release_on_error = 1;
	ret = btrfs_insert_empty_item(trans, root, path,
				      key, item_size);
	path->skip_release_on_error = 0;

	/* make sure any existing item is the correct size */
	if (ret == -EEXIST || ret == -EOVERFLOW) {
		u32 found_size;

		found_size = btrfs_item_size_nr(path->nodes[0],
						path->slots[0]);
		if (found_size > item_size)
			btrfs_truncate_item(path, item_size, 1);
		else if (found_size < item_size)
			btrfs_extend_item(path, item_size - found_size);
	} else if (ret) {
		return ret;
	}
	dst_ptr = btrfs_item_ptr_offset(path->nodes[0],
					path->slots[0]);

	/* don't overwrite an existing inode if the generation number
	 * was logged as zero.  This is done when the tree logging code
	 * is just logging an inode to make sure it exists after recovery.
	 *
	 * Also, don't overwrite i_size on directories during replay.
	 * log replay inserts and removes directory items based on the
	 * state of the tree found in the subvolume, and i_size is modified
	 * as it goes
	 */
	if (key->type == BTRFS_INODE_ITEM_KEY && ret == -EEXIST) {
		struct btrfs_inode_item *src_item;
		struct btrfs_inode_item *dst_item;

		src_item = (struct btrfs_inode_item *)src_ptr;
		dst_item = (struct btrfs_inode_item *)dst_ptr;

		if (btrfs_inode_generation(eb, src_item) == 0) {
			struct extent_buffer *dst_eb = path->nodes[0];
			const u64 ino_size = btrfs_inode_size(eb, src_item);

			/*
			 * For regular files an ino_size == 0 is used only when
			 * logging that an inode exists, as part of a directory
			 * fsync, and the inode wasn't fsynced before.  In this
			 * case don't set the size of the inode in the fs/subvol
			 * tree, otherwise we would be throwing valid data away.
			 */
			if (S_ISREG(btrfs_inode_mode(eb, src_item)) &&
			    S_ISREG(btrfs_inode_mode(dst_eb, dst_item)) &&
			    ino_size != 0)
				btrfs_set_inode_size(dst_eb, dst_item, ino_size);
			goto no_copy;
		}

		if (overwrite_root &&
		    S_ISDIR(btrfs_inode_mode(eb, src_item)) &&
		    S_ISDIR(btrfs_inode_mode(path->nodes[0], dst_item))) {
			save_old_i_size = 1;
			saved_i_size = btrfs_inode_size(path->nodes[0],
							dst_item);
		}
	}

	copy_extent_buffer(path->nodes[0], eb, dst_ptr,
			   src_ptr, item_size);

	if (save_old_i_size) {
		struct btrfs_inode_item *dst_item;

		dst_item = (struct btrfs_inode_item *)dst_ptr;
		btrfs_set_inode_size(path->nodes[0], dst_item, saved_i_size);
	}

	/* make sure the generation is filled in */
	if (key->type == BTRFS_INODE_ITEM_KEY) {
		struct btrfs_inode_item *dst_item;

		dst_item = (struct btrfs_inode_item *)dst_ptr;
		if (btrfs_inode_generation(path->nodes[0], dst_item) == 0) {
			btrfs_set_inode_generation(path->nodes[0], dst_item,
						   trans->transid);
		}
	}
no_copy:
	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_release_path(path);
	return 0;
}

/*
 * simple helper to read an inode off the disk from a given root
 * This can only be called for subvolume roots and not for the log
 */
static noinline struct inode *read_one_inode(struct btrfs_root *root,
					     u64 objectid)
{
	struct inode *inode;

	inode = btrfs_iget(root->fs_info->sb, objectid, root);
	if (IS_ERR(inode))
		inode = NULL;
	return inode;
}

/* replays a single extent in 'eb' at 'slot' with 'key' into the
 * subvolume 'root'.  path is released on entry and should be released
 * on exit.
 *
 * extents in the log tree have not been allocated out of the extent
 * tree yet.  So, this completes the allocation, taking a reference
 * as required if the extent already exists or creating a new extent
 * if it isn't in the extent allocation tree yet.
 *
 * The extent is inserted into the file, dropping any existing extents
 * from the file that overlap the new one.
 */
static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct extent_buffer *eb, int slot,
				      struct btrfs_key *key)
{
	struct btrfs_drop_extents_args drop_args = { 0 };
	struct btrfs_fs_info *fs_info = root->fs_info;
	int found_type;
	u64 extent_end;
	u64 start = key->offset;
	u64 nbytes = 0;
	struct btrfs_file_extent_item *item;
	struct inode *inode = NULL;
	unsigned long size;
	int ret = 0;

	item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
	found_type = btrfs_file_extent_type(eb, item);

	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		nbytes = btrfs_file_extent_num_bytes(eb, item);
		extent_end = start + nbytes;

		/*
		 * We don't add to the inodes nbytes if we are prealloc or a
		 * hole.
		 */
		if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
			nbytes = 0;
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		size = btrfs_file_extent_ram_bytes(eb, item);
		nbytes = btrfs_file_extent_ram_bytes(eb, item);
		extent_end = ALIGN(start + size,
				   fs_info->sectorsize);
	} else {
		ret = 0;
		goto out;
	}

	inode = read_one_inode(root, key->objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	/*
	 * first check to see if we already have this extent in the
	 * file.  This must be done before the btrfs_drop_extents run
	 * so we don't try to drop this extent.
	 */
	ret = btrfs_lookup_file_extent(trans, root, path,
				       btrfs_ino(BTRFS_I(inode)), start, 0);

	if (ret == 0 &&
	    (found_type == BTRFS_FILE_EXTENT_REG ||
	     found_type == BTRFS_FILE_EXTENT_PREALLOC)) {
		struct btrfs_file_extent_item cmp1;
		struct btrfs_file_extent_item cmp2;
		struct btrfs_file_extent_item *existing;
		struct extent_buffer *leaf;

		leaf = path->nodes[0];
		existing = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_file_extent_item);

		read_extent_buffer(eb, &cmp1, (unsigned long)item,
				   sizeof(cmp1));
		read_extent_buffer(leaf, &cmp2, (unsigned long)existing,
				   sizeof(cmp2));

		/*
		 * we already have a pointer to this exact extent,
		 * we don't have to do anything
		 */
		if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) {
			btrfs_release_path(path);
			goto out;
		}
	}
	btrfs_release_path(path);

	/* drop any overlapping extents */
	drop_args.start = start;
	drop_args.end = extent_end;
	drop_args.drop_cache = true;
	ret = btrfs_drop_extents(trans, root, BTRFS_I(inode), &drop_args);
	if (ret)
		goto out;

	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		u64 offset;
		unsigned long dest_offset;
		struct btrfs_key ins;

		if (btrfs_file_extent_disk_bytenr(eb, item) == 0 &&
		    btrfs_fs_incompat(fs_info, NO_HOLES))
			goto update_inode;

		ret = btrfs_insert_empty_item(trans, root, path, key,
					      sizeof(*item));
		if (ret)
			goto out;
		dest_offset = btrfs_item_ptr_offset(path->nodes[0],
						    path->slots[0]);
		copy_extent_buffer(path->nodes[0], eb, dest_offset,
				   (unsigned long)item, sizeof(*item));

		ins.objectid = btrfs_file_extent_disk_bytenr(eb, item);
		ins.offset = btrfs_file_extent_disk_num_bytes(eb, item);
		ins.type = BTRFS_EXTENT_ITEM_KEY;
		offset = key->offset - btrfs_file_extent_offset(eb, item);

		/*
		 * Manually record dirty extent, as here we did a shallow
		 * file extent item copy and skip normal backref update,
		 * but modifying extent tree all by ourselves.
		 * So need to manually record dirty extent for qgroup,
		 * as the owner of the file extent changed from log tree
		 * (doesn't affect qgroup) to fs/file tree (affects qgroup)
		 */
		ret = btrfs_qgroup_trace_extent(trans,
				btrfs_file_extent_disk_bytenr(eb, item),
				btrfs_file_extent_disk_num_bytes(eb, item),
				GFP_NOFS);
		if (ret < 0)
			goto out;

		if (ins.objectid > 0) {
			struct btrfs_ref ref = { 0 };
			u64 csum_start;
			u64 csum_end;
			LIST_HEAD(ordered_sums);

			/*
			 * is this extent already allocated in the extent
			 * allocation tree?  If so, just add a reference
			 */
			ret = btrfs_lookup_data_extent(fs_info, ins.objectid,
						       ins.offset);
			if (ret == 0) {
				btrfs_init_generic_ref(&ref,
						BTRFS_ADD_DELAYED_REF,
						ins.objectid, ins.offset, 0);
				btrfs_init_data_ref(&ref,
						root->root_key.objectid,
						key->objectid, offset);
				ret = btrfs_inc_extent_ref(trans, &ref);
				if (ret)
					goto out;
			} else {
				/*
				 * insert the extent pointer in the extent
				 * allocation tree
				 */
				ret = btrfs_alloc_logged_file_extent(trans,
						root->root_key.objectid,
						key->objectid, offset, &ins);
				if (ret)
					goto out;
			}
			btrfs_release_path(path);

			if (btrfs_file_extent_compression(eb, item)) {
				csum_start = ins.objectid;
				csum_end = csum_start + ins.offset;
			} else {
				csum_start = ins.objectid +
					btrfs_file_extent_offset(eb, item);
				csum_end = csum_start +
					btrfs_file_extent_num_bytes(eb, item);
			}

			ret = btrfs_lookup_csums_range(root->log_root,
						csum_start, csum_end - 1,
						&ordered_sums, 0);
			if (ret)
				goto out;
			/*
			 * Now delete all existing csums in the csum root that
			 * cover our range.  We do this because we can have an
			 * extent that is completely referenced by one file
			 * extent item and partially referenced by another
			 * file extent item (like after using the clone or
			 * extent_same ioctls).  In this case if we end up doing
			 * the replay of the one that partially references the
			 * extent first, and we do not do the csum deletion
			 * below, we can get 2 csum items in the csum tree that
			 * overlap each other.  For example, imagine our log has
			 * the two following file extent items:
			 *
			 * key (257 EXTENT_DATA 409600)
			 *     extent data disk byte 12845056 nr 102400
			 *     extent data offset 20480 nr 20480 ram 102400
			 *
			 * key (257 EXTENT_DATA 819200)
			 *     extent data disk byte 12845056 nr 102400
			 *     extent data offset 0 nr 102400 ram 102400
			 *
			 * Where the second one fully references the 100K extent
			 * that starts at disk byte 12845056, and the log tree
			 * has a single csum item that covers the entire range
			 * of the extent:
			 *
			 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
			 *
			 * After the first file extent item is replayed, the
			 * csum tree gets the following csum item:
			 *
			 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
			 *
			 * Which covers the 20K sub-range starting at offset 20K
			 * of our extent.  Now when we replay the second file
			 * extent item, if we do not delete existing csum items
			 * that cover any of its blocks, we end up getting two
			 * csum items in our csum tree that overlap each other:
			 *
			 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
			 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
			 *
			 * Which is a problem, because after this anyone trying
			 * to look up the checksum of any block of our extent
			 * starting at an offset of 40K or higher, will end up
			 * looking at the second csum item only, which does not
			 * contain the checksum for any block starting at
			 * offset 40K or higher of our extent.
			 */
			while (!list_empty(&ordered_sums)) {
				struct btrfs_ordered_sum *sums;

				sums = list_entry(ordered_sums.next,
						  struct btrfs_ordered_sum,
						  list);
				if (!ret)
					ret = btrfs_del_csums(trans,
							fs_info->csum_root,
							sums->bytenr,
							sums->len);
				if (!ret)
					ret = btrfs_csum_file_blocks(trans,
							fs_info->csum_root, sums);
				list_del(&sums->list);
				kfree(sums);
			}
			if (ret)
				goto out;
		} else {
			btrfs_release_path(path);
		}
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		/* inline extents are easy, we just overwrite them */
		ret = overwrite_item(trans, root, path, eb, slot, key);
		if (ret)
			goto out;
	}

	ret = btrfs_inode_set_file_extent_range(BTRFS_I(inode), start,
						extent_end - start);
	if (ret)
		goto out;
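
	/*
	 * Account the bytes we added and the bytes dropped by
	 * btrfs_drop_extents() in the inode's nbytes, and write back the
	 * updated inode item.
	 */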
update_inode:
	btrfs_update_inode_bytes(BTRFS_I(inode), nbytes, drop_args.bytes_found);
	ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
out:
	if (inode)
		iput(inode);
	return ret;
}

/*
 * when cleaning up conflicts between the directory names in the
 * subvolume, directory names in the log and directory names in the
 * inode back references, we may have to unlink inodes from directories.
 *
 * This is a helper function to do the unlink of a specific directory
 * item
 */
static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      struct btrfs_inode *dir,
				      struct btrfs_dir_item *di)
{
	struct inode *inode;
	char *name;
	int name_len;
	struct extent_buffer *leaf;
	struct btrfs_key location;
	int ret;

	leaf = path->nodes[0];

	btrfs_dir_item_key_to_cpu(leaf, di, &location);
	name_len = btrfs_dir_name_len(leaf, di);
	name = kmalloc(name_len, GFP_NOFS);
	if (!name)
		return -ENOMEM;

	read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len);
	btrfs_release_path(path);

	inode = read_one_inode(root, location.objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	ret = link_to_fixup_dir(trans, root, path, location.objectid);
	if (ret)
		goto out;

	ret = btrfs_unlink_inode(trans, root, dir, BTRFS_I(inode), name,
				 name_len);
	if (ret)
		goto out;
	else
		ret = btrfs_run_delayed_items(trans);
out:
	kfree(name);
	iput(inode);
	return ret;
}

/*
 * helper function to see if a given name and sequence number found
 * in an inode back reference are already in a directory and correctly
 * point to this inode
 */
static noinline int inode_in_dir(struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 dirid, u64 objectid, u64 index,
				 const char *name, int name_len)
{
	struct btrfs_dir_item *di;
	struct btrfs_key location;
	int match = 0;

	di = btrfs_lookup_dir_index_item(NULL, root, path, dirid,
					 index, name, name_len, 0);
	if (di && !IS_ERR(di)) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
		if (location.objectid != objectid)
			goto out;
	} else
		goto out;
	btrfs_release_path(path);

	di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0);
	if (di && !IS_ERR(di)) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location);
		if (location.objectid != objectid)
			goto out;
	} else
		goto out;
	match = 1;
out:
	btrfs_release_path(path);
	return match;
}

/*
 * helper function to check a log tree for a named back reference in
 * an inode.  This is used to decide if a back reference that is
 * found in the subvolume conflicts with what we find in the log.
 *
 * inode backreferences may have multiple refs in a single item,
 * during replay we process one reference at a time, and we don't
 * want to delete valid links to a file from the subvolume if that
 * link is also in the log.
 */
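/*
 * Returns 1 if the name was found in the log, 0 if it was not, and < 0 on
 * error.
 */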
static noinline int backref_in_log(struct btrfs_root *log,
				   struct btrfs_key *key,
				   u64 ref_objectid,
				   const char *name, int namelen)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, log, key, path, 0, 0);
	if (ret < 0) {
		goto out;
	} else if (ret == 1) {
		ret = 0;
		goto out;
	}

	if (key->type == BTRFS_INODE_EXTREF_KEY)
		ret = !!btrfs_find_name_in_ext_backref(path->nodes[0],
						       path->slots[0],
						       ref_objectid,
						       name, namelen);
	else
		ret = !!btrfs_find_name_in_backref(path->nodes[0],
						   path->slots[0],
						   name, namelen);
out:
	btrfs_free_path(path);
	return ret;
}
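
/*
 * Unlink any names found in the subvolume tree that conflict, by sequence
 * number or by name, with a reference being replayed for the given inode.
 * Returns 1 when the reference being replayed is for the root directory
 * itself, 0 on success and < 0 on error.
 */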
static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  struct btrfs_root *log_root,
				  struct btrfs_inode *dir,
				  struct btrfs_inode *inode,
				  u64 inode_objectid, u64 parent_objectid,
				  u64 ref_index, char *name, int namelen,
				  int *search_done)
{
	int ret;
	char *victim_name;
	int victim_name_len;
	struct extent_buffer *leaf;
	struct btrfs_dir_item *di;
	struct btrfs_key search_key;
	struct btrfs_inode_extref *extref;

again:
	/* Search old style refs */
	search_key.objectid = inode_objectid;
	search_key.type = BTRFS_INODE_REF_KEY;
	search_key.offset = parent_objectid;
	ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
	if (ret == 0) {
		struct btrfs_inode_ref *victim_ref;
		unsigned long ptr;
		unsigned long ptr_end;

		leaf = path->nodes[0];

		/* are we trying to overwrite a back ref for the root directory
		 * if so, just jump out, we're done
		 */
		if (search_key.objectid == search_key.offset)
			return 1;

		/* check all the names in this back reference to see
		 * if they are in the log.  if so, we allow them to stay
		 * otherwise they must be unlinked as a conflict
		 */
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		ptr_end = ptr + btrfs_item_size_nr(leaf, path->slots[0]);
		while (ptr < ptr_end) {
			victim_ref = (struct btrfs_inode_ref *)ptr;
			victim_name_len = btrfs_inode_ref_name_len(leaf,
								   victim_ref);
			victim_name = kmalloc(victim_name_len, GFP_NOFS);
			if (!victim_name)
				return -ENOMEM;

			read_extent_buffer(leaf, victim_name,
					   (unsigned long)(victim_ref + 1),
					   victim_name_len);

			ret = backref_in_log(log_root, &search_key,
					     parent_objectid, victim_name,
					     victim_name_len);
			if (ret < 0) {
				kfree(victim_name);
				return ret;
			} else if (!ret) {
				inc_nlink(&inode->vfs_inode);
				btrfs_release_path(path);

				ret = btrfs_unlink_inode(trans, root, dir, inode,
						victim_name, victim_name_len);
				kfree(victim_name);
				if (ret)
					return ret;
				ret = btrfs_run_delayed_items(trans);
				if (ret)
					return ret;
				*search_done = 1;
				goto again;
			}
			kfree(victim_name);

			ptr = (unsigned long)(victim_ref + 1) + victim_name_len;
		}

		/*
		 * NOTE: we have searched root tree and checked the
		 * corresponding ref, it does not need to check again.
		 */
		*search_done = 1;
	}
	btrfs_release_path(path);

	/* Same search but for extended refs */
	extref = btrfs_lookup_inode_extref(NULL, root, path, name, namelen,
					   inode_objectid, parent_objectid, 0,
					   0);
	if (!IS_ERR_OR_NULL(extref)) {
		u32 item_size;
		u32 cur_offset = 0;
		unsigned long base;
		struct inode *victim_parent;

		leaf = path->nodes[0];

		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		base = btrfs_item_ptr_offset(leaf, path->slots[0]);

		while (cur_offset < item_size) {
			extref = (struct btrfs_inode_extref *)(base + cur_offset);

			victim_name_len = btrfs_inode_extref_name_len(leaf, extref);

			if (btrfs_inode_extref_parent(leaf, extref) != parent_objectid)
				goto next;

			victim_name = kmalloc(victim_name_len, GFP_NOFS);
			if (!victim_name)
				return -ENOMEM;
			read_extent_buffer(leaf, victim_name, (unsigned long)&extref->name,
					   victim_name_len);

			search_key.objectid = inode_objectid;
			search_key.type = BTRFS_INODE_EXTREF_KEY;
			search_key.offset = btrfs_extref_hash(parent_objectid,
							      victim_name,
							      victim_name_len);
			ret = backref_in_log(log_root, &search_key,
					     parent_objectid, victim_name,
					     victim_name_len);
			if (ret < 0) {
				/* don't leak the name buffer on error */
				kfree(victim_name);
				return ret;
			} else if (!ret) {
				ret = -ENOENT;
				victim_parent = read_one_inode(root,
						parent_objectid);
				if (victim_parent) {
					inc_nlink(&inode->vfs_inode);
					btrfs_release_path(path);

					ret = btrfs_unlink_inode(trans, root,
							BTRFS_I(victim_parent),
							inode,
							victim_name,
							victim_name_len);
					if (!ret)
						ret = btrfs_run_delayed_items(
								trans);
				}
				iput(victim_parent);
				kfree(victim_name);
				if (ret)
					return ret;
				*search_done = 1;
				goto again;
			}
			kfree(victim_name);
next:
			cur_offset += victim_name_len + sizeof(*extref);
		}
		*search_done = 1;
	}
	btrfs_release_path(path);

	/* look for a conflicting sequence number */
	di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
					 ref_index, name, namelen, 0);
	if (di && !IS_ERR(di)) {
		ret = drop_one_dir_item(trans, root, path, dir, di);
		if (ret)
			return ret;
	}
	btrfs_release_path(path);

	/* look for a conflicting name */
	di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir),
				   name, namelen, 0);
	if (di && !IS_ERR(di)) {
		ret = drop_one_dir_item(trans, root, path, dir, di);
		if (ret)
			return ret;
	}
	btrfs_release_path(path);

	return 0;
}
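
/*
 * Extract the name (into a newly allocated buffer that the caller must
 * free), the index and the parent directory objectid from an inode extref
 * item.
 */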
static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
			     u32 *namelen, char **name, u64 *index,
			     u64 *parent_objectid)
{
	struct btrfs_inode_extref *extref;

	extref = (struct btrfs_inode_extref *)ref_ptr;

	*namelen = btrfs_inode_extref_name_len(eb, extref);
	*name = kmalloc(*namelen, GFP_NOFS);
	if (*name == NULL)
		return -ENOMEM;

	read_extent_buffer(eb, *name, (unsigned long)&extref->name,
			   *namelen);

	if (index)
		*index = btrfs_inode_extref_index(eb, extref);
	if (parent_objectid)
		*parent_objectid = btrfs_inode_extref_parent(eb, extref);

	return 0;
}

static int ref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr,
			  u32 *namelen, char **name, u64 *index)
{
	struct btrfs_inode_ref *ref;

	ref = (struct btrfs_inode_ref *)ref_ptr;

	*namelen = btrfs_inode_ref_name_len(eb, ref);
	*name = kmalloc(*namelen, GFP_NOFS);
	if (*name == NULL)
		return -ENOMEM;

	read_extent_buffer(eb, *name, (unsigned long)(ref + 1), *namelen);

	if (index)
		*index = btrfs_inode_ref_index(eb, ref);

	return 0;
}

/*
 * Take an inode reference item from the log tree and iterate all names from
 * the inode reference item in the subvolume tree with the same key (if it
 * exists).  For any name that is not in the inode reference item from the
 * log tree, do a proper unlink of that name (that is, remove its entry from
 * the inode reference item and both dir index keys).
 */
static int unlink_old_inode_refs(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_inode *inode,
				 struct extent_buffer *log_eb,
				 int log_slot,
				 struct btrfs_key *key)
{
	int ret;
	unsigned long ref_ptr;
	unsigned long ref_end;
	struct extent_buffer *eb;

again:
	btrfs_release_path(path);
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret > 0) {
		ret = 0;
		goto out;
	}
	if (ret < 0)
		goto out;

	eb = path->nodes[0];
	ref_ptr = btrfs_item_ptr_offset(eb, path->slots[0]);
	ref_end = ref_ptr + btrfs_item_size_nr(eb, path->slots[0]);
	while (ref_ptr < ref_end) {
		char *name = NULL;
		int namelen;
		u64 parent_id;

		if (key->type == BTRFS_INODE_EXTREF_KEY) {
			ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
						NULL, &parent_id);
		} else {
			parent_id = key->offset;
			ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
					     NULL);
		}
		if (ret)
			goto out;

		if (key->type == BTRFS_INODE_EXTREF_KEY)
			ret = !!btrfs_find_name_in_ext_backref(log_eb, log_slot,
							       parent_id, name,
							       namelen);
		else
			ret = !!btrfs_find_name_in_backref(log_eb, log_slot,
							   name, namelen);

		if (!ret) {
			struct inode *dir;

			btrfs_release_path(path);
			dir = read_one_inode(root, parent_id);
			if (!dir) {
				ret = -ENOENT;
				kfree(name);
				goto out;
			}
			ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir),
						 inode, name, namelen);
			kfree(name);
			iput(dir);
			if (ret)
				goto out;
			goto again;
		}

		kfree(name);
		ref_ptr += namelen;
		if (key->type == BTRFS_INODE_EXTREF_KEY)
			ref_ptr += sizeof(struct btrfs_inode_extref);
		else
			ref_ptr += sizeof(struct btrfs_inode_ref);
	}
	ret = 0;
out:
	btrfs_release_path(path);
	return ret;
}
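
/*
 * Check if an inode reference item with the given name exists for the inode
 * in the given parent directory.  Returns 1 if it exists, 0 if it does not
 * and < 0 on error.
 */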
static int btrfs_inode_ref_exists(struct inode *inode, struct inode *dir,
				  const u8 ref_type, const char *name,
				  const int namelen)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	const u64 parent_id = btrfs_ino(BTRFS_I(dir));
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = btrfs_ino(BTRFS_I(inode));
	key.type = ref_type;
	if (key.type == BTRFS_INODE_REF_KEY)
		key.offset = parent_id;
	else
		key.offset = btrfs_extref_hash(parent_id, name, namelen);

	ret = btrfs_search_slot(NULL, BTRFS_I(inode)->root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret > 0) {
		ret = 0;
		goto out;
	}
	if (key.type == BTRFS_INODE_EXTREF_KEY)
		ret = !!btrfs_find_name_in_ext_backref(path->nodes[0],
				path->slots[0], parent_id, name, namelen);
	else
		ret = !!btrfs_find_name_in_backref(path->nodes[0], path->slots[0],
						   name, namelen);

out:
	btrfs_free_path(path);
	return ret;
}

static int add_link(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct inode *dir, struct inode *inode, const char *name,
		    int namelen, u64 ref_index)
{
	struct btrfs_dir_item *dir_item;
	struct btrfs_key key;
	struct btrfs_path *path;
	struct inode *other_inode = NULL;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	dir_item = btrfs_lookup_dir_item(NULL, root, path,
					 btrfs_ino(BTRFS_I(dir)),
					 name, namelen, 0);
	if (!dir_item) {
		btrfs_release_path(path);
		goto add_link;
	} else if (IS_ERR(dir_item)) {
		ret = PTR_ERR(dir_item);
		goto out;
	}

	/*
	 * Our inode's dentry collides with the dentry of another inode which is
	 * in the log but not yet processed since it has a higher inode number.
	 * So delete that other dentry.
	 */
	btrfs_dir_item_key_to_cpu(path->nodes[0], dir_item, &key);
	btrfs_release_path(path);
	other_inode = read_one_inode(root, key.objectid);
	if (!other_inode) {
		ret = -ENOENT;
		goto out;
	}
	ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir), BTRFS_I(other_inode),
				 name, namelen);
	if (ret)
		goto out;
	/*
	 * If we dropped the link count to 0, bump it so that later the iput()
	 * on the inode will not free it.  We will fixup the link count later.
	 */
	if (other_inode->i_nlink == 0)
		inc_nlink(other_inode);

	ret = btrfs_run_delayed_items(trans);
	if (ret)
		goto out;
add_link:
	ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
			     name, namelen, 0, ref_index);
out:
	iput(other_inode);
	btrfs_free_path(path);

	return ret;
}

/*
 * replay one inode back reference item found in the log tree.
 * eb, slot and key refer to the buffer and key found in the log tree.
 * root is the destination we are replaying into, and path is for temp
 * use by this function.  (it should be released on return).
 */
static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_root *log,
				  struct btrfs_path *path,
				  struct extent_buffer *eb, int slot,
				  struct btrfs_key *key)
{
	struct inode *dir = NULL;
	struct inode *inode = NULL;
	unsigned long ref_ptr;
	unsigned long ref_end;
	char *name = NULL;
	int namelen;
	int ret;
	int search_done = 0;
	int log_ref_ver = 0;
	u64 parent_objectid;
	u64 inode_objectid;
	u64 ref_index = 0;
	int ref_struct_size;

	ref_ptr = btrfs_item_ptr_offset(eb, slot);
	ref_end = ref_ptr + btrfs_item_size_nr(eb, slot);

	if (key->type == BTRFS_INODE_EXTREF_KEY) {
		struct btrfs_inode_extref *r;

		ref_struct_size = sizeof(struct btrfs_inode_extref);
		log_ref_ver = 1;
		r = (struct btrfs_inode_extref *)ref_ptr;
		parent_objectid = btrfs_inode_extref_parent(eb, r);
	} else {
		ref_struct_size = sizeof(struct btrfs_inode_ref);
		parent_objectid = key->offset;
	}
	inode_objectid = key->objectid;

	/*
	 * it is possible that we didn't log all the parent directories
	 * for a given inode.  If we don't find the dir, just don't
	 * copy the back ref in.  The link count fixup code will take
	 * care of the rest
	 */
	dir = read_one_inode(root, parent_objectid);
	if (!dir) {
		ret = -ENOENT;
		goto out;
	}

	inode = read_one_inode(root, inode_objectid);
	if (!inode) {
		ret = -EIO;
		goto out;
	}

	while (ref_ptr < ref_end) {
		if (log_ref_ver) {
			ret = extref_get_fields(eb, ref_ptr, &namelen, &name,
						&ref_index, &parent_objectid);
			/*
			 * parent object can change from one array
			 * item to another.
			 */
			if (!dir)
				dir = read_one_inode(root, parent_objectid);
			if (!dir) {
				ret = -ENOENT;
				goto out;
			}
		} else {
			ret = ref_get_fields(eb, ref_ptr, &namelen, &name,
					     &ref_index);
		}
		if (ret)
			goto out;

		/* if we already have a perfect match, we're done */
		if (!inode_in_dir(root, path, btrfs_ino(BTRFS_I(dir)),
				  btrfs_ino(BTRFS_I(inode)), ref_index,
				  name, namelen)) {
			/*
			 * look for a conflicting back reference in the
			 * metadata.  if we find one we have to unlink that name
			 * of the file before we add our new link.  Later on, we
			 * overwrite any existing back reference, and we don't
			 * want to create dangling pointers in the directory.
			 */

			if (!search_done) {
				ret = __add_inode_ref(trans, root, path, log,
						      BTRFS_I(dir),
						      BTRFS_I(inode),
						      inode_objectid,
						      parent_objectid,
						      ref_index, name, namelen,
						      &search_done);
				if (ret) {
					if (ret == 1)
						ret = 0;
					goto out;
				}
			}

			/*
			 * If a reference item already exists for this inode
			 * with the same parent and name, but different index,
			 * drop it and the corresponding directory index entries
			 * from the parent before adding the new reference item
			 * and dir index entries, otherwise we would fail with
			 * -EEXIST returned from btrfs_add_link() below.
			 */
			ret = btrfs_inode_ref_exists(inode, dir, key->type,
						     name, namelen);
			if (ret > 0) {
				ret = btrfs_unlink_inode(trans, root,
							 BTRFS_I(dir),
							 BTRFS_I(inode),
							 name, namelen);
				/*
				 * If we dropped the link count to 0, bump it so
				 * that later the iput() on the inode will not
				 * free it.  We will fixup the link count later.
				 */
				if (!ret && inode->i_nlink == 0)
					inc_nlink(inode);
			}
			if (ret < 0)
				goto out;

			/* insert our name */
			ret = add_link(trans, root, dir, inode, name, namelen,
				       ref_index);
			if (ret)
				goto out;

			ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
			if (ret)
				goto out;
		}

		ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen;
		kfree(name);
		name = NULL;
		if (log_ref_ver) {
			iput(dir);
			dir = NULL;
		}
	}

	/*
	 * Before we overwrite the inode reference item in the subvolume tree
	 * with the item from the log tree, we must unlink all names from the
	 * parent directory that are in the subvolume's tree inode reference
	 * item, otherwise we end up with an inconsistent subvolume tree where
	 * dir index entries exist for a name but there is no inode reference
	 * item with the same name.
	 */
	ret = unlink_old_inode_refs(trans, root, path, BTRFS_I(inode), eb, slot,
				    key);
	if (ret)
		goto out;

	/* finally write the back reference in the inode */
	ret = overwrite_item(trans, root, path, eb, slot, key);
out:
	btrfs_release_path(path);
	kfree(name);
	iput(dir);
	iput(inode);
	return ret;
}
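
/*
 * Count the number of names pointing at the inode that are stored in
 * extended reference items.  Returns the count or a negative errno on
 * failure.
 */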
static int count_inode_extrefs(struct btrfs_root *root,
		struct btrfs_inode *inode, struct btrfs_path *path)
{
	int ret = 0;
	int name_len;
	unsigned int nlink = 0;
	u32 item_size;
	u32 cur_offset = 0;
	u64 inode_objectid = btrfs_ino(inode);
	u64 offset = 0;
	unsigned long ptr;
	struct btrfs_inode_extref *extref;
	struct extent_buffer *leaf;

	while (1) {
		ret = btrfs_find_one_extref(root, inode_objectid, offset, path,
					    &extref, &offset);
		if (ret)
			break;

		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		cur_offset = 0;

		while (cur_offset < item_size) {
			extref = (struct btrfs_inode_extref *) (ptr + cur_offset);
			name_len = btrfs_inode_extref_name_len(leaf, extref);

			nlink++;

			cur_offset += name_len + sizeof(*extref);
		}

		offset++;
		btrfs_release_path(path);
	}
	btrfs_release_path(path);

	if (ret < 0 && ret != -ENOENT)
		return ret;
	return nlink;
}

static int count_inode_refs(struct btrfs_root *root,
			struct btrfs_inode *inode, struct btrfs_path *path)
{
	int ret;
	struct btrfs_key key;
	unsigned int nlink = 0;
	unsigned long ptr;
	unsigned long ptr_end;
	int name_len;
	u64 ino = btrfs_ino(inode);

	key.objectid = ino;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			break;
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}
process_slot:
		btrfs_item_key_to_cpu(path->nodes[0], &key,
				      path->slots[0]);
		if (key.objectid != ino ||
		    key.type != BTRFS_INODE_REF_KEY)
			break;
		ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]);
		ptr_end = ptr + btrfs_item_size_nr(path->nodes[0],
						   path->slots[0]);
		while (ptr < ptr_end) {
			struct btrfs_inode_ref *ref;

			ref = (struct btrfs_inode_ref *)ptr;
			name_len = btrfs_inode_ref_name_len(path->nodes[0],
							    ref);
			ptr = (unsigned long)(ref + 1) + name_len;
			nlink++;
		}

		if (key.offset == 0)
			break;
		if (path->slots[0] > 0) {
			path->slots[0]--;
			goto process_slot;
		}
		key.offset--;
		btrfs_release_path(path);
	}
	btrfs_release_path(path);

	return nlink;
}

/*
 * There are a few corners where the link count of the file can't
 * be properly maintained during replay.  So, instead of adding
 * lots of complexity to the log code, we just scan the backrefs
 * for any file that has been through replay.
 *
 * The scan will update the link count on the inode to reflect the
 * number of back refs found.  If it goes down to zero, the iput
 * will free the inode.
 */
static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct inode *inode)
{
	struct btrfs_path *path;
	int ret;
	u64 nlink = 0;
	u64 ino = btrfs_ino(BTRFS_I(inode));

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = count_inode_refs(root, BTRFS_I(inode), path);
	if (ret < 0)
		goto out;

	nlink = ret;

	ret = count_inode_extrefs(root, BTRFS_I(inode), path);
	if (ret < 0)
		goto out;

	nlink += ret;

	ret = 0;

	if (nlink != inode->i_nlink) {
		set_nlink(inode, nlink);
		ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
		if (ret)
			goto out;
	}
	BTRFS_I(inode)->index_cnt = (u64)-1;

	if (inode->i_nlink == 0) {
		if (S_ISDIR(inode->i_mode)) {
			ret = replay_dir_deletes(trans, root, NULL, path,
						 ino, 1);
			if (ret)
				goto out;
		}
		ret = btrfs_insert_orphan_item(trans, root, ino);
		if (ret == -EEXIST)
			ret = 0;
	}

out:
	btrfs_free_path(path);
	return ret;
}

static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    struct btrfs_path *path)
{
	int ret;
	struct btrfs_key key;
	struct inode *inode;

	key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = (u64)-1;
	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0)
			break;

		if (ret == 1) {
			ret = 0;
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}

		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid != BTRFS_TREE_LOG_FIXUP_OBJECTID ||
		    key.type != BTRFS_ORPHAN_ITEM_KEY)
			break;

		ret = btrfs_del_item(trans, root, path);
		if (ret)
			break;

		btrfs_release_path(path);
		inode = read_one_inode(root, key.offset);
		if (!inode) {
			ret = -EIO;
			break;
		}

		ret = fixup_inode_link_count(trans, root, inode);
		iput(inode);
		if (ret)
			break;

		/*
		 * fixup on a directory may create new entries,
		 * make sure we always look for the highest possible
		 * offset
		 */
		key.offset = (u64)-1;
	}
	btrfs_release_path(path);
	return ret;
}

/*
 * record a given inode in the fixup dir so we can check its link
 * count when replay is done.  The link count is incremented here
 * so the inode won't go away until we check it
 */
static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      struct btrfs_path *path,
				      u64 objectid)
{
	struct btrfs_key key;
	int ret = 0;
	struct inode *inode;

	inode = read_one_inode(root, objectid);
	if (!inode)
		return -EIO;

	key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = objectid;

	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);

	btrfs_release_path(path);
	if (ret == 0) {
		if (!inode->i_nlink)
			set_nlink(inode, 1);
		else
			inc_nlink(inode);
		ret = btrfs_update_inode(trans, root, BTRFS_I(inode));
	} else if (ret == -EEXIST) {
		ret = 0;
	}
	iput(inode);

	return ret;
}

/*
 * when replaying the log for a directory, we only insert names
 * for inodes that actually exist.  This means an fsync on a directory
 * does not implicitly fsync all the new files in it
 */
static noinline int insert_one_name(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    u64 dirid, u64 index,
				    char *name, int name_len,
				    struct btrfs_key *location)
{
	struct inode *inode;
	struct inode *dir;
	int ret;

	inode = read_one_inode(root, location->objectid);
	if (!inode)
		return -ENOENT;

	dir = read_one_inode(root, dirid);
	if (!dir) {
		iput(inode);
		return -EIO;
	}

	ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name,
			     name_len, 1, index);

	/* FIXME, put inode into FIXUP list */

	iput(inode);
	iput(dir);
	return ret;
}

/*
 * take a single entry in a log directory item and replay it into
 * the subvolume.
 *
 * if a conflicting item exists in the subdirectory already,
 * the inode it points to is unlinked and put into the link count
 * fix up tree.
 *
 * If a name from the log points to a file or directory that does
 * not exist in the FS, it is skipped.  fsyncs on directories
 * do not force down inodes inside that directory, just changes to the
 * names or unlinks in a directory.
 *
 * Returns < 0 on error, 0 if the name wasn't replayed (dentry points to a
 * non-existing inode) and 1 if the name was replayed.
 */
static noinline int replay_one_name(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct extent_buffer *eb,
				    struct btrfs_dir_item *di,
				    struct btrfs_key *key)
{
	char *name;
	int name_len;
	struct btrfs_dir_item *dst_di;
	struct btrfs_key found_key;
	struct btrfs_key log_key;
	struct inode *dir;
	u8 log_type;
	int exists;
	int ret = 0;
	bool update_size = (key->type == BTRFS_DIR_INDEX_KEY);
	bool name_added = false;

	dir = read_one_inode(root, key->objectid);
	if (!dir)
		return -EIO;

	name_len = btrfs_dir_name_len(eb, di);
	name = kmalloc(name_len, GFP_NOFS);
	if (!name) {
		ret = -ENOMEM;
		goto out;
	}

	log_type = btrfs_dir_type(eb, di);
	read_extent_buffer(eb, name, (unsigned long)(di + 1),
			   name_len);

	btrfs_dir_item_key_to_cpu(eb, di, &log_key);
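	/* check if the inode the log dentry points to exists in the subvolume */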
	exists = btrfs_lookup_inode(trans, root, path, &log_key, 0);
	if (exists == 0)
		exists = 1;
	else
		exists = 0;
	btrfs_release_path(path);

	if (key->type == BTRFS_DIR_ITEM_KEY) {
		dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid,
					       name, name_len, 1);
	} else if (key->type == BTRFS_DIR_INDEX_KEY) {
		dst_di = btrfs_lookup_dir_index_item(trans, root, path,
						     key->objectid,
						     key->offset, name,
						     name_len, 1);
	} else {
		/* Corruption */
		ret = -EINVAL;
		goto out;
	}
	if (IS_ERR_OR_NULL(dst_di)) {
		/* we need a sequence number to insert, so we only
		 * do inserts for the BTRFS_DIR_INDEX_KEY types
		 */
		if (key->type != BTRFS_DIR_INDEX_KEY)
			goto out;
		goto insert;
	}

	btrfs_dir_item_key_to_cpu(path->nodes[0], dst_di, &found_key);
	/* the existing item matches the logged item */
	if (found_key.objectid == log_key.objectid &&
	    found_key.type == log_key.type &&
	    found_key.offset == log_key.offset &&
	    btrfs_dir_type(path->nodes[0], dst_di) == log_type) {
		update_size = false;
		goto out;
	}

	/*
	 * don't drop the conflicting directory entry if the inode
	 * for the new entry doesn't exist
	 */
	if (!exists)
		goto out;

	ret = drop_one_dir_item(trans, root, path, BTRFS_I(dir), dst_di);
	if (ret)
		goto out;

	if (key->type == BTRFS_DIR_INDEX_KEY)
		goto insert;
out:
	btrfs_release_path(path);
	if (!ret && update_size) {
		btrfs_i_size_write(BTRFS_I(dir), dir->i_size + name_len * 2);
		ret = btrfs_update_inode(trans, root, BTRFS_I(dir));
	}
	kfree(name);
	iput(dir);
	if (!ret && name_added)
		ret = 1;
	return ret;

insert:
	/*
	 * Check if the inode reference exists in the log for the given name,
	 * inode and parent inode
	 */
	found_key.objectid = log_key.objectid;
	found_key.type = BTRFS_INODE_REF_KEY;
	found_key.offset = key->objectid;
	ret = backref_in_log(root->log_root, &found_key, 0, name, name_len);
	if (ret < 0) {
		goto out;
	} else if (ret) {
		/* The dentry will be added later. */
		ret = 0;
		update_size = false;
		goto out;
	}
*/
2035 ret = 0;
2036 update_size = false;
2037 goto out;
2038 }
2039
2040 found_key.objectid = log_key.objectid;
2041 found_key.type = BTRFS_INODE_EXTREF_KEY;
2042 found_key.offset = key->objectid;
2043 ret = backref_in_log(root->log_root, &found_key, key->objectid, name,
2044 name_len);
2045 if (ret < 0) {
2046 goto out;
2047 } else if (ret) {
2048 /* The dentry will be added later. */
2049 ret = 0;
2050 update_size = false;
2051 goto out;
2052 }
2053 btrfs_release_path(path);
2054 ret = insert_one_name(trans, root, key->objectid, key->offset,
2055 name, name_len, &log_key);
2056 if (ret && ret != -ENOENT && ret != -EEXIST)
2057 goto out;
2058 if (!ret)
2059 name_added = true;
2060 update_size = false;
2061 ret = 0;
2062 goto out;
2063 }
2064
2065 /*
2066 * find all the names in a directory item and reconcile them into
2067 * the subvolume. Only BTRFS_DIR_ITEM_KEY types will have more than
2068 * one name in a directory item, but the same code gets used for
2069 * both key types
2070 */
2071 static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans,
2072 struct btrfs_root *root,
2073 struct btrfs_path *path,
2074 struct extent_buffer *eb, int slot,
2075 struct btrfs_key *key)
2076 {
2077 int ret = 0;
2078 u32 item_size = btrfs_item_size_nr(eb, slot);
2079 struct btrfs_dir_item *di;
2080 int name_len;
2081 unsigned long ptr;
2082 unsigned long ptr_end;
2083 struct btrfs_path *fixup_path = NULL;
2084
2085 ptr = btrfs_item_ptr_offset(eb, slot);
2086 ptr_end = ptr + item_size;
2087 while (ptr < ptr_end) {
2088 di = (struct btrfs_dir_item *)ptr;
2089 name_len = btrfs_dir_name_len(eb, di);
2090 ret = replay_one_name(trans, root, path, eb, di, key);
2091 if (ret < 0)
2092 break;
2093 ptr = (unsigned long)(di + 1);
2094 ptr += name_len;
2095
2096 /*
2097 * If this entry refers to a non-directory (directories cannot
2098 * have a link count > 1) and it was added in a transaction
2099 * that was not committed, make sure we fix up the link count
2100 * of the inode the entry points to. Otherwise something like
2101 * the following would result in a directory pointing to an
2102 * inode with a wrong link count that does not account for this
2103 * dir entry:
2104 *
2105 * mkdir testdir
2106 * touch testdir/foo
2107 * touch testdir/bar
2108 * sync
2109 *
2110 * ln testdir/bar testdir/bar_link
2111 * ln testdir/foo testdir/foo_link
2112 * xfs_io -c "fsync" testdir/bar
2113 *
2114 * <power failure>
2115 *
2116 * mount fs, log replay happens
2117 *
2118 * File foo would remain with a link count of 1 when it has two
2119 * entries pointing to it in the directory testdir. This would
2120 * make it impossible to ever delete the parent directory, as
2121 * it would result in stale dentries that can never be deleted.
2122 */
2123 if (ret == 1 && btrfs_dir_type(eb, di) != BTRFS_FT_DIR) {
2124 struct btrfs_key di_key;
2125
2126 if (!fixup_path) {
2127 fixup_path = btrfs_alloc_path();
2128 if (!fixup_path) {
2129 ret = -ENOMEM;
2130 break;
2131 }
2132 }
2133
2134 btrfs_dir_item_key_to_cpu(eb, di, &di_key);
2135 ret = link_to_fixup_dir(trans, root, fixup_path,
2136 di_key.objectid);
2137 if (ret)
2138 break;
2139 }
2140 ret = 0;
2141 }
2142 btrfs_free_path(fixup_path);
2143 return ret;
2144 }
2145
2146 /*
2147 * directory replay has two parts. There are the standard directory
2148 * items in the log copied from the subvolume, and range items
2149 * created in the log while the subvolume was logged.
2150 * 2151 * The range items tell us which parts of the key space the log 2152 * is authoritative for. During replay, if a key in the subvolume 2153 * directory is in a logged range item, but not actually in the log 2154 * that means it was deleted from the directory before the fsync 2155 * and should be removed. 2156 */ 2157 static noinline int find_dir_range(struct btrfs_root *root, 2158 struct btrfs_path *path, 2159 u64 dirid, int key_type, 2160 u64 *start_ret, u64 *end_ret) 2161 { 2162 struct btrfs_key key; 2163 u64 found_end; 2164 struct btrfs_dir_log_item *item; 2165 int ret; 2166 int nritems; 2167 2168 if (*start_ret == (u64)-1) 2169 return 1; 2170 2171 key.objectid = dirid; 2172 key.type = key_type; 2173 key.offset = *start_ret; 2174 2175 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 2176 if (ret < 0) 2177 goto out; 2178 if (ret > 0) { 2179 if (path->slots[0] == 0) 2180 goto out; 2181 path->slots[0]--; 2182 } 2183 if (ret != 0) 2184 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); 2185 2186 if (key.type != key_type || key.objectid != dirid) { 2187 ret = 1; 2188 goto next; 2189 } 2190 item = btrfs_item_ptr(path->nodes[0], path->slots[0], 2191 struct btrfs_dir_log_item); 2192 found_end = btrfs_dir_log_end(path->nodes[0], item); 2193 2194 if (*start_ret >= key.offset && *start_ret <= found_end) { 2195 ret = 0; 2196 *start_ret = key.offset; 2197 *end_ret = found_end; 2198 goto out; 2199 } 2200 ret = 1; 2201 next: 2202 /* check the next slot in the tree to see if it is a valid item */ 2203 nritems = btrfs_header_nritems(path->nodes[0]); 2204 path->slots[0]++; 2205 if (path->slots[0] >= nritems) { 2206 ret = btrfs_next_leaf(root, path); 2207 if (ret) 2208 goto out; 2209 } 2210 2211 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); 2212 2213 if (key.type != key_type || key.objectid != dirid) { 2214 ret = 1; 2215 goto out; 2216 } 2217 item = btrfs_item_ptr(path->nodes[0], path->slots[0], 2218 struct btrfs_dir_log_item); 2219 found_end = btrfs_dir_log_end(path->nodes[0], item); 2220 *start_ret = key.offset; 2221 *end_ret = found_end; 2222 ret = 0; 2223 out: 2224 btrfs_release_path(path); 2225 return ret; 2226 } 2227 2228 /* 2229 * this looks for a given directory item in the log. 
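(It is called by replay_dir_deletes() below for each directory entry found inside a key range the log is authoritative for; see find_dir_range() above.)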
If the directory 2230 * item is not in the log, the item is removed and the inode it points 2231 * to is unlinked 2232 */ 2233 static noinline int check_item_in_log(struct btrfs_trans_handle *trans, 2234 struct btrfs_root *root, 2235 struct btrfs_root *log, 2236 struct btrfs_path *path, 2237 struct btrfs_path *log_path, 2238 struct inode *dir, 2239 struct btrfs_key *dir_key) 2240 { 2241 int ret; 2242 struct extent_buffer *eb; 2243 int slot; 2244 u32 item_size; 2245 struct btrfs_dir_item *di; 2246 struct btrfs_dir_item *log_di; 2247 int name_len; 2248 unsigned long ptr; 2249 unsigned long ptr_end; 2250 char *name; 2251 struct inode *inode; 2252 struct btrfs_key location; 2253 2254 again: 2255 eb = path->nodes[0]; 2256 slot = path->slots[0]; 2257 item_size = btrfs_item_size_nr(eb, slot); 2258 ptr = btrfs_item_ptr_offset(eb, slot); 2259 ptr_end = ptr + item_size; 2260 while (ptr < ptr_end) { 2261 di = (struct btrfs_dir_item *)ptr; 2262 name_len = btrfs_dir_name_len(eb, di); 2263 name = kmalloc(name_len, GFP_NOFS); 2264 if (!name) { 2265 ret = -ENOMEM; 2266 goto out; 2267 } 2268 read_extent_buffer(eb, name, (unsigned long)(di + 1), 2269 name_len); 2270 log_di = NULL; 2271 if (log && dir_key->type == BTRFS_DIR_ITEM_KEY) { 2272 log_di = btrfs_lookup_dir_item(trans, log, log_path, 2273 dir_key->objectid, 2274 name, name_len, 0); 2275 } else if (log && dir_key->type == BTRFS_DIR_INDEX_KEY) { 2276 log_di = btrfs_lookup_dir_index_item(trans, log, 2277 log_path, 2278 dir_key->objectid, 2279 dir_key->offset, 2280 name, name_len, 0); 2281 } 2282 if (!log_di || log_di == ERR_PTR(-ENOENT)) { 2283 btrfs_dir_item_key_to_cpu(eb, di, &location); 2284 btrfs_release_path(path); 2285 btrfs_release_path(log_path); 2286 inode = read_one_inode(root, location.objectid); 2287 if (!inode) { 2288 kfree(name); 2289 return -EIO; 2290 } 2291 2292 ret = link_to_fixup_dir(trans, root, 2293 path, location.objectid); 2294 if (ret) { 2295 kfree(name); 2296 iput(inode); 2297 goto out; 2298 } 2299 2300 inc_nlink(inode); 2301 ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir), 2302 BTRFS_I(inode), name, name_len); 2303 if (!ret) 2304 ret = btrfs_run_delayed_items(trans); 2305 kfree(name); 2306 iput(inode); 2307 if (ret) 2308 goto out; 2309 2310 /* there might still be more names under this key 2311 * check and repeat if required 2312 */ 2313 ret = btrfs_search_slot(NULL, root, dir_key, path, 2314 0, 0); 2315 if (ret == 0) 2316 goto again; 2317 ret = 0; 2318 goto out; 2319 } else if (IS_ERR(log_di)) { 2320 kfree(name); 2321 return PTR_ERR(log_di); 2322 } 2323 btrfs_release_path(log_path); 2324 kfree(name); 2325 2326 ptr = (unsigned long)(di + 1); 2327 ptr += name_len; 2328 } 2329 ret = 0; 2330 out: 2331 btrfs_release_path(path); 2332 btrfs_release_path(log_path); 2333 return ret; 2334 } 2335 2336 static int replay_xattr_deletes(struct btrfs_trans_handle *trans, 2337 struct btrfs_root *root, 2338 struct btrfs_root *log, 2339 struct btrfs_path *path, 2340 const u64 ino) 2341 { 2342 struct btrfs_key search_key; 2343 struct btrfs_path *log_path; 2344 int i; 2345 int nritems; 2346 int ret; 2347 2348 log_path = btrfs_alloc_path(); 2349 if (!log_path) 2350 return -ENOMEM; 2351 2352 search_key.objectid = ino; 2353 search_key.type = BTRFS_XATTR_ITEM_KEY; 2354 search_key.offset = 0; 2355 again: 2356 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0); 2357 if (ret < 0) 2358 goto out; 2359 process_leaf: 2360 nritems = btrfs_header_nritems(path->nodes[0]); 2361 for (i = path->slots[0]; i < nritems; i++) { 2362 struct btrfs_key key; 
2363 struct btrfs_dir_item *di;
2364 struct btrfs_dir_item *log_di;
2365 u32 total_size;
2366 u32 cur;
2367
2368 btrfs_item_key_to_cpu(path->nodes[0], &key, i);
2369 if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY) {
2370 ret = 0;
2371 goto out;
2372 }
2373
2374 di = btrfs_item_ptr(path->nodes[0], i, struct btrfs_dir_item);
2375 total_size = btrfs_item_size_nr(path->nodes[0], i);
2376 cur = 0;
2377 while (cur < total_size) {
2378 u16 name_len = btrfs_dir_name_len(path->nodes[0], di);
2379 u16 data_len = btrfs_dir_data_len(path->nodes[0], di);
2380 u32 this_len = sizeof(*di) + name_len + data_len;
2381 char *name;
2382
2383 name = kmalloc(name_len, GFP_NOFS);
2384 if (!name) {
2385 ret = -ENOMEM;
2386 goto out;
2387 }
2388 read_extent_buffer(path->nodes[0], name,
2389 (unsigned long)(di + 1), name_len);
2390
2391 log_di = btrfs_lookup_xattr(NULL, log, log_path, ino,
2392 name, name_len, 0);
2393 btrfs_release_path(log_path);
2394 if (!log_di) {
2395 /* Doesn't exist in log tree, so delete it. */
2396 btrfs_release_path(path);
2397 di = btrfs_lookup_xattr(trans, root, path, ino,
2398 name, name_len, -1);
2399 kfree(name);
2400 if (IS_ERR(di)) {
2401 ret = PTR_ERR(di);
2402 goto out;
2403 }
2404 ASSERT(di);
2405 ret = btrfs_delete_one_dir_name(trans, root,
2406 path, di);
2407 if (ret)
2408 goto out;
2409 btrfs_release_path(path);
2410 search_key = key;
2411 goto again;
2412 }
2413 kfree(name);
2414 if (IS_ERR(log_di)) {
2415 ret = PTR_ERR(log_di);
2416 goto out;
2417 }
2418 cur += this_len;
2419 di = (struct btrfs_dir_item *)((char *)di + this_len);
2420 }
2421 }
2422 ret = btrfs_next_leaf(root, path);
2423 if (ret > 0)
2424 ret = 0;
2425 else if (ret == 0)
2426 goto process_leaf;
2427 out:
2428 btrfs_free_path(log_path);
2429 btrfs_release_path(path);
2430 return ret;
2431 }
2432
2433
2434 /*
2435 * deletion replay happens before we copy any new directory items
2436 * out of the log or out of backreferences from inodes. It
2437 * scans the log to find ranges of keys that the log is authoritative for,
2438 * and then scans the directory to find items in those ranges that are
2439 * not present in the log.
2440 *
2441 * Anything we don't find in the log is unlinked and removed from the
2442 * directory.
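*
* A sketch of the case this handles:
*
* mkdir dir
* touch dir/a
* touch dir/b
* sync
* rm dir/b
* fsync dir
* <crash and log replay>
*
* The log is authoritative for dir's logged key ranges and contains a
* dentry for 'a' but none for 'b', so deletion replay must unlink 'b'
* from the subvolume copy of the directory.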
2443 */ 2444 static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans, 2445 struct btrfs_root *root, 2446 struct btrfs_root *log, 2447 struct btrfs_path *path, 2448 u64 dirid, int del_all) 2449 { 2450 u64 range_start; 2451 u64 range_end; 2452 int key_type = BTRFS_DIR_LOG_ITEM_KEY; 2453 int ret = 0; 2454 struct btrfs_key dir_key; 2455 struct btrfs_key found_key; 2456 struct btrfs_path *log_path; 2457 struct inode *dir; 2458 2459 dir_key.objectid = dirid; 2460 dir_key.type = BTRFS_DIR_ITEM_KEY; 2461 log_path = btrfs_alloc_path(); 2462 if (!log_path) 2463 return -ENOMEM; 2464 2465 dir = read_one_inode(root, dirid); 2466 /* it isn't an error if the inode isn't there, that can happen 2467 * because we replay the deletes before we copy in the inode item 2468 * from the log 2469 */ 2470 if (!dir) { 2471 btrfs_free_path(log_path); 2472 return 0; 2473 } 2474 again: 2475 range_start = 0; 2476 range_end = 0; 2477 while (1) { 2478 if (del_all) 2479 range_end = (u64)-1; 2480 else { 2481 ret = find_dir_range(log, path, dirid, key_type, 2482 &range_start, &range_end); 2483 if (ret != 0) 2484 break; 2485 } 2486 2487 dir_key.offset = range_start; 2488 while (1) { 2489 int nritems; 2490 ret = btrfs_search_slot(NULL, root, &dir_key, path, 2491 0, 0); 2492 if (ret < 0) 2493 goto out; 2494 2495 nritems = btrfs_header_nritems(path->nodes[0]); 2496 if (path->slots[0] >= nritems) { 2497 ret = btrfs_next_leaf(root, path); 2498 if (ret == 1) 2499 break; 2500 else if (ret < 0) 2501 goto out; 2502 } 2503 btrfs_item_key_to_cpu(path->nodes[0], &found_key, 2504 path->slots[0]); 2505 if (found_key.objectid != dirid || 2506 found_key.type != dir_key.type) 2507 goto next_type; 2508 2509 if (found_key.offset > range_end) 2510 break; 2511 2512 ret = check_item_in_log(trans, root, log, path, 2513 log_path, dir, 2514 &found_key); 2515 if (ret) 2516 goto out; 2517 if (found_key.offset == (u64)-1) 2518 break; 2519 dir_key.offset = found_key.offset + 1; 2520 } 2521 btrfs_release_path(path); 2522 if (range_end == (u64)-1) 2523 break; 2524 range_start = range_end + 1; 2525 } 2526 2527 next_type: 2528 ret = 0; 2529 if (key_type == BTRFS_DIR_LOG_ITEM_KEY) { 2530 key_type = BTRFS_DIR_LOG_INDEX_KEY; 2531 dir_key.type = BTRFS_DIR_INDEX_KEY; 2532 btrfs_release_path(path); 2533 goto again; 2534 } 2535 out: 2536 btrfs_release_path(path); 2537 btrfs_free_path(log_path); 2538 iput(dir); 2539 return ret; 2540 } 2541 2542 /* 2543 * the process_func used to replay items from the log tree. This 2544 * gets called in two different stages. The first stage just looks 2545 * for inodes and makes sure they are all copied into the subvolume. 2546 * 2547 * The second stage copies all the other item types from the log into 2548 * the subvolume. The two stage approach is slower, but gets rid of 2549 * lots of complexity around inodes referencing other inodes that exist 2550 * only in the log (references come from either directory items or inode 2551 * back refs). 
2552 */ 2553 static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb, 2554 struct walk_control *wc, u64 gen, int level) 2555 { 2556 int nritems; 2557 struct btrfs_path *path; 2558 struct btrfs_root *root = wc->replay_dest; 2559 struct btrfs_key key; 2560 int i; 2561 int ret; 2562 2563 ret = btrfs_read_buffer(eb, gen, level, NULL); 2564 if (ret) 2565 return ret; 2566 2567 level = btrfs_header_level(eb); 2568 2569 if (level != 0) 2570 return 0; 2571 2572 path = btrfs_alloc_path(); 2573 if (!path) 2574 return -ENOMEM; 2575 2576 nritems = btrfs_header_nritems(eb); 2577 for (i = 0; i < nritems; i++) { 2578 btrfs_item_key_to_cpu(eb, &key, i); 2579 2580 /* inode keys are done during the first stage */ 2581 if (key.type == BTRFS_INODE_ITEM_KEY && 2582 wc->stage == LOG_WALK_REPLAY_INODES) { 2583 struct btrfs_inode_item *inode_item; 2584 u32 mode; 2585 2586 inode_item = btrfs_item_ptr(eb, i, 2587 struct btrfs_inode_item); 2588 /* 2589 * If we have a tmpfile (O_TMPFILE) that got fsync'ed 2590 * and never got linked before the fsync, skip it, as 2591 * replaying it is pointless since it would be deleted 2592 * later. We skip logging tmpfiles, but it's always 2593 * possible we are replaying a log created with a kernel 2594 * that used to log tmpfiles. 2595 */ 2596 if (btrfs_inode_nlink(eb, inode_item) == 0) { 2597 wc->ignore_cur_inode = true; 2598 continue; 2599 } else { 2600 wc->ignore_cur_inode = false; 2601 } 2602 ret = replay_xattr_deletes(wc->trans, root, log, 2603 path, key.objectid); 2604 if (ret) 2605 break; 2606 mode = btrfs_inode_mode(eb, inode_item); 2607 if (S_ISDIR(mode)) { 2608 ret = replay_dir_deletes(wc->trans, 2609 root, log, path, key.objectid, 0); 2610 if (ret) 2611 break; 2612 } 2613 ret = overwrite_item(wc->trans, root, path, 2614 eb, i, &key); 2615 if (ret) 2616 break; 2617 2618 /* 2619 * Before replaying extents, truncate the inode to its 2620 * size. We need to do it now and not after log replay 2621 * because before an fsync we can have prealloc extents 2622 * added beyond the inode's i_size. If we did it after, 2623 * through orphan cleanup for example, we would drop 2624 * those prealloc extents just after replaying them. 2625 */ 2626 if (S_ISREG(mode)) { 2627 struct btrfs_drop_extents_args drop_args = { 0 }; 2628 struct inode *inode; 2629 u64 from; 2630 2631 inode = read_one_inode(root, key.objectid); 2632 if (!inode) { 2633 ret = -EIO; 2634 break; 2635 } 2636 from = ALIGN(i_size_read(inode), 2637 root->fs_info->sectorsize); 2638 drop_args.start = from; 2639 drop_args.end = (u64)-1; 2640 drop_args.drop_cache = true; 2641 ret = btrfs_drop_extents(wc->trans, root, 2642 BTRFS_I(inode), 2643 &drop_args); 2644 if (!ret) { 2645 inode_sub_bytes(inode, 2646 drop_args.bytes_found); 2647 /* Update the inode's nbytes. 
*/ 2648 ret = btrfs_update_inode(wc->trans, 2649 root, BTRFS_I(inode)); 2650 } 2651 iput(inode); 2652 if (ret) 2653 break; 2654 } 2655 2656 ret = link_to_fixup_dir(wc->trans, root, 2657 path, key.objectid); 2658 if (ret) 2659 break; 2660 } 2661 2662 if (wc->ignore_cur_inode) 2663 continue; 2664 2665 if (key.type == BTRFS_DIR_INDEX_KEY && 2666 wc->stage == LOG_WALK_REPLAY_DIR_INDEX) { 2667 ret = replay_one_dir_item(wc->trans, root, path, 2668 eb, i, &key); 2669 if (ret) 2670 break; 2671 } 2672 2673 if (wc->stage < LOG_WALK_REPLAY_ALL) 2674 continue; 2675 2676 /* these keys are simply copied */ 2677 if (key.type == BTRFS_XATTR_ITEM_KEY) { 2678 ret = overwrite_item(wc->trans, root, path, 2679 eb, i, &key); 2680 if (ret) 2681 break; 2682 } else if (key.type == BTRFS_INODE_REF_KEY || 2683 key.type == BTRFS_INODE_EXTREF_KEY) { 2684 ret = add_inode_ref(wc->trans, root, log, path, 2685 eb, i, &key); 2686 if (ret && ret != -ENOENT) 2687 break; 2688 ret = 0; 2689 } else if (key.type == BTRFS_EXTENT_DATA_KEY) { 2690 ret = replay_one_extent(wc->trans, root, path, 2691 eb, i, &key); 2692 if (ret) 2693 break; 2694 } else if (key.type == BTRFS_DIR_ITEM_KEY) { 2695 ret = replay_one_dir_item(wc->trans, root, path, 2696 eb, i, &key); 2697 if (ret) 2698 break; 2699 } 2700 } 2701 btrfs_free_path(path); 2702 return ret; 2703 } 2704 2705 /* 2706 * Correctly adjust the reserved bytes occupied by a log tree extent buffer 2707 */ 2708 static void unaccount_log_buffer(struct btrfs_fs_info *fs_info, u64 start) 2709 { 2710 struct btrfs_block_group *cache; 2711 2712 cache = btrfs_lookup_block_group(fs_info, start); 2713 if (!cache) { 2714 btrfs_err(fs_info, "unable to find block group for %llu", start); 2715 return; 2716 } 2717 2718 spin_lock(&cache->space_info->lock); 2719 spin_lock(&cache->lock); 2720 cache->reserved -= fs_info->nodesize; 2721 cache->space_info->bytes_reserved -= fs_info->nodesize; 2722 spin_unlock(&cache->lock); 2723 spin_unlock(&cache->space_info->lock); 2724 2725 btrfs_put_block_group(cache); 2726 } 2727 2728 static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans, 2729 struct btrfs_root *root, 2730 struct btrfs_path *path, int *level, 2731 struct walk_control *wc) 2732 { 2733 struct btrfs_fs_info *fs_info = root->fs_info; 2734 u64 bytenr; 2735 u64 ptr_gen; 2736 struct extent_buffer *next; 2737 struct extent_buffer *cur; 2738 u32 blocksize; 2739 int ret = 0; 2740 2741 while (*level > 0) { 2742 struct btrfs_key first_key; 2743 2744 cur = path->nodes[*level]; 2745 2746 WARN_ON(btrfs_header_level(cur) != *level); 2747 2748 if (path->slots[*level] >= 2749 btrfs_header_nritems(cur)) 2750 break; 2751 2752 bytenr = btrfs_node_blockptr(cur, path->slots[*level]); 2753 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]); 2754 btrfs_node_key_to_cpu(cur, &first_key, path->slots[*level]); 2755 blocksize = fs_info->nodesize; 2756 2757 next = btrfs_find_create_tree_block(fs_info, bytenr, 2758 btrfs_header_owner(cur), 2759 *level - 1); 2760 if (IS_ERR(next)) 2761 return PTR_ERR(next); 2762 2763 if (*level == 1) { 2764 ret = wc->process_func(root, next, wc, ptr_gen, 2765 *level - 1); 2766 if (ret) { 2767 free_extent_buffer(next); 2768 return ret; 2769 } 2770 2771 path->slots[*level]++; 2772 if (wc->free) { 2773 ret = btrfs_read_buffer(next, ptr_gen, 2774 *level - 1, &first_key); 2775 if (ret) { 2776 free_extent_buffer(next); 2777 return ret; 2778 } 2779 2780 if (trans) { 2781 btrfs_tree_lock(next); 2782 btrfs_clean_tree_block(next); 2783 btrfs_wait_tree_block_writeback(next); 2784 
btrfs_tree_unlock(next); 2785 ret = btrfs_pin_reserved_extent(trans, 2786 bytenr, blocksize); 2787 if (ret) { 2788 free_extent_buffer(next); 2789 return ret; 2790 } 2791 btrfs_redirty_list_add( 2792 trans->transaction, next); 2793 } else { 2794 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags)) 2795 clear_extent_buffer_dirty(next); 2796 unaccount_log_buffer(fs_info, bytenr); 2797 } 2798 } 2799 free_extent_buffer(next); 2800 continue; 2801 } 2802 ret = btrfs_read_buffer(next, ptr_gen, *level - 1, &first_key); 2803 if (ret) { 2804 free_extent_buffer(next); 2805 return ret; 2806 } 2807 2808 if (path->nodes[*level-1]) 2809 free_extent_buffer(path->nodes[*level-1]); 2810 path->nodes[*level-1] = next; 2811 *level = btrfs_header_level(next); 2812 path->slots[*level] = 0; 2813 cond_resched(); 2814 } 2815 path->slots[*level] = btrfs_header_nritems(path->nodes[*level]); 2816 2817 cond_resched(); 2818 return 0; 2819 } 2820 2821 static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans, 2822 struct btrfs_root *root, 2823 struct btrfs_path *path, int *level, 2824 struct walk_control *wc) 2825 { 2826 struct btrfs_fs_info *fs_info = root->fs_info; 2827 int i; 2828 int slot; 2829 int ret; 2830 2831 for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) { 2832 slot = path->slots[i]; 2833 if (slot + 1 < btrfs_header_nritems(path->nodes[i])) { 2834 path->slots[i]++; 2835 *level = i; 2836 WARN_ON(*level == 0); 2837 return 0; 2838 } else { 2839 ret = wc->process_func(root, path->nodes[*level], wc, 2840 btrfs_header_generation(path->nodes[*level]), 2841 *level); 2842 if (ret) 2843 return ret; 2844 2845 if (wc->free) { 2846 struct extent_buffer *next; 2847 2848 next = path->nodes[*level]; 2849 2850 if (trans) { 2851 btrfs_tree_lock(next); 2852 btrfs_clean_tree_block(next); 2853 btrfs_wait_tree_block_writeback(next); 2854 btrfs_tree_unlock(next); 2855 ret = btrfs_pin_reserved_extent(trans, 2856 path->nodes[*level]->start, 2857 path->nodes[*level]->len); 2858 if (ret) 2859 return ret; 2860 } else { 2861 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags)) 2862 clear_extent_buffer_dirty(next); 2863 2864 unaccount_log_buffer(fs_info, 2865 path->nodes[*level]->start); 2866 } 2867 } 2868 free_extent_buffer(path->nodes[*level]); 2869 path->nodes[*level] = NULL; 2870 *level = i + 1; 2871 } 2872 } 2873 return 1; 2874 } 2875 2876 /* 2877 * drop the reference count on the tree rooted at 'snap'. This traverses 2878 * the tree freeing any blocks that have a ref count of zero after being 2879 * decremented. 2880 */ 2881 static int walk_log_tree(struct btrfs_trans_handle *trans, 2882 struct btrfs_root *log, struct walk_control *wc) 2883 { 2884 struct btrfs_fs_info *fs_info = log->fs_info; 2885 int ret = 0; 2886 int wret; 2887 int level; 2888 struct btrfs_path *path; 2889 int orig_level; 2890 2891 path = btrfs_alloc_path(); 2892 if (!path) 2893 return -ENOMEM; 2894 2895 level = btrfs_header_level(log->node); 2896 orig_level = level; 2897 path->nodes[level] = log->node; 2898 atomic_inc(&log->node->refs); 2899 path->slots[level] = 0; 2900 2901 while (1) { 2902 wret = walk_down_log_tree(trans, log, path, &level, wc); 2903 if (wret > 0) 2904 break; 2905 if (wret < 0) { 2906 ret = wret; 2907 goto out; 2908 } 2909 2910 wret = walk_up_log_tree(trans, log, path, &level, wc); 2911 if (wret > 0) 2912 break; 2913 if (wret < 0) { 2914 ret = wret; 2915 goto out; 2916 } 2917 } 2918 2919 /* was the root node processed? 
if not, catch it here */
2920 if (path->nodes[orig_level]) {
2921 ret = wc->process_func(log, path->nodes[orig_level], wc,
2922 btrfs_header_generation(path->nodes[orig_level]),
2923 orig_level);
2924 if (ret)
2925 goto out;
2926 if (wc->free) {
2927 struct extent_buffer *next;
2928
2929 next = path->nodes[orig_level];
2930
2931 if (trans) {
2932 btrfs_tree_lock(next);
2933 btrfs_clean_tree_block(next);
2934 btrfs_wait_tree_block_writeback(next);
2935 btrfs_tree_unlock(next);
2936 ret = btrfs_pin_reserved_extent(trans,
2937 next->start, next->len);
2938 if (ret)
2939 goto out;
2940 } else {
2941 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags))
2942 clear_extent_buffer_dirty(next);
2943 unaccount_log_buffer(fs_info, next->start);
2944 }
2945 }
2946 }
2947
2948 out:
2949 btrfs_free_path(path);
2950 return ret;
2951 }
2952
2953 /*
2954 * helper function to update the item for a given subvolume's log root
2955 * in the tree of log roots
2956 */
2957 static int update_log_root(struct btrfs_trans_handle *trans,
2958 struct btrfs_root *log,
2959 struct btrfs_root_item *root_item)
2960 {
2961 struct btrfs_fs_info *fs_info = log->fs_info;
2962 int ret;
2963
2964 if (log->log_transid == 1) {
2965 /* insert root item on the first sync */
2966 ret = btrfs_insert_root(trans, fs_info->log_root_tree,
2967 &log->root_key, root_item);
2968 } else {
2969 ret = btrfs_update_root(trans, fs_info->log_root_tree,
2970 &log->root_key, root_item);
2971 }
2972 return ret;
2973 }
2974
2975 static void wait_log_commit(struct btrfs_root *root, int transid)
2976 {
2977 DEFINE_WAIT(wait);
2978 int index = transid % 2;
2979
2980 /*
2981 * we only allow two pending log transactions at a time,
2982 * so we know that if ours is more than 2 older than the
2983 * current transaction, we're done
2984 */
2985 for (;;) {
2986 prepare_to_wait(&root->log_commit_wait[index],
2987 &wait, TASK_UNINTERRUPTIBLE);
2988
2989 if (!(root->log_transid_committed < transid &&
2990 atomic_read(&root->log_commit[index])))
2991 break;
2992
2993 mutex_unlock(&root->log_mutex);
2994 schedule();
2995 mutex_lock(&root->log_mutex);
2996 }
2997 finish_wait(&root->log_commit_wait[index], &wait);
2998 }
2999
3000 static void wait_for_writer(struct btrfs_root *root)
3001 {
3002 DEFINE_WAIT(wait);
3003
3004 for (;;) {
3005 prepare_to_wait(&root->log_writer_wait, &wait,
3006 TASK_UNINTERRUPTIBLE);
3007 if (!atomic_read(&root->log_writers))
3008 break;
3009
3010 mutex_unlock(&root->log_mutex);
3011 schedule();
3012 mutex_lock(&root->log_mutex);
3013 }
3014 finish_wait(&root->log_writer_wait, &wait);
3015 }
3016
3017 static inline void btrfs_remove_log_ctx(struct btrfs_root *root,
3018 struct btrfs_log_ctx *ctx)
3019 {
3020 if (!ctx)
3021 return;
3022
3023 mutex_lock(&root->log_mutex);
3024 list_del_init(&ctx->list);
3025 mutex_unlock(&root->log_mutex);
3026 }
3027
3028 /*
3029 * Invoked in log mutex context, or the caller must otherwise ensure that
3030 * no other task can access the list.
3031 */
3032 static inline void btrfs_remove_all_log_ctxs(struct btrfs_root *root,
3033 int index, int error)
3034 {
3035 struct btrfs_log_ctx *ctx;
3036 struct btrfs_log_ctx *safe;
3037
3038 list_for_each_entry_safe(ctx, safe, &root->log_ctxs[index], list) {
3039 list_del_init(&ctx->list);
3040 ctx->log_ret = error;
3041 }
3042
3043 INIT_LIST_HEAD(&root->log_ctxs[index]);
3044 }
3045
3046 /*
3047 * btrfs_sync_log sends a given tree log down to the disk and
3048 * updates the super blocks to record it.
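* (Recording it means pointing the superblock's log_root and log_root_level
* fields at the new log root tree; see the btrfs_set_super_log_root() and
* write_all_supers() calls below.)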
When this call is done,
3049 * you know that any inodes previously logged are safely on disk only
3050 * if it returns 0.
3051 *
3052 * Any other return value means you need to call btrfs_commit_transaction.
3053 * Some of the edge cases for fsyncing directories that have had unlinks
3054 * or renames done in the past mean that sometimes the only safe
3055 * fsync is to commit the whole FS. When btrfs_sync_log returns -EAGAIN,
3056 * that has happened.
3057 */
3058 int btrfs_sync_log(struct btrfs_trans_handle *trans,
3059 struct btrfs_root *root, struct btrfs_log_ctx *ctx)
3060 {
3061 int index1;
3062 int index2;
3063 int mark;
3064 int ret;
3065 struct btrfs_fs_info *fs_info = root->fs_info;
3066 struct btrfs_root *log = root->log_root;
3067 struct btrfs_root *log_root_tree = fs_info->log_root_tree;
3068 struct btrfs_root_item new_root_item;
3069 int log_transid = 0;
3070 struct btrfs_log_ctx root_log_ctx;
3071 struct blk_plug plug;
3072 u64 log_root_start;
3073 u64 log_root_level;
3074
3075 mutex_lock(&root->log_mutex);
3076 log_transid = ctx->log_transid;
3077 if (root->log_transid_committed >= log_transid) {
3078 mutex_unlock(&root->log_mutex);
3079 return ctx->log_ret;
3080 }
3081
3082 index1 = log_transid % 2;
3083 if (atomic_read(&root->log_commit[index1])) {
3084 wait_log_commit(root, log_transid);
3085 mutex_unlock(&root->log_mutex);
3086 return ctx->log_ret;
3087 }
3088 ASSERT(log_transid == root->log_transid);
3089 atomic_set(&root->log_commit[index1], 1);
3090
3091 /* wait for previous tree log sync to complete */
3092 if (atomic_read(&root->log_commit[(index1 + 1) % 2]))
3093 wait_log_commit(root, log_transid - 1);
3094
3095 while (1) {
3096 int batch = atomic_read(&root->log_batch);
3097 /* when we're on an ssd, just kick the log commit out */
3098 if (!btrfs_test_opt(fs_info, SSD) &&
3099 test_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state)) {
3100 mutex_unlock(&root->log_mutex);
3101 schedule_timeout_uninterruptible(1);
3102 mutex_lock(&root->log_mutex);
3103 }
3104 wait_for_writer(root);
3105 if (batch == atomic_read(&root->log_batch))
3106 break;
3107 }
3108
3109 /* bail out if we need to do a full commit */
3110 if (btrfs_need_log_full_commit(trans)) {
3111 ret = -EAGAIN;
3112 mutex_unlock(&root->log_mutex);
3113 goto out;
3114 }
3115
3116 if (log_transid % 2 == 0)
3117 mark = EXTENT_DIRTY;
3118 else
3119 mark = EXTENT_NEW;
3120
3121 /* we start IO on all the marked extents here, but we don't actually
3122 * wait for them until later.
3123 */
3124 blk_start_plug(&plug);
3125 ret = btrfs_write_marked_extents(fs_info, &log->dirty_log_pages, mark);
3126 /*
3127 * -EAGAIN happens when someone, e.g., a concurrent transaction
3128 * commit, writes a dirty extent in this tree-log commit. This
3129 * concurrent write will create a hole while writing out the extents,
3130 * and we cannot proceed on a zoned filesystem, which requires
3131 * sequential writing. While we could bail out to a full commit
3132 * here, we continue instead, hoping that the concurrent write fills
3133 * the hole.
3134 */
3135 if (ret == -EAGAIN && btrfs_is_zoned(fs_info))
3136 ret = 0;
3137 if (ret) {
3138 blk_finish_plug(&plug);
3139 btrfs_abort_transaction(trans, ret);
3140 btrfs_set_log_full_commit(trans);
3141 mutex_unlock(&root->log_mutex);
3142 goto out;
3143 }
3144
3145 /*
3146 * We _must_ update under the root->log_mutex in order to make sure we
3147 * have a consistent view of the log root we are trying to commit at
3148 * this moment.
3149 * 3150 * We _must_ copy this into a local copy, because we are not holding the 3151 * log_root_tree->log_mutex yet. This is important because when we 3152 * commit the log_root_tree we must have a consistent view of the 3153 * log_root_tree when we update the super block to point at the 3154 * log_root_tree bytenr. If we update the log_root_tree here we'll race 3155 * with the commit and possibly point at the new block which we may not 3156 * have written out. 3157 */ 3158 btrfs_set_root_node(&log->root_item, log->node); 3159 memcpy(&new_root_item, &log->root_item, sizeof(new_root_item)); 3160 3161 root->log_transid++; 3162 log->log_transid = root->log_transid; 3163 root->log_start_pid = 0; 3164 /* 3165 * IO has been started, blocks of the log tree have WRITTEN flag set 3166 * in their headers. new modifications of the log will be written to 3167 * new positions. so it's safe to allow log writers to go in. 3168 */ 3169 mutex_unlock(&root->log_mutex); 3170 3171 if (btrfs_is_zoned(fs_info)) { 3172 mutex_lock(&fs_info->tree_root->log_mutex); 3173 if (!log_root_tree->node) { 3174 ret = btrfs_alloc_log_tree_node(trans, log_root_tree); 3175 if (ret) { 3176 mutex_unlock(&fs_info->tree_root->log_mutex); 3177 goto out; 3178 } 3179 } 3180 mutex_unlock(&fs_info->tree_root->log_mutex); 3181 } 3182 3183 btrfs_init_log_ctx(&root_log_ctx, NULL); 3184 3185 mutex_lock(&log_root_tree->log_mutex); 3186 3187 index2 = log_root_tree->log_transid % 2; 3188 list_add_tail(&root_log_ctx.list, &log_root_tree->log_ctxs[index2]); 3189 root_log_ctx.log_transid = log_root_tree->log_transid; 3190 3191 /* 3192 * Now we are safe to update the log_root_tree because we're under the 3193 * log_mutex, and we're a current writer so we're holding the commit 3194 * open until we drop the log_mutex. 
3195 */ 3196 ret = update_log_root(trans, log, &new_root_item); 3197 if (ret) { 3198 if (!list_empty(&root_log_ctx.list)) 3199 list_del_init(&root_log_ctx.list); 3200 3201 blk_finish_plug(&plug); 3202 btrfs_set_log_full_commit(trans); 3203 3204 if (ret != -ENOSPC) { 3205 btrfs_abort_transaction(trans, ret); 3206 mutex_unlock(&log_root_tree->log_mutex); 3207 goto out; 3208 } 3209 btrfs_wait_tree_log_extents(log, mark); 3210 mutex_unlock(&log_root_tree->log_mutex); 3211 ret = -EAGAIN; 3212 goto out; 3213 } 3214 3215 if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) { 3216 blk_finish_plug(&plug); 3217 list_del_init(&root_log_ctx.list); 3218 mutex_unlock(&log_root_tree->log_mutex); 3219 ret = root_log_ctx.log_ret; 3220 goto out; 3221 } 3222 3223 index2 = root_log_ctx.log_transid % 2; 3224 if (atomic_read(&log_root_tree->log_commit[index2])) { 3225 blk_finish_plug(&plug); 3226 ret = btrfs_wait_tree_log_extents(log, mark); 3227 wait_log_commit(log_root_tree, 3228 root_log_ctx.log_transid); 3229 mutex_unlock(&log_root_tree->log_mutex); 3230 if (!ret) 3231 ret = root_log_ctx.log_ret; 3232 goto out; 3233 } 3234 ASSERT(root_log_ctx.log_transid == log_root_tree->log_transid); 3235 atomic_set(&log_root_tree->log_commit[index2], 1); 3236 3237 if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2])) { 3238 wait_log_commit(log_root_tree, 3239 root_log_ctx.log_transid - 1); 3240 } 3241 3242 /* 3243 * now that we've moved on to the tree of log tree roots, 3244 * check the full commit flag again 3245 */ 3246 if (btrfs_need_log_full_commit(trans)) { 3247 blk_finish_plug(&plug); 3248 btrfs_wait_tree_log_extents(log, mark); 3249 mutex_unlock(&log_root_tree->log_mutex); 3250 ret = -EAGAIN; 3251 goto out_wake_log_root; 3252 } 3253 3254 ret = btrfs_write_marked_extents(fs_info, 3255 &log_root_tree->dirty_log_pages, 3256 EXTENT_DIRTY | EXTENT_NEW); 3257 blk_finish_plug(&plug); 3258 /* 3259 * As described above, -EAGAIN indicates a hole in the extents. We 3260 * cannot wait for these write outs since the waiting cause a 3261 * deadlock. Bail out to the full commit instead. 
3262 */ 3263 if (ret == -EAGAIN && btrfs_is_zoned(fs_info)) { 3264 btrfs_set_log_full_commit(trans); 3265 btrfs_wait_tree_log_extents(log, mark); 3266 mutex_unlock(&log_root_tree->log_mutex); 3267 goto out_wake_log_root; 3268 } else if (ret) { 3269 btrfs_set_log_full_commit(trans); 3270 btrfs_abort_transaction(trans, ret); 3271 mutex_unlock(&log_root_tree->log_mutex); 3272 goto out_wake_log_root; 3273 } 3274 ret = btrfs_wait_tree_log_extents(log, mark); 3275 if (!ret) 3276 ret = btrfs_wait_tree_log_extents(log_root_tree, 3277 EXTENT_NEW | EXTENT_DIRTY); 3278 if (ret) { 3279 btrfs_set_log_full_commit(trans); 3280 mutex_unlock(&log_root_tree->log_mutex); 3281 goto out_wake_log_root; 3282 } 3283 3284 log_root_start = log_root_tree->node->start; 3285 log_root_level = btrfs_header_level(log_root_tree->node); 3286 log_root_tree->log_transid++; 3287 mutex_unlock(&log_root_tree->log_mutex); 3288 3289 /* 3290 * Here we are guaranteed that nobody is going to write the superblock 3291 * for the current transaction before us and that neither we do write 3292 * our superblock before the previous transaction finishes its commit 3293 * and writes its superblock, because: 3294 * 3295 * 1) We are holding a handle on the current transaction, so no body 3296 * can commit it until we release the handle; 3297 * 3298 * 2) Before writing our superblock we acquire the tree_log_mutex, so 3299 * if the previous transaction is still committing, and hasn't yet 3300 * written its superblock, we wait for it to do it, because a 3301 * transaction commit acquires the tree_log_mutex when the commit 3302 * begins and releases it only after writing its superblock. 3303 */ 3304 mutex_lock(&fs_info->tree_log_mutex); 3305 3306 /* 3307 * The previous transaction writeout phase could have failed, and thus 3308 * marked the fs in an error state. We must not commit here, as we 3309 * could have updated our generation in the super_for_commit and 3310 * writing the super here would result in transid mismatches. If there 3311 * is an error here just bail. 3312 */ 3313 if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) { 3314 ret = -EIO; 3315 btrfs_set_log_full_commit(trans); 3316 btrfs_abort_transaction(trans, ret); 3317 mutex_unlock(&fs_info->tree_log_mutex); 3318 goto out_wake_log_root; 3319 } 3320 3321 btrfs_set_super_log_root(fs_info->super_for_commit, log_root_start); 3322 btrfs_set_super_log_root_level(fs_info->super_for_commit, log_root_level); 3323 ret = write_all_supers(fs_info, 1); 3324 mutex_unlock(&fs_info->tree_log_mutex); 3325 if (ret) { 3326 btrfs_set_log_full_commit(trans); 3327 btrfs_abort_transaction(trans, ret); 3328 goto out_wake_log_root; 3329 } 3330 3331 mutex_lock(&root->log_mutex); 3332 if (root->last_log_commit < log_transid) 3333 root->last_log_commit = log_transid; 3334 mutex_unlock(&root->log_mutex); 3335 3336 out_wake_log_root: 3337 mutex_lock(&log_root_tree->log_mutex); 3338 btrfs_remove_all_log_ctxs(log_root_tree, index2, ret); 3339 3340 log_root_tree->log_transid_committed++; 3341 atomic_set(&log_root_tree->log_commit[index2], 0); 3342 mutex_unlock(&log_root_tree->log_mutex); 3343 3344 /* 3345 * The barrier before waitqueue_active (in cond_wake_up) is needed so 3346 * all the updates above are seen by the woken threads. It might not be 3347 * necessary, but proving that seems to be hard. 
3348 */ 3349 cond_wake_up(&log_root_tree->log_commit_wait[index2]); 3350 out: 3351 mutex_lock(&root->log_mutex); 3352 btrfs_remove_all_log_ctxs(root, index1, ret); 3353 root->log_transid_committed++; 3354 atomic_set(&root->log_commit[index1], 0); 3355 mutex_unlock(&root->log_mutex); 3356 3357 /* 3358 * The barrier before waitqueue_active (in cond_wake_up) is needed so 3359 * all the updates above are seen by the woken threads. It might not be 3360 * necessary, but proving that seems to be hard. 3361 */ 3362 cond_wake_up(&root->log_commit_wait[index1]); 3363 return ret; 3364 } 3365 3366 static void free_log_tree(struct btrfs_trans_handle *trans, 3367 struct btrfs_root *log) 3368 { 3369 int ret; 3370 struct walk_control wc = { 3371 .free = 1, 3372 .process_func = process_one_buffer 3373 }; 3374 3375 if (log->node) { 3376 ret = walk_log_tree(trans, log, &wc); 3377 if (ret) { 3378 if (trans) 3379 btrfs_abort_transaction(trans, ret); 3380 else 3381 btrfs_handle_fs_error(log->fs_info, ret, NULL); 3382 } 3383 } 3384 3385 clear_extent_bits(&log->dirty_log_pages, 0, (u64)-1, 3386 EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT); 3387 extent_io_tree_release(&log->log_csum_range); 3388 3389 if (trans && log->node) 3390 btrfs_redirty_list_add(trans->transaction, log->node); 3391 btrfs_put_root(log); 3392 } 3393 3394 /* 3395 * free all the extents used by the tree log. This should be called 3396 * at commit time of the full transaction 3397 */ 3398 int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root) 3399 { 3400 if (root->log_root) { 3401 free_log_tree(trans, root->log_root); 3402 root->log_root = NULL; 3403 clear_bit(BTRFS_ROOT_HAS_LOG_TREE, &root->state); 3404 } 3405 return 0; 3406 } 3407 3408 int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans, 3409 struct btrfs_fs_info *fs_info) 3410 { 3411 if (fs_info->log_root_tree) { 3412 free_log_tree(trans, fs_info->log_root_tree); 3413 fs_info->log_root_tree = NULL; 3414 clear_bit(BTRFS_ROOT_HAS_LOG_TREE, &fs_info->tree_root->state); 3415 } 3416 return 0; 3417 } 3418 3419 /* 3420 * Check if an inode was logged in the current transaction. We can't always rely 3421 * on an inode's logged_trans value, because it's an in-memory only field and 3422 * therefore not persisted. This means that its value is lost if the inode gets 3423 * evicted and loaded again from disk (in which case it has a value of 0, and 3424 * certainly it is smaller then any possible transaction ID), when that happens 3425 * the full_sync flag is set in the inode's runtime flags, so on that case we 3426 * assume eviction happened and ignore the logged_trans value, assuming the 3427 * worst case, that the inode was logged before in the current transaction. 3428 */ 3429 static bool inode_logged(struct btrfs_trans_handle *trans, 3430 struct btrfs_inode *inode) 3431 { 3432 if (inode->logged_trans == trans->transid) 3433 return true; 3434 3435 if (inode->last_trans == trans->transid && 3436 test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags) && 3437 !test_bit(BTRFS_FS_LOG_RECOVERING, &trans->fs_info->flags)) 3438 return true; 3439 3440 return false; 3441 } 3442 3443 /* 3444 * If both a file and directory are logged, and unlinks or renames are 3445 * mixed in, we have a few interesting corners: 3446 * 3447 * create file X in dir Y 3448 * link file X to X.link in dir Y 3449 * fsync file X 3450 * unlink file X but leave X.link 3451 * fsync dir Y 3452 * 3453 * After a crash we would expect only X.link to exist. 
But file X
3454 * didn't get fsync'd again so the log has back refs for X and X.link.
3455 *
3456 * We solve this by removing directory entries and inode backrefs from the
3457 * log when a file that was logged in the current transaction is
3458 * unlinked. Any later fsync will include the updated log entries, and
3459 * we'll be able to reconstruct the proper directory items from backrefs.
3460 *
3461 * This optimization allows us to avoid relogging the entire inode
3462 * or the entire directory.
3463 */
3464 int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans,
3465 struct btrfs_root *root,
3466 const char *name, int name_len,
3467 struct btrfs_inode *dir, u64 index)
3468 {
3469 struct btrfs_root *log;
3470 struct btrfs_dir_item *di;
3471 struct btrfs_path *path;
3472 int ret;
3473 int err = 0;
3474 u64 dir_ino = btrfs_ino(dir);
3475
3476 if (!inode_logged(trans, dir))
3477 return 0;
3478
3479 ret = join_running_log_trans(root);
3480 if (ret)
3481 return 0;
3482
3483 mutex_lock(&dir->log_mutex);
3484
3485 log = root->log_root;
3486 path = btrfs_alloc_path();
3487 if (!path) {
3488 err = -ENOMEM;
3489 goto out_unlock;
3490 }
3491
3492 di = btrfs_lookup_dir_item(trans, log, path, dir_ino,
3493 name, name_len, -1);
3494 if (IS_ERR(di)) {
3495 err = PTR_ERR(di);
3496 goto fail;
3497 }
3498 if (di) {
3499 ret = btrfs_delete_one_dir_name(trans, log, path, di);
3500 if (ret) {
3501 err = ret;
3502 goto fail;
3503 }
3504 }
3505 btrfs_release_path(path);
3506 di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino,
3507 index, name, name_len, -1);
3508 if (IS_ERR(di)) {
3509 err = PTR_ERR(di);
3510 goto fail;
3511 }
3512 if (di) {
3513 ret = btrfs_delete_one_dir_name(trans, log, path, di);
3514 if (ret) {
3515 err = ret;
3516 goto fail;
3517 }
3518 }
3519
3520 /*
3521 * We do not need to update the size field of the directory's inode item
3522 * because on log replay we update the field to reflect all existing
3523 * entries in the directory (see overwrite_item()).
3524 */
3525 fail:
3526 btrfs_free_path(path);
3527 out_unlock:
3528 mutex_unlock(&dir->log_mutex);
3529 if (err == -ENOSPC) {
3530 btrfs_set_log_full_commit(trans);
3531 err = 0;
3532 } else if (err < 0 && err != -ENOENT) {
3533 /* ENOENT can be returned if the entry hasn't been fsynced yet */
3534 btrfs_abort_transaction(trans, err);
3535 }
3536
3537 btrfs_end_log_trans(root);
3538
3539 return err;
3540 }
3541
3542 /* see comments for btrfs_del_dir_entries_in_log */
3543 int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans,
3544 struct btrfs_root *root,
3545 const char *name, int name_len,
3546 struct btrfs_inode *inode, u64 dirid)
3547 {
3548 struct btrfs_root *log;
3549 u64 index;
3550 int ret;
3551
3552 if (!inode_logged(trans, inode))
3553 return 0;
3554
3555 ret = join_running_log_trans(root);
3556 if (ret)
3557 return 0;
3558 log = root->log_root;
3559 mutex_lock(&inode->log_mutex);
3560
3561 ret = btrfs_del_inode_ref(trans, log, name, name_len, btrfs_ino(inode),
3562 dirid, &index);
3563 mutex_unlock(&inode->log_mutex);
3564 if (ret == -ENOSPC) {
3565 btrfs_set_log_full_commit(trans);
3566 ret = 0;
3567 } else if (ret < 0 && ret != -ENOENT)
3568 btrfs_abort_transaction(trans, ret);
3569 btrfs_end_log_trans(root);
3570
3571 return ret;
3572 }
3573
3574 /*
3575 * creates a range item in the log for 'dirid'. first_offset and
3576 * last_offset tell us which parts of the key space the log should
3577 * be considered authoritative for.
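*
* For example (a sketch): with key_type == BTRFS_DIR_ITEM_KEY this creates
* an item with key (dirid, BTRFS_DIR_LOG_ITEM_KEY, first_offset) whose
* dir_log_end field is set to last_offset, claiming authority over the
* offsets [first_offset, last_offset]; find_dir_range() reads these items
* back during replay.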
3578 */ 3579 static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans, 3580 struct btrfs_root *log, 3581 struct btrfs_path *path, 3582 int key_type, u64 dirid, 3583 u64 first_offset, u64 last_offset) 3584 { 3585 int ret; 3586 struct btrfs_key key; 3587 struct btrfs_dir_log_item *item; 3588 3589 key.objectid = dirid; 3590 key.offset = first_offset; 3591 if (key_type == BTRFS_DIR_ITEM_KEY) 3592 key.type = BTRFS_DIR_LOG_ITEM_KEY; 3593 else 3594 key.type = BTRFS_DIR_LOG_INDEX_KEY; 3595 ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*item)); 3596 if (ret) 3597 return ret; 3598 3599 item = btrfs_item_ptr(path->nodes[0], path->slots[0], 3600 struct btrfs_dir_log_item); 3601 btrfs_set_dir_log_end(path->nodes[0], item, last_offset); 3602 btrfs_mark_buffer_dirty(path->nodes[0]); 3603 btrfs_release_path(path); 3604 return 0; 3605 } 3606 3607 /* 3608 * log all the items included in the current transaction for a given 3609 * directory. This also creates the range items in the log tree required 3610 * to replay anything deleted before the fsync 3611 */ 3612 static noinline int log_dir_items(struct btrfs_trans_handle *trans, 3613 struct btrfs_root *root, struct btrfs_inode *inode, 3614 struct btrfs_path *path, 3615 struct btrfs_path *dst_path, int key_type, 3616 struct btrfs_log_ctx *ctx, 3617 u64 min_offset, u64 *last_offset_ret) 3618 { 3619 struct btrfs_key min_key; 3620 struct btrfs_root *log = root->log_root; 3621 struct extent_buffer *src; 3622 int err = 0; 3623 int ret; 3624 int i; 3625 int nritems; 3626 u64 first_offset = min_offset; 3627 u64 last_offset = (u64)-1; 3628 u64 ino = btrfs_ino(inode); 3629 3630 log = root->log_root; 3631 3632 min_key.objectid = ino; 3633 min_key.type = key_type; 3634 min_key.offset = min_offset; 3635 3636 ret = btrfs_search_forward(root, &min_key, path, trans->transid); 3637 3638 /* 3639 * we didn't find anything from this transaction, see if there 3640 * is anything at all 3641 */ 3642 if (ret != 0 || min_key.objectid != ino || min_key.type != key_type) { 3643 min_key.objectid = ino; 3644 min_key.type = key_type; 3645 min_key.offset = (u64)-1; 3646 btrfs_release_path(path); 3647 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0); 3648 if (ret < 0) { 3649 btrfs_release_path(path); 3650 return ret; 3651 } 3652 ret = btrfs_previous_item(root, path, ino, key_type); 3653 3654 /* if ret == 0 there are items for this type, 3655 * create a range to tell us the last key of this type. 3656 * otherwise, there are no items in this directory after 3657 * *min_offset, and we create a range to indicate that. 3658 */ 3659 if (ret == 0) { 3660 struct btrfs_key tmp; 3661 btrfs_item_key_to_cpu(path->nodes[0], &tmp, 3662 path->slots[0]); 3663 if (key_type == tmp.type) 3664 first_offset = max(min_offset, tmp.offset) + 1; 3665 } 3666 goto done; 3667 } 3668 3669 /* go backward to find any previous key */ 3670 ret = btrfs_previous_item(root, path, ino, key_type); 3671 if (ret == 0) { 3672 struct btrfs_key tmp; 3673 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]); 3674 if (key_type == tmp.type) { 3675 first_offset = tmp.offset; 3676 ret = overwrite_item(trans, log, dst_path, 3677 path->nodes[0], path->slots[0], 3678 &tmp); 3679 if (ret) { 3680 err = ret; 3681 goto done; 3682 } 3683 } 3684 } 3685 btrfs_release_path(path); 3686 3687 /* 3688 * Find the first key from this transaction again. 
See the note for
3689 * log_new_dir_dentries, if we're logging a directory recursively, we
3690 * won't be holding its i_mutex, which means we can modify the directory
3691 * while we're logging it. If we remove an entry between our first
3692 * search and this search we'll not find the key again and can just
3693 * bail.
3694 */
3695 search:
3696 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0);
3697 if (ret != 0)
3698 goto done;
3699
3700 /*
3701 * we have a block from this transaction, log every item in it
3702 * from our directory
3703 */
3704 while (1) {
3705 struct btrfs_key tmp;
3706 src = path->nodes[0];
3707 nritems = btrfs_header_nritems(src);
3708 for (i = path->slots[0]; i < nritems; i++) {
3709 struct btrfs_dir_item *di;
3710
3711 btrfs_item_key_to_cpu(src, &min_key, i);
3712
3713 if (min_key.objectid != ino || min_key.type != key_type)
3714 goto done;
3715
3716 if (need_resched()) {
3717 btrfs_release_path(path);
3718 cond_resched();
3719 goto search;
3720 }
3721
3722 ret = overwrite_item(trans, log, dst_path, src, i,
3723 &min_key);
3724 if (ret) {
3725 err = ret;
3726 goto done;
3727 }
3728
3729 /*
3730 * We must make sure that when we log a directory entry,
3731 * the corresponding inode, after log replay, has a
3732 * matching link count. For example:
3733 *
3734 * touch foo
3735 * mkdir mydir
3736 * sync
3737 * ln foo mydir/bar
3738 * xfs_io -c "fsync" mydir
3739 * <crash>
3740 * <mount fs and log replay>
3741 *
3742 * This would result in an fsync log that, when replayed,
3743 * leaves our file inode with a link count of 1 while two
3744 * directory entries point to the same inode. After
3745 * removing one of the names, it would not be possible to
3746 * remove the other name, which would always result in
3747 * stale file handle errors, and it would not be possible
3748 * to rmdir the parent directory, since its i_size could
3749 * never decrement to the value BTRFS_EMPTY_DIR_SIZE,
3750 * resulting in -ENOTEMPTY errors.
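*
* That is why, just below, we set ctx->log_new_dentries when we find a
* dentry created in this transaction (or one pointing to a directory):
* the caller can then log the inodes those dentries point to, so their
* link counts are correct after replay.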
3751 */ 3752 di = btrfs_item_ptr(src, i, struct btrfs_dir_item); 3753 btrfs_dir_item_key_to_cpu(src, di, &tmp); 3754 if (ctx && 3755 (btrfs_dir_transid(src, di) == trans->transid || 3756 btrfs_dir_type(src, di) == BTRFS_FT_DIR) && 3757 tmp.type != BTRFS_ROOT_ITEM_KEY) 3758 ctx->log_new_dentries = true; 3759 } 3760 path->slots[0] = nritems; 3761 3762 /* 3763 * look ahead to the next item and see if it is also 3764 * from this directory and from this transaction 3765 */ 3766 ret = btrfs_next_leaf(root, path); 3767 if (ret) { 3768 if (ret == 1) 3769 last_offset = (u64)-1; 3770 else 3771 err = ret; 3772 goto done; 3773 } 3774 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]); 3775 if (tmp.objectid != ino || tmp.type != key_type) { 3776 last_offset = (u64)-1; 3777 goto done; 3778 } 3779 if (btrfs_header_generation(path->nodes[0]) != trans->transid) { 3780 ret = overwrite_item(trans, log, dst_path, 3781 path->nodes[0], path->slots[0], 3782 &tmp); 3783 if (ret) 3784 err = ret; 3785 else 3786 last_offset = tmp.offset; 3787 goto done; 3788 } 3789 } 3790 done: 3791 btrfs_release_path(path); 3792 btrfs_release_path(dst_path); 3793 3794 if (err == 0) { 3795 *last_offset_ret = last_offset; 3796 /* 3797 * insert the log range keys to indicate where the log 3798 * is valid 3799 */ 3800 ret = insert_dir_log_key(trans, log, path, key_type, 3801 ino, first_offset, last_offset); 3802 if (ret) 3803 err = ret; 3804 } 3805 return err; 3806 } 3807 3808 /* 3809 * logging directories is very similar to logging inodes, We find all the items 3810 * from the current transaction and write them to the log. 3811 * 3812 * The recovery code scans the directory in the subvolume, and if it finds a 3813 * key in the range logged that is not present in the log tree, then it means 3814 * that dir entry was unlinked during the transaction. 3815 * 3816 * In order for that scan to work, we must include one key smaller than 3817 * the smallest logged by this transaction and one key larger than the largest 3818 * key logged by this transaction. 3819 */ 3820 static noinline int log_directory_changes(struct btrfs_trans_handle *trans, 3821 struct btrfs_root *root, struct btrfs_inode *inode, 3822 struct btrfs_path *path, 3823 struct btrfs_path *dst_path, 3824 struct btrfs_log_ctx *ctx) 3825 { 3826 u64 min_key; 3827 u64 max_key; 3828 int ret; 3829 int key_type = BTRFS_DIR_ITEM_KEY; 3830 3831 again: 3832 min_key = 0; 3833 max_key = 0; 3834 while (1) { 3835 ret = log_dir_items(trans, root, inode, path, dst_path, key_type, 3836 ctx, min_key, &max_key); 3837 if (ret) 3838 return ret; 3839 if (max_key == (u64)-1) 3840 break; 3841 min_key = max_key + 1; 3842 } 3843 3844 if (key_type == BTRFS_DIR_ITEM_KEY) { 3845 key_type = BTRFS_DIR_INDEX_KEY; 3846 goto again; 3847 } 3848 return 0; 3849 } 3850 3851 /* 3852 * a helper function to drop items from the log before we relog an 3853 * inode. max_key_type indicates the highest item type to remove. 3854 * This cannot be run for file data extents because it does not 3855 * free the extents they point to. 
3856 */ 3857 static int drop_objectid_items(struct btrfs_trans_handle *trans, 3858 struct btrfs_root *log, 3859 struct btrfs_path *path, 3860 u64 objectid, int max_key_type) 3861 { 3862 int ret; 3863 struct btrfs_key key; 3864 struct btrfs_key found_key; 3865 int start_slot; 3866 3867 key.objectid = objectid; 3868 key.type = max_key_type; 3869 key.offset = (u64)-1; 3870 3871 while (1) { 3872 ret = btrfs_search_slot(trans, log, &key, path, -1, 1); 3873 BUG_ON(ret == 0); /* Logic error */ 3874 if (ret < 0) 3875 break; 3876 3877 if (path->slots[0] == 0) 3878 break; 3879 3880 path->slots[0]--; 3881 btrfs_item_key_to_cpu(path->nodes[0], &found_key, 3882 path->slots[0]); 3883 3884 if (found_key.objectid != objectid) 3885 break; 3886 3887 found_key.offset = 0; 3888 found_key.type = 0; 3889 ret = btrfs_bin_search(path->nodes[0], &found_key, &start_slot); 3890 if (ret < 0) 3891 break; 3892 3893 ret = btrfs_del_items(trans, log, path, start_slot, 3894 path->slots[0] - start_slot + 1); 3895 /* 3896 * If start slot isn't 0 then we don't need to re-search, we've 3897 * found the last guy with the objectid in this tree. 3898 */ 3899 if (ret || start_slot != 0) 3900 break; 3901 btrfs_release_path(path); 3902 } 3903 btrfs_release_path(path); 3904 if (ret > 0) 3905 ret = 0; 3906 return ret; 3907 } 3908 3909 static void fill_inode_item(struct btrfs_trans_handle *trans, 3910 struct extent_buffer *leaf, 3911 struct btrfs_inode_item *item, 3912 struct inode *inode, int log_inode_only, 3913 u64 logged_isize) 3914 { 3915 struct btrfs_map_token token; 3916 3917 btrfs_init_map_token(&token, leaf); 3918 3919 if (log_inode_only) { 3920 /* set the generation to zero so the recover code 3921 * can tell the difference between an logging 3922 * just to say 'this inode exists' and a logging 3923 * to say 'update this inode with these values' 3924 */ 3925 btrfs_set_token_inode_generation(&token, item, 0); 3926 btrfs_set_token_inode_size(&token, item, logged_isize); 3927 } else { 3928 btrfs_set_token_inode_generation(&token, item, 3929 BTRFS_I(inode)->generation); 3930 btrfs_set_token_inode_size(&token, item, inode->i_size); 3931 } 3932 3933 btrfs_set_token_inode_uid(&token, item, i_uid_read(inode)); 3934 btrfs_set_token_inode_gid(&token, item, i_gid_read(inode)); 3935 btrfs_set_token_inode_mode(&token, item, inode->i_mode); 3936 btrfs_set_token_inode_nlink(&token, item, inode->i_nlink); 3937 3938 btrfs_set_token_timespec_sec(&token, &item->atime, 3939 inode->i_atime.tv_sec); 3940 btrfs_set_token_timespec_nsec(&token, &item->atime, 3941 inode->i_atime.tv_nsec); 3942 3943 btrfs_set_token_timespec_sec(&token, &item->mtime, 3944 inode->i_mtime.tv_sec); 3945 btrfs_set_token_timespec_nsec(&token, &item->mtime, 3946 inode->i_mtime.tv_nsec); 3947 3948 btrfs_set_token_timespec_sec(&token, &item->ctime, 3949 inode->i_ctime.tv_sec); 3950 btrfs_set_token_timespec_nsec(&token, &item->ctime, 3951 inode->i_ctime.tv_nsec); 3952 3953 /* 3954 * We do not need to set the nbytes field, in fact during a fast fsync 3955 * its value may not even be correct, since a fast fsync does not wait 3956 * for ordered extent completion, which is where we update nbytes, it 3957 * only waits for writeback to complete. During log replay as we find 3958 * file extent items and replay them, we adjust the nbytes field of the 3959 * inode item in subvolume tree as needed (see overwrite_item()). 
3960 */ 3961 3962 btrfs_set_token_inode_sequence(&token, item, inode_peek_iversion(inode)); 3963 btrfs_set_token_inode_transid(&token, item, trans->transid); 3964 btrfs_set_token_inode_rdev(&token, item, inode->i_rdev); 3965 btrfs_set_token_inode_flags(&token, item, BTRFS_I(inode)->flags); 3966 btrfs_set_token_inode_block_group(&token, item, 0); 3967 } 3968 3969 static int log_inode_item(struct btrfs_trans_handle *trans, 3970 struct btrfs_root *log, struct btrfs_path *path, 3971 struct btrfs_inode *inode) 3972 { 3973 struct btrfs_inode_item *inode_item; 3974 int ret; 3975 3976 ret = btrfs_insert_empty_item(trans, log, path, 3977 &inode->location, sizeof(*inode_item)); 3978 if (ret && ret != -EEXIST) 3979 return ret; 3980 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0], 3981 struct btrfs_inode_item); 3982 fill_inode_item(trans, path->nodes[0], inode_item, &inode->vfs_inode, 3983 0, 0); 3984 btrfs_release_path(path); 3985 return 0; 3986 } 3987 3988 static int log_csums(struct btrfs_trans_handle *trans, 3989 struct btrfs_inode *inode, 3990 struct btrfs_root *log_root, 3991 struct btrfs_ordered_sum *sums) 3992 { 3993 const u64 lock_end = sums->bytenr + sums->len - 1; 3994 struct extent_state *cached_state = NULL; 3995 int ret; 3996 3997 /* 3998 * If this inode was not used for reflink operations in the current 3999 * transaction with new extents, then do the fast path, no need to 4000 * worry about logging checksum items with overlapping ranges. 4001 */ 4002 if (inode->last_reflink_trans < trans->transid) 4003 return btrfs_csum_file_blocks(trans, log_root, sums); 4004 4005 /* 4006 * Serialize logging for checksums. This is to avoid racing with the 4007 * same checksum being logged by another task that is logging another 4008 * file which happens to refer to the same extent as well. Such races 4009 * can leave checksum items in the log with overlapping ranges. 4010 */ 4011 ret = lock_extent_bits(&log_root->log_csum_range, sums->bytenr, 4012 lock_end, &cached_state); 4013 if (ret) 4014 return ret; 4015 /* 4016 * Due to extent cloning, we might have logged a csum item that covers a 4017 * subrange of a cloned extent, and later we can end up logging a csum 4018 * item for a larger subrange of the same extent or the entire range. 4019 * This would leave csum items in the log tree that cover the same range 4020 * and break the searches for checksums in the log tree, resulting in 4021 * some checksums missing in the fs/subvolume tree. So just delete (or 4022 * trim and adjust) any existing csum items in the log for this range. 
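 *
 * A hypothetical example with made-up offsets: suppose a previous clone
 * operation made us log a csum item for the disk range [1M, 1M + 16K)
 * of an extent, and we now log csums for the range [1M, 1M + 64K) of
 * the same extent. If the old item stayed in the log, a later csum
 * search in that range could find the short item and stop there,
 * leaving checksums missing from the fs/subvolume tree after replay.
 * Deleting (or trimming) the old item first keeps the logged ranges
 * non-overlapping.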
4023 */
4024 ret = btrfs_del_csums(trans, log_root, sums->bytenr, sums->len);
4025 if (!ret)
4026 ret = btrfs_csum_file_blocks(trans, log_root, sums);
4027
4028 unlock_extent_cached(&log_root->log_csum_range, sums->bytenr, lock_end,
4029 &cached_state);
4030
4031 return ret;
4032 }
4033
4034 static noinline int copy_items(struct btrfs_trans_handle *trans,
4035 struct btrfs_inode *inode,
4036 struct btrfs_path *dst_path,
4037 struct btrfs_path *src_path,
4038 int start_slot, int nr, int inode_only,
4039 u64 logged_isize)
4040 {
4041 struct btrfs_fs_info *fs_info = trans->fs_info;
4042 unsigned long src_offset;
4043 unsigned long dst_offset;
4044 struct btrfs_root *log = inode->root->log_root;
4045 struct btrfs_file_extent_item *extent;
4046 struct btrfs_inode_item *inode_item;
4047 struct extent_buffer *src = src_path->nodes[0];
4048 int ret;
4049 struct btrfs_key *ins_keys;
4050 u32 *ins_sizes;
4051 char *ins_data;
4052 int i;
4053 struct list_head ordered_sums;
4054 int skip_csum = inode->flags & BTRFS_INODE_NODATASUM;
4055
4056 INIT_LIST_HEAD(&ordered_sums);
4057
4058 ins_data = kmalloc(nr * sizeof(struct btrfs_key) +
4059 nr * sizeof(u32), GFP_NOFS);
4060 if (!ins_data)
4061 return -ENOMEM;
4062
4063 ins_sizes = (u32 *)ins_data;
4064 ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32));
4065
4066 for (i = 0; i < nr; i++) {
4067 ins_sizes[i] = btrfs_item_size_nr(src, i + start_slot);
4068 btrfs_item_key_to_cpu(src, ins_keys + i, i + start_slot);
4069 }
4070 ret = btrfs_insert_empty_items(trans, log, dst_path,
4071 ins_keys, ins_sizes, nr);
4072 if (ret) {
4073 kfree(ins_data);
4074 return ret;
4075 }
4076
4077 for (i = 0; i < nr; i++, dst_path->slots[0]++) {
4078 dst_offset = btrfs_item_ptr_offset(dst_path->nodes[0],
4079 dst_path->slots[0]);
4080
4081 src_offset = btrfs_item_ptr_offset(src, start_slot + i);
4082
4083 if (ins_keys[i].type == BTRFS_INODE_ITEM_KEY) {
4084 inode_item = btrfs_item_ptr(dst_path->nodes[0],
4085 dst_path->slots[0],
4086 struct btrfs_inode_item);
4087 fill_inode_item(trans, dst_path->nodes[0], inode_item,
4088 &inode->vfs_inode,
4089 inode_only == LOG_INODE_EXISTS,
4090 logged_isize);
4091 } else {
4092 copy_extent_buffer(dst_path->nodes[0], src, dst_offset,
4093 src_offset, ins_sizes[i]);
4094 }
4095
4096 /* look up the checksums for any new file data extents we
4097 * copy into the log, so that truncates or deletes of this
4098 * inode don't have to relog the inode again
4099 */
4100 if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY &&
4101 !skip_csum) {
4102 int found_type;
4103 extent = btrfs_item_ptr(src, start_slot + i,
4104 struct btrfs_file_extent_item);
4105
4106 if (btrfs_file_extent_generation(src, extent) < trans->transid)
4107 continue;
4108
4109 found_type = btrfs_file_extent_type(src, extent);
4110 if (found_type == BTRFS_FILE_EXTENT_REG) {
4111 u64 ds, dl, cs, cl;
4112 ds = btrfs_file_extent_disk_bytenr(src,
4113 extent);
4114 /* ds == 0 is a hole */
4115 if (ds == 0)
4116 continue;
4117
4118 dl = btrfs_file_extent_disk_num_bytes(src,
4119 extent);
4120 cs = btrfs_file_extent_offset(src, extent);
4121 cl = btrfs_file_extent_num_bytes(src,
4122 extent);
4123 if (btrfs_file_extent_compression(src,
4124 extent)) {
4125 cs = 0;
4126 cl = dl;
4127 }
4128
4129 ret = btrfs_lookup_csums_range(
4130 fs_info->csum_root,
4131 ds + cs, ds + cs + cl - 1,
4132 &ordered_sums, 0);
4133 if (ret)
4134 break;
4135 }
4136 }
4137 }
4138
4139 btrfs_mark_buffer_dirty(dst_path->nodes[0]);
4140 btrfs_release_path(dst_path);
4141 kfree(ins_data);
4142
4143 /*
4144 * we have to do this after the loop
above to avoid changing the 4145 * log tree while trying to change the log tree. 4146 */ 4147 while (!list_empty(&ordered_sums)) { 4148 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next, 4149 struct btrfs_ordered_sum, 4150 list); 4151 if (!ret) 4152 ret = log_csums(trans, inode, log, sums); 4153 list_del(&sums->list); 4154 kfree(sums); 4155 } 4156 4157 return ret; 4158 } 4159 4160 static int extent_cmp(void *priv, const struct list_head *a, 4161 const struct list_head *b) 4162 { 4163 struct extent_map *em1, *em2; 4164 4165 em1 = list_entry(a, struct extent_map, list); 4166 em2 = list_entry(b, struct extent_map, list); 4167 4168 if (em1->start < em2->start) 4169 return -1; 4170 else if (em1->start > em2->start) 4171 return 1; 4172 return 0; 4173 } 4174 4175 static int log_extent_csums(struct btrfs_trans_handle *trans, 4176 struct btrfs_inode *inode, 4177 struct btrfs_root *log_root, 4178 const struct extent_map *em, 4179 struct btrfs_log_ctx *ctx) 4180 { 4181 struct btrfs_ordered_extent *ordered; 4182 u64 csum_offset; 4183 u64 csum_len; 4184 u64 mod_start = em->mod_start; 4185 u64 mod_len = em->mod_len; 4186 LIST_HEAD(ordered_sums); 4187 int ret = 0; 4188 4189 if (inode->flags & BTRFS_INODE_NODATASUM || 4190 test_bit(EXTENT_FLAG_PREALLOC, &em->flags) || 4191 em->block_start == EXTENT_MAP_HOLE) 4192 return 0; 4193 4194 list_for_each_entry(ordered, &ctx->ordered_extents, log_list) { 4195 const u64 ordered_end = ordered->file_offset + ordered->num_bytes; 4196 const u64 mod_end = mod_start + mod_len; 4197 struct btrfs_ordered_sum *sums; 4198 4199 if (mod_len == 0) 4200 break; 4201 4202 if (ordered_end <= mod_start) 4203 continue; 4204 if (mod_end <= ordered->file_offset) 4205 break; 4206 4207 /* 4208 * We are going to copy all the csums on this ordered extent, so 4209 * go ahead and adjust mod_start and mod_len in case this ordered 4210 * extent has already been logged. 4211 */ 4212 if (ordered->file_offset > mod_start) { 4213 if (ordered_end >= mod_end) 4214 mod_len = ordered->file_offset - mod_start; 4215 /* 4216 * If we have this case 4217 * 4218 * |--------- logged extent ---------| 4219 * |----- ordered extent ----| 4220 * 4221 * Just don't mess with mod_start and mod_len, we'll 4222 * just end up logging more csums than we need and it 4223 * will be ok. 4224 */ 4225 } else { 4226 if (ordered_end < mod_end) { 4227 mod_len = mod_end - ordered_end; 4228 mod_start = ordered_end; 4229 } else { 4230 mod_len = 0; 4231 } 4232 } 4233 4234 /* 4235 * To keep us from looping for the above case of an ordered 4236 * extent that falls inside of the logged extent. 4237 */ 4238 if (test_and_set_bit(BTRFS_ORDERED_LOGGED_CSUM, &ordered->flags)) 4239 continue; 4240 4241 list_for_each_entry(sums, &ordered->list, list) { 4242 ret = log_csums(trans, inode, log_root, sums); 4243 if (ret) 4244 return ret; 4245 } 4246 } 4247 4248 /* We're done, found all csums in the ordered extents. */ 4249 if (mod_len == 0) 4250 return 0; 4251 4252 /* If we're compressed we have to save the entire range of csums. */ 4253 if (em->compress_type) { 4254 csum_offset = 0; 4255 csum_len = max(em->block_len, em->orig_block_len); 4256 } else { 4257 csum_offset = mod_start - em->start; 4258 csum_len = mod_len; 4259 } 4260 4261 /* block start is already adjusted for the file extent offset. 
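 *
 * Worked example with made-up numbers: for an uncompressed extent map
 * with em->start == 0, em->block_start == 1M, mod_start == 4K and
 * mod_len == 8K, we get csum_offset == 4K and csum_len == 8K, and look
 * up csums for the disk range [1M + 4K, 1M + 12K). For a compressed
 * extent the checksums cover the compressed on-disk bytes, so we take
 * the whole extent instead: csum_offset == 0 and
 * csum_len == max(block_len, orig_block_len), as set just above.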
*/ 4262 ret = btrfs_lookup_csums_range(trans->fs_info->csum_root, 4263 em->block_start + csum_offset, 4264 em->block_start + csum_offset + 4265 csum_len - 1, &ordered_sums, 0); 4266 if (ret) 4267 return ret; 4268 4269 while (!list_empty(&ordered_sums)) { 4270 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next, 4271 struct btrfs_ordered_sum, 4272 list); 4273 if (!ret) 4274 ret = log_csums(trans, inode, log_root, sums); 4275 list_del(&sums->list); 4276 kfree(sums); 4277 } 4278 4279 return ret; 4280 } 4281 4282 static int log_one_extent(struct btrfs_trans_handle *trans, 4283 struct btrfs_inode *inode, struct btrfs_root *root, 4284 const struct extent_map *em, 4285 struct btrfs_path *path, 4286 struct btrfs_log_ctx *ctx) 4287 { 4288 struct btrfs_drop_extents_args drop_args = { 0 }; 4289 struct btrfs_root *log = root->log_root; 4290 struct btrfs_file_extent_item *fi; 4291 struct extent_buffer *leaf; 4292 struct btrfs_map_token token; 4293 struct btrfs_key key; 4294 u64 extent_offset = em->start - em->orig_start; 4295 u64 block_len; 4296 int ret; 4297 4298 ret = log_extent_csums(trans, inode, log, em, ctx); 4299 if (ret) 4300 return ret; 4301 4302 drop_args.path = path; 4303 drop_args.start = em->start; 4304 drop_args.end = em->start + em->len; 4305 drop_args.replace_extent = true; 4306 drop_args.extent_item_size = sizeof(*fi); 4307 ret = btrfs_drop_extents(trans, log, inode, &drop_args); 4308 if (ret) 4309 return ret; 4310 4311 if (!drop_args.extent_inserted) { 4312 key.objectid = btrfs_ino(inode); 4313 key.type = BTRFS_EXTENT_DATA_KEY; 4314 key.offset = em->start; 4315 4316 ret = btrfs_insert_empty_item(trans, log, path, &key, 4317 sizeof(*fi)); 4318 if (ret) 4319 return ret; 4320 } 4321 leaf = path->nodes[0]; 4322 btrfs_init_map_token(&token, leaf); 4323 fi = btrfs_item_ptr(leaf, path->slots[0], 4324 struct btrfs_file_extent_item); 4325 4326 btrfs_set_token_file_extent_generation(&token, fi, trans->transid); 4327 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) 4328 btrfs_set_token_file_extent_type(&token, fi, 4329 BTRFS_FILE_EXTENT_PREALLOC); 4330 else 4331 btrfs_set_token_file_extent_type(&token, fi, 4332 BTRFS_FILE_EXTENT_REG); 4333 4334 block_len = max(em->block_len, em->orig_block_len); 4335 if (em->compress_type != BTRFS_COMPRESS_NONE) { 4336 btrfs_set_token_file_extent_disk_bytenr(&token, fi, 4337 em->block_start); 4338 btrfs_set_token_file_extent_disk_num_bytes(&token, fi, block_len); 4339 } else if (em->block_start < EXTENT_MAP_LAST_BYTE) { 4340 btrfs_set_token_file_extent_disk_bytenr(&token, fi, 4341 em->block_start - 4342 extent_offset); 4343 btrfs_set_token_file_extent_disk_num_bytes(&token, fi, block_len); 4344 } else { 4345 btrfs_set_token_file_extent_disk_bytenr(&token, fi, 0); 4346 btrfs_set_token_file_extent_disk_num_bytes(&token, fi, 0); 4347 } 4348 4349 btrfs_set_token_file_extent_offset(&token, fi, extent_offset); 4350 btrfs_set_token_file_extent_num_bytes(&token, fi, em->len); 4351 btrfs_set_token_file_extent_ram_bytes(&token, fi, em->ram_bytes); 4352 btrfs_set_token_file_extent_compression(&token, fi, em->compress_type); 4353 btrfs_set_token_file_extent_encryption(&token, fi, 0); 4354 btrfs_set_token_file_extent_other_encoding(&token, fi, 0); 4355 btrfs_mark_buffer_dirty(leaf); 4356 4357 btrfs_release_path(path); 4358 4359 return ret; 4360 } 4361 4362 /* 4363 * Log all prealloc extents beyond the inode's i_size to make sure we do not 4364 * lose them after doing a fast fsync and replaying the log. 
We scan the
4365 * subvolume's root instead of iterating the inode's extent map tree because
4366 * otherwise we can log incorrect extent items based on extent map conversion.
4367 * That can happen because extent maps are merged when they
4368 * are not in the extent map tree's list of modified extents.
4369 */
4370 static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans,
4371 struct btrfs_inode *inode,
4372 struct btrfs_path *path)
4373 {
4374 struct btrfs_root *root = inode->root;
4375 struct btrfs_key key;
4376 const u64 i_size = i_size_read(&inode->vfs_inode);
4377 const u64 ino = btrfs_ino(inode);
4378 struct btrfs_path *dst_path = NULL;
4379 bool dropped_extents = false;
4380 u64 truncate_offset = i_size;
4381 struct extent_buffer *leaf;
4382 int slot;
4383 int ins_nr = 0;
4384 int start_slot;
4385 int ret;
4386
4387 if (!(inode->flags & BTRFS_INODE_PREALLOC))
4388 return 0;
4389
4390 key.objectid = ino;
4391 key.type = BTRFS_EXTENT_DATA_KEY;
4392 key.offset = i_size;
4393 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4394 if (ret < 0)
4395 goto out;
4396
4397 /*
4398 * We must check if there is a prealloc extent that starts before the
4399 * i_size and crosses the i_size boundary. This is to ensure later we
4400 * truncate down to the end of that extent and not to the i_size, as
4401 * otherwise we end up losing part of the prealloc extent after a log
4402 * replay and with an implicit hole if there is another prealloc extent
4403 * that starts at an offset beyond i_size.
4404 */
4405 ret = btrfs_previous_item(root, path, ino, BTRFS_EXTENT_DATA_KEY);
4406 if (ret < 0)
4407 goto out;
4408
4409 if (ret == 0) {
4410 struct btrfs_file_extent_item *ei;
4411
4412 leaf = path->nodes[0];
4413 slot = path->slots[0];
4414 ei = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
4415
4416 if (btrfs_file_extent_type(leaf, ei) ==
4417 BTRFS_FILE_EXTENT_PREALLOC) {
4418 u64 extent_end;
4419
4420 btrfs_item_key_to_cpu(leaf, &key, slot);
4421 extent_end = key.offset +
4422 btrfs_file_extent_num_bytes(leaf, ei);
4423
4424 if (extent_end > i_size)
4425 truncate_offset = extent_end;
4426 }
4427 } else {
4428 ret = 0;
4429 }
4430
4431 while (true) {
4432 leaf = path->nodes[0];
4433 slot = path->slots[0];
4434
4435 if (slot >= btrfs_header_nritems(leaf)) {
4436 if (ins_nr > 0) {
4437 ret = copy_items(trans, inode, dst_path, path,
4438 start_slot, ins_nr, 1, 0);
4439 if (ret < 0)
4440 goto out;
4441 ins_nr = 0;
4442 }
4443 ret = btrfs_next_leaf(root, path);
4444 if (ret < 0)
4445 goto out;
4446 if (ret > 0) {
4447 ret = 0;
4448 break;
4449 }
4450 continue;
4451 }
4452
4453 btrfs_item_key_to_cpu(leaf, &key, slot);
4454 if (key.objectid > ino)
4455 break;
4456 if (WARN_ON_ONCE(key.objectid < ino) ||
4457 key.type < BTRFS_EXTENT_DATA_KEY ||
4458 key.offset < i_size) {
4459 path->slots[0]++;
4460 continue;
4461 }
4462 if (!dropped_extents) {
4463 /*
4464 * Avoid logging extent items that were logged in past fsync
4465 * calls, which would lead to duplicate keys in the log tree.
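 *
 * Illustrative layout: with i_size == 8K, one prealloc extent at
 * [4K, 64K) crossing i_size and another at [64K, 128K), truncate_offset
 * was set to 64K above, so we drop log items from 64K onwards instead
 * of from i_size, keeping the [4K, 64K) extent intact across log
 * replay.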
4466 */
4467 do {
4468 ret = btrfs_truncate_inode_items(trans,
4469 root->log_root,
4470 inode, truncate_offset,
4471 BTRFS_EXTENT_DATA_KEY,
4472 NULL);
4473 } while (ret == -EAGAIN);
4474 if (ret)
4475 goto out;
4476 dropped_extents = true;
4477 }
4478 if (ins_nr == 0)
4479 start_slot = slot;
4480 ins_nr++;
4481 path->slots[0]++;
4482 if (!dst_path) {
4483 dst_path = btrfs_alloc_path();
4484 if (!dst_path) {
4485 ret = -ENOMEM;
4486 goto out;
4487 }
4488 }
4489 }
4490 if (ins_nr > 0)
4491 ret = copy_items(trans, inode, dst_path, path,
4492 start_slot, ins_nr, 1, 0);
4493 out:
4494 btrfs_release_path(path);
4495 btrfs_free_path(dst_path);
4496 return ret;
4497 }
4498
4499 static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans,
4500 struct btrfs_root *root,
4501 struct btrfs_inode *inode,
4502 struct btrfs_path *path,
4503 struct btrfs_log_ctx *ctx)
4504 {
4505 struct btrfs_ordered_extent *ordered;
4506 struct btrfs_ordered_extent *tmp;
4507 struct extent_map *em, *n;
4508 struct list_head extents;
4509 struct extent_map_tree *tree = &inode->extent_tree;
4510 int ret = 0;
4511 int num = 0;
4512
4513 INIT_LIST_HEAD(&extents);
4514
4515 write_lock(&tree->lock);
4516
4517 list_for_each_entry_safe(em, n, &tree->modified_extents, list) {
4518 list_del_init(&em->list);
4519 /*
4520 * Just an arbitrary number. This can get really CPU intensive
4521 * once we start getting a lot of extents, and once we have a
4522 * bunch of extents we just want to commit since it will be
4523 * faster.
4524 */
4525 if (++num > 32768) {
4526 list_del_init(&tree->modified_extents);
4527 ret = -EFBIG;
4528 goto process;
4529 }
4530
4531 if (em->generation < trans->transid)
4532 continue;
4533
4534 /* We log prealloc extents beyond eof later. */
4535 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) &&
4536 em->start >= i_size_read(&inode->vfs_inode))
4537 continue;
4538
4539 /* Need a ref to keep it from getting evicted from cache */
4540 refcount_inc(&em->refs);
4541 set_bit(EXTENT_FLAG_LOGGING, &em->flags);
4542 list_add_tail(&em->list, &extents);
4543 num++;
4544 }
4545
4546 list_sort(NULL, &extents, extent_cmp);
4547 process:
4548 while (!list_empty(&extents)) {
4549 em = list_entry(extents.next, struct extent_map, list);
4550
4551 list_del_init(&em->list);
4552
4553 /*
4554 * If we had an error we just need to delete everybody from our
4555 * private list.
4556 */
4557 if (ret) {
4558 clear_em_logging(tree, em);
4559 free_extent_map(em);
4560 continue;
4561 }
4562
4563 write_unlock(&tree->lock);
4564
4565 ret = log_one_extent(trans, inode, root, em, path, ctx);
4566 write_lock(&tree->lock);
4567 clear_em_logging(tree, em);
4568 free_extent_map(em);
4569 }
4570 WARN_ON(!list_empty(&extents));
4571 write_unlock(&tree->lock);
4572
4573 btrfs_release_path(path);
4574 if (!ret)
4575 ret = btrfs_log_prealloc_extents(trans, inode, path);
4576 if (ret)
4577 return ret;
4578
4579 /*
4580 * We have logged all extents successfully, now make sure the commit of
4581 * the current transaction waits for the ordered extents to complete
4582 * before it commits and wipes out the log trees, otherwise we would
4583 * lose data if an ordered extent completes after the transaction
4584 * commits and a power failure happens after the transaction commit.
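 *
 * A sketch of the ordering we must prevent (assumed timeline, for
 * illustration only):
 *
 *   fsync()                   logs the extent; its ordered extent is
 *                             still pending (a fast fsync only waits
 *                             for writeback)
 *   transaction commits       the log trees are wiped
 *   ordered extent completes  its metadata update goes into the next
 *                             transaction
 *   <power failure>           the next transaction never committed:
 *                             the data is in neither the log nor the
 *                             subvolume tree
 *
 * The BTRFS_ORDERED_PENDING bit set below is what makes the commit
 * wait.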
4585 */
4586 list_for_each_entry_safe(ordered, tmp, &ctx->ordered_extents, log_list) {
4587 list_del_init(&ordered->log_list);
4588 set_bit(BTRFS_ORDERED_LOGGED, &ordered->flags);
4589
4590 if (!test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags)) {
4591 spin_lock_irq(&inode->ordered_tree.lock);
4592 if (!test_bit(BTRFS_ORDERED_COMPLETE, &ordered->flags)) {
4593 set_bit(BTRFS_ORDERED_PENDING, &ordered->flags);
4594 atomic_inc(&trans->transaction->pending_ordered);
4595 }
4596 spin_unlock_irq(&inode->ordered_tree.lock);
4597 }
4598 btrfs_put_ordered_extent(ordered);
4599 }
4600
4601 return 0;
4602 }
4603
4604 static int logged_inode_size(struct btrfs_root *log, struct btrfs_inode *inode,
4605 struct btrfs_path *path, u64 *size_ret)
4606 {
4607 struct btrfs_key key;
4608 int ret;
4609
4610 key.objectid = btrfs_ino(inode);
4611 key.type = BTRFS_INODE_ITEM_KEY;
4612 key.offset = 0;
4613
4614 ret = btrfs_search_slot(NULL, log, &key, path, 0, 0);
4615 if (ret < 0) {
4616 return ret;
4617 } else if (ret > 0) {
4618 *size_ret = 0;
4619 } else {
4620 struct btrfs_inode_item *item;
4621
4622 item = btrfs_item_ptr(path->nodes[0], path->slots[0],
4623 struct btrfs_inode_item);
4624 *size_ret = btrfs_inode_size(path->nodes[0], item);
4625 /*
4626 * If the in-memory inode's i_size is smaller than the inode
4627 * size stored in the btree, return the inode's i_size, so
4628 * that we get a correct inode size after replaying the log
4629 * when before a power failure we had a shrinking truncate
4630 * followed by addition of a new name (rename / new hard link).
4631 * Otherwise return the inode size from the btree, to avoid
4632 * data loss when replaying a log due to previously doing a
4633 * write that expands the inode's size and logging a new name
4634 * immediately after.
4635 */
4636 if (*size_ret > inode->vfs_inode.i_size)
4637 *size_ret = inode->vfs_inode.i_size;
4638 }
4639
4640 btrfs_release_path(path);
4641 return 0;
4642 }
4643
4644 /*
4645 * At the moment we always log all xattrs. This is to figure out at log replay
4646 * time which xattrs must have their deletion replayed. If an xattr is missing
4647 * in the log tree and exists in the fs/subvol tree, we delete it. This is
4648 * because if an xattr is deleted, the inode is fsynced and a power failure
4649 * happens, the log is replayed the next time the fs is mounted, and we want
4650 * the xattr to not exist anymore (same behaviour as other filesystems
4651 * with a journal, ext3/4, xfs, f2fs, etc).
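 *
 * A user space sequence that relies on this behaviour (an illustrative
 * sketch, not kernel code; error handling omitted):
 *
 *   #include <fcntl.h>
 *   #include <sys/xattr.h>
 *   #include <unistd.h>
 *
 *   int fd = open("/mnt/foo", O_RDWR | O_CREAT, 0644);
 *   fsetxattr(fd, "user.attr", "val", 3, 0);
 *   fsync(fd);                       - the xattr reaches the log
 *   fremovexattr(fd, "user.attr");
 *   fsync(fd);                       - the log must replay the deletion
 *   <power failure>
 *
 * After the log is replayed on the next mount, user.attr must be gone.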
4652 */
4653 static int btrfs_log_all_xattrs(struct btrfs_trans_handle *trans,
4654 struct btrfs_root *root,
4655 struct btrfs_inode *inode,
4656 struct btrfs_path *path,
4657 struct btrfs_path *dst_path)
4658 {
4659 int ret;
4660 struct btrfs_key key;
4661 const u64 ino = btrfs_ino(inode);
4662 int ins_nr = 0;
4663 int start_slot = 0;
4664 bool found_xattrs = false;
4665
4666 if (test_bit(BTRFS_INODE_NO_XATTRS, &inode->runtime_flags))
4667 return 0;
4668
4669 key.objectid = ino;
4670 key.type = BTRFS_XATTR_ITEM_KEY;
4671 key.offset = 0;
4672
4673 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4674 if (ret < 0)
4675 return ret;
4676
4677 while (true) {
4678 int slot = path->slots[0];
4679 struct extent_buffer *leaf = path->nodes[0];
4680 int nritems = btrfs_header_nritems(leaf);
4681
4682 if (slot >= nritems) {
4683 if (ins_nr > 0) {
4684 ret = copy_items(trans, inode, dst_path, path,
4685 start_slot, ins_nr, 1, 0);
4686 if (ret < 0)
4687 return ret;
4688 ins_nr = 0;
4689 }
4690 ret = btrfs_next_leaf(root, path);
4691 if (ret < 0)
4692 return ret;
4693 else if (ret > 0)
4694 break;
4695 continue;
4696 }
4697
4698 btrfs_item_key_to_cpu(leaf, &key, slot);
4699 if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY)
4700 break;
4701
4702 if (ins_nr == 0)
4703 start_slot = slot;
4704 ins_nr++;
4705 path->slots[0]++;
4706 found_xattrs = true;
4707 cond_resched();
4708 }
4709 if (ins_nr > 0) {
4710 ret = copy_items(trans, inode, dst_path, path,
4711 start_slot, ins_nr, 1, 0);
4712 if (ret < 0)
4713 return ret;
4714 }
4715
4716 if (!found_xattrs)
4717 set_bit(BTRFS_INODE_NO_XATTRS, &inode->runtime_flags);
4718
4719 return 0;
4720 }
4721
4722 /*
4723 * When using the NO_HOLES feature, if we punched a hole that causes the
4724 * deletion of entire leafs or all the extent items of the first leaf (the one
4725 * that contains the inode item and references), we may end up not processing
4726 * any extents, because there are no leafs with a generation matching the
4727 * current transaction that have extent items for our inode. So we need to find
4728 * if any holes exist and then log them. We also need to log holes after any
4729 * truncate operation that changes the inode's size.
4730 */
4731 static int btrfs_log_holes(struct btrfs_trans_handle *trans,
4732 struct btrfs_root *root,
4733 struct btrfs_inode *inode,
4734 struct btrfs_path *path)
4735 {
4736 struct btrfs_fs_info *fs_info = root->fs_info;
4737 struct btrfs_key key;
4738 const u64 ino = btrfs_ino(inode);
4739 const u64 i_size = i_size_read(&inode->vfs_inode);
4740 u64 prev_extent_end = 0;
4741 int ret;
4742
4743 if (!btrfs_fs_incompat(fs_info, NO_HOLES) || i_size == 0)
4744 return 0;
4745
4746 key.objectid = ino;
4747 key.type = BTRFS_EXTENT_DATA_KEY;
4748 key.offset = 0;
4749
4750 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4751 if (ret < 0)
4752 return ret;
4753
4754 while (true) {
4755 struct extent_buffer *leaf = path->nodes[0];
4756
4757 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
4758 ret = btrfs_next_leaf(root, path);
4759 if (ret < 0)
4760 return ret;
4761 if (ret > 0) {
4762 ret = 0;
4763 break;
4764 }
4765 leaf = path->nodes[0];
4766 }
4767
4768 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4769 if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY)
4770 break;
4771
4772 /* We have a hole, log it.
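 *
 * Worked example with made-up offsets: if the previous extent
 * ended at 4K (prev_extent_end == 4096) and this key starts at
 * 16K (key.offset == 16384), we insert a file extent item
 * describing a 12K hole at offset 4K (disk_bytenr == 0 and
 * num_bytes == hole_len == 12288).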
*/
4773 if (prev_extent_end < key.offset) {
4774 const u64 hole_len = key.offset - prev_extent_end;
4775
4776 /*
4777 * Release the path to avoid deadlocks with other code
4778 * paths that search the root while holding locks on
4779 * leafs from the log root.
4780 */
4781 btrfs_release_path(path);
4782 ret = btrfs_insert_file_extent(trans, root->log_root,
4783 ino, prev_extent_end, 0,
4784 0, hole_len, 0, hole_len,
4785 0, 0, 0);
4786 if (ret < 0)
4787 return ret;
4788
4789 /*
4790 * Search for the same key again in the root. Since it's
4791 * an extent item and we are holding the inode lock, the
4792 * key must still exist. If it doesn't, just emit a
4793 * warning and return an error to fall back to a
4794 * transaction commit.
4795 */
4796 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4797 if (ret < 0)
4798 return ret;
4799 if (WARN_ON(ret > 0))
4800 return -ENOENT;
4801 leaf = path->nodes[0];
4802 }
4803
4804 prev_extent_end = btrfs_file_extent_end(path);
4805 path->slots[0]++;
4806 cond_resched();
4807 }
4808
4809 if (prev_extent_end < i_size) {
4810 u64 hole_len;
4811
4812 btrfs_release_path(path);
4813 hole_len = ALIGN(i_size - prev_extent_end, fs_info->sectorsize);
4814 ret = btrfs_insert_file_extent(trans, root->log_root,
4815 ino, prev_extent_end, 0, 0,
4816 hole_len, 0, hole_len,
4817 0, 0, 0);
4818 if (ret < 0)
4819 return ret;
4820 }
4821
4822 return 0;
4823 }
4824
4825 /*
4826 * When we are logging a new inode X, check if it doesn't have a reference that
4827 * matches the reference from some other inode Y created in a past transaction
4828 * and that was renamed in the current transaction. If we don't do this, then at
4829 * log replay time we can lose inode Y (and all its files if it's a directory):
4830 *
4831 * mkdir /mnt/x
4832 * echo "hello world" > /mnt/x/foobar
4833 * sync
4834 * mv /mnt/x /mnt/y
4835 * mkdir /mnt/x # or touch /mnt/x
4836 * xfs_io -c fsync /mnt/x
4837 * <power fail>
4838 * mount fs, trigger log replay
4839 *
4840 * After the log replay procedure, we would lose the first directory and all its
4841 * files (file foobar).
4842 * For the case where inode Y is not a directory we simply end up losing it:
4843 *
4844 * echo "123" > /mnt/foo
4845 * sync
4846 * mv /mnt/foo /mnt/bar
4847 * echo "abc" > /mnt/foo
4848 * xfs_io -c fsync /mnt/foo
4849 * <power fail>
4850 *
4851 * We also need this for cases where a snapshot entry is replaced by some other
4852 * entry (file or directory) otherwise we end up with an unreplayable log due to
4853 * attempts to delete the snapshot entry (entry of type BTRFS_ROOT_ITEM_KEY) as
4854 * if it were a regular entry:
4855 *
4856 * mkdir /mnt/x
4857 * btrfs subvolume snapshot /mnt /mnt/x/snap
4858 * btrfs subvolume delete /mnt/x/snap
4859 * rmdir /mnt/x
4860 * mkdir /mnt/x
4861 * fsync /mnt/x or fsync some new file inside it
4862 * <power fail>
4863 *
4864 * The snapshot delete, rmdir of x, mkdir of a new x and the fsync all happen in
4865 * the same transaction.
4866 */ 4867 static int btrfs_check_ref_name_override(struct extent_buffer *eb, 4868 const int slot, 4869 const struct btrfs_key *key, 4870 struct btrfs_inode *inode, 4871 u64 *other_ino, u64 *other_parent) 4872 { 4873 int ret; 4874 struct btrfs_path *search_path; 4875 char *name = NULL; 4876 u32 name_len = 0; 4877 u32 item_size = btrfs_item_size_nr(eb, slot); 4878 u32 cur_offset = 0; 4879 unsigned long ptr = btrfs_item_ptr_offset(eb, slot); 4880 4881 search_path = btrfs_alloc_path(); 4882 if (!search_path) 4883 return -ENOMEM; 4884 search_path->search_commit_root = 1; 4885 search_path->skip_locking = 1; 4886 4887 while (cur_offset < item_size) { 4888 u64 parent; 4889 u32 this_name_len; 4890 u32 this_len; 4891 unsigned long name_ptr; 4892 struct btrfs_dir_item *di; 4893 4894 if (key->type == BTRFS_INODE_REF_KEY) { 4895 struct btrfs_inode_ref *iref; 4896 4897 iref = (struct btrfs_inode_ref *)(ptr + cur_offset); 4898 parent = key->offset; 4899 this_name_len = btrfs_inode_ref_name_len(eb, iref); 4900 name_ptr = (unsigned long)(iref + 1); 4901 this_len = sizeof(*iref) + this_name_len; 4902 } else { 4903 struct btrfs_inode_extref *extref; 4904 4905 extref = (struct btrfs_inode_extref *)(ptr + 4906 cur_offset); 4907 parent = btrfs_inode_extref_parent(eb, extref); 4908 this_name_len = btrfs_inode_extref_name_len(eb, extref); 4909 name_ptr = (unsigned long)&extref->name; 4910 this_len = sizeof(*extref) + this_name_len; 4911 } 4912 4913 if (this_name_len > name_len) { 4914 char *new_name; 4915 4916 new_name = krealloc(name, this_name_len, GFP_NOFS); 4917 if (!new_name) { 4918 ret = -ENOMEM; 4919 goto out; 4920 } 4921 name_len = this_name_len; 4922 name = new_name; 4923 } 4924 4925 read_extent_buffer(eb, name, name_ptr, this_name_len); 4926 di = btrfs_lookup_dir_item(NULL, inode->root, search_path, 4927 parent, name, this_name_len, 0); 4928 if (di && !IS_ERR(di)) { 4929 struct btrfs_key di_key; 4930 4931 btrfs_dir_item_key_to_cpu(search_path->nodes[0], 4932 di, &di_key); 4933 if (di_key.type == BTRFS_INODE_ITEM_KEY) { 4934 if (di_key.objectid != key->objectid) { 4935 ret = 1; 4936 *other_ino = di_key.objectid; 4937 *other_parent = parent; 4938 } else { 4939 ret = 0; 4940 } 4941 } else { 4942 ret = -EAGAIN; 4943 } 4944 goto out; 4945 } else if (IS_ERR(di)) { 4946 ret = PTR_ERR(di); 4947 goto out; 4948 } 4949 btrfs_release_path(search_path); 4950 4951 cur_offset += this_len; 4952 } 4953 ret = 0; 4954 out: 4955 btrfs_free_path(search_path); 4956 kfree(name); 4957 return ret; 4958 } 4959 4960 struct btrfs_ino_list { 4961 u64 ino; 4962 u64 parent; 4963 struct list_head list; 4964 }; 4965 4966 static int log_conflicting_inodes(struct btrfs_trans_handle *trans, 4967 struct btrfs_root *root, 4968 struct btrfs_path *path, 4969 struct btrfs_log_ctx *ctx, 4970 u64 ino, u64 parent) 4971 { 4972 struct btrfs_ino_list *ino_elem; 4973 LIST_HEAD(inode_list); 4974 int ret = 0; 4975 4976 ino_elem = kmalloc(sizeof(*ino_elem), GFP_NOFS); 4977 if (!ino_elem) 4978 return -ENOMEM; 4979 ino_elem->ino = ino; 4980 ino_elem->parent = parent; 4981 list_add_tail(&ino_elem->list, &inode_list); 4982 4983 while (!list_empty(&inode_list)) { 4984 struct btrfs_fs_info *fs_info = root->fs_info; 4985 struct btrfs_key key; 4986 struct inode *inode; 4987 4988 ino_elem = list_first_entry(&inode_list, struct btrfs_ino_list, 4989 list); 4990 ino = ino_elem->ino; 4991 parent = ino_elem->parent; 4992 list_del(&ino_elem->list); 4993 kfree(ino_elem); 4994 if (ret) 4995 continue; 4996 4997 btrfs_release_path(path); 4998 4999 inode = 
btrfs_iget(fs_info->sb, ino, root); 5000 /* 5001 * If the other inode that had a conflicting dir entry was 5002 * deleted in the current transaction, we need to log its parent 5003 * directory. 5004 */ 5005 if (IS_ERR(inode)) { 5006 ret = PTR_ERR(inode); 5007 if (ret == -ENOENT) { 5008 inode = btrfs_iget(fs_info->sb, parent, root); 5009 if (IS_ERR(inode)) { 5010 ret = PTR_ERR(inode); 5011 } else { 5012 ret = btrfs_log_inode(trans, root, 5013 BTRFS_I(inode), 5014 LOG_OTHER_INODE_ALL, 5015 ctx); 5016 btrfs_add_delayed_iput(inode); 5017 } 5018 } 5019 continue; 5020 } 5021 /* 5022 * If the inode was already logged skip it - otherwise we can 5023 * hit an infinite loop. Example: 5024 * 5025 * From the commit root (previous transaction) we have the 5026 * following inodes: 5027 * 5028 * inode 257 a directory 5029 * inode 258 with references "zz" and "zz_link" on inode 257 5030 * inode 259 with reference "a" on inode 257 5031 * 5032 * And in the current (uncommitted) transaction we have: 5033 * 5034 * inode 257 a directory, unchanged 5035 * inode 258 with references "a" and "a2" on inode 257 5036 * inode 259 with reference "zz_link" on inode 257 5037 * inode 261 with reference "zz" on inode 257 5038 * 5039 * When logging inode 261 the following infinite loop could 5040 * happen if we don't skip already logged inodes: 5041 * 5042 * - we detect inode 258 as a conflicting inode, with inode 261 5043 * on reference "zz", and log it; 5044 * 5045 * - we detect inode 259 as a conflicting inode, with inode 258 5046 * on reference "a", and log it; 5047 * 5048 * - we detect inode 258 as a conflicting inode, with inode 259 5049 * on reference "zz_link", and log it - again! After this we 5050 * repeat the above steps forever. 5051 */ 5052 spin_lock(&BTRFS_I(inode)->lock); 5053 /* 5054 * Check the inode's logged_trans only instead of 5055 * btrfs_inode_in_log(). This is because the last_log_commit of 5056 * the inode is not updated when we only log that it exists and 5057 * it has the full sync bit set (see btrfs_log_inode()). 5058 */ 5059 if (BTRFS_I(inode)->logged_trans == trans->transid) { 5060 spin_unlock(&BTRFS_I(inode)->lock); 5061 btrfs_add_delayed_iput(inode); 5062 continue; 5063 } 5064 spin_unlock(&BTRFS_I(inode)->lock); 5065 /* 5066 * We are safe logging the other inode without acquiring its 5067 * lock as long as we log with the LOG_INODE_EXISTS mode. We 5068 * are safe against concurrent renames of the other inode as 5069 * well because during a rename we pin the log and update the 5070 * log with the new name before we unpin it. 
5071 */ 5072 ret = btrfs_log_inode(trans, root, BTRFS_I(inode), 5073 LOG_OTHER_INODE, ctx); 5074 if (ret) { 5075 btrfs_add_delayed_iput(inode); 5076 continue; 5077 } 5078 5079 key.objectid = ino; 5080 key.type = BTRFS_INODE_REF_KEY; 5081 key.offset = 0; 5082 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 5083 if (ret < 0) { 5084 btrfs_add_delayed_iput(inode); 5085 continue; 5086 } 5087 5088 while (true) { 5089 struct extent_buffer *leaf = path->nodes[0]; 5090 int slot = path->slots[0]; 5091 u64 other_ino = 0; 5092 u64 other_parent = 0; 5093 5094 if (slot >= btrfs_header_nritems(leaf)) { 5095 ret = btrfs_next_leaf(root, path); 5096 if (ret < 0) { 5097 break; 5098 } else if (ret > 0) { 5099 ret = 0; 5100 break; 5101 } 5102 continue; 5103 } 5104 5105 btrfs_item_key_to_cpu(leaf, &key, slot); 5106 if (key.objectid != ino || 5107 (key.type != BTRFS_INODE_REF_KEY && 5108 key.type != BTRFS_INODE_EXTREF_KEY)) { 5109 ret = 0; 5110 break; 5111 } 5112 5113 ret = btrfs_check_ref_name_override(leaf, slot, &key, 5114 BTRFS_I(inode), &other_ino, 5115 &other_parent); 5116 if (ret < 0) 5117 break; 5118 if (ret > 0) { 5119 ino_elem = kmalloc(sizeof(*ino_elem), GFP_NOFS); 5120 if (!ino_elem) { 5121 ret = -ENOMEM; 5122 break; 5123 } 5124 ino_elem->ino = other_ino; 5125 ino_elem->parent = other_parent; 5126 list_add_tail(&ino_elem->list, &inode_list); 5127 ret = 0; 5128 } 5129 path->slots[0]++; 5130 } 5131 btrfs_add_delayed_iput(inode); 5132 } 5133 5134 return ret; 5135 } 5136 5137 static int copy_inode_items_to_log(struct btrfs_trans_handle *trans, 5138 struct btrfs_inode *inode, 5139 struct btrfs_key *min_key, 5140 const struct btrfs_key *max_key, 5141 struct btrfs_path *path, 5142 struct btrfs_path *dst_path, 5143 const u64 logged_isize, 5144 const bool recursive_logging, 5145 const int inode_only, 5146 struct btrfs_log_ctx *ctx, 5147 bool *need_log_inode_item) 5148 { 5149 struct btrfs_root *root = inode->root; 5150 int ins_start_slot = 0; 5151 int ins_nr = 0; 5152 int ret; 5153 5154 while (1) { 5155 ret = btrfs_search_forward(root, min_key, path, trans->transid); 5156 if (ret < 0) 5157 return ret; 5158 if (ret > 0) { 5159 ret = 0; 5160 break; 5161 } 5162 again: 5163 /* Note, ins_nr might be > 0 here, cleanup outside the loop */ 5164 if (min_key->objectid != max_key->objectid) 5165 break; 5166 if (min_key->type > max_key->type) 5167 break; 5168 5169 if (min_key->type == BTRFS_INODE_ITEM_KEY) 5170 *need_log_inode_item = false; 5171 5172 if ((min_key->type == BTRFS_INODE_REF_KEY || 5173 min_key->type == BTRFS_INODE_EXTREF_KEY) && 5174 inode->generation == trans->transid && 5175 !recursive_logging) { 5176 u64 other_ino = 0; 5177 u64 other_parent = 0; 5178 5179 ret = btrfs_check_ref_name_override(path->nodes[0], 5180 path->slots[0], min_key, inode, 5181 &other_ino, &other_parent); 5182 if (ret < 0) { 5183 return ret; 5184 } else if (ret > 0 && ctx && 5185 other_ino != btrfs_ino(BTRFS_I(ctx->inode))) { 5186 if (ins_nr > 0) { 5187 ins_nr++; 5188 } else { 5189 ins_nr = 1; 5190 ins_start_slot = path->slots[0]; 5191 } 5192 ret = copy_items(trans, inode, dst_path, path, 5193 ins_start_slot, ins_nr, 5194 inode_only, logged_isize); 5195 if (ret < 0) 5196 return ret; 5197 ins_nr = 0; 5198 5199 ret = log_conflicting_inodes(trans, root, path, 5200 ctx, other_ino, other_parent); 5201 if (ret) 5202 return ret; 5203 btrfs_release_path(path); 5204 goto next_key; 5205 } 5206 } 5207 5208 /* Skip xattrs, we log them later with btrfs_log_all_xattrs() */ 5209 if (min_key->type == BTRFS_XATTR_ITEM_KEY) { 5210 if (ins_nr == 
0) 5211 goto next_slot; 5212 ret = copy_items(trans, inode, dst_path, path, 5213 ins_start_slot, 5214 ins_nr, inode_only, logged_isize); 5215 if (ret < 0) 5216 return ret; 5217 ins_nr = 0; 5218 goto next_slot; 5219 } 5220 5221 if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) { 5222 ins_nr++; 5223 goto next_slot; 5224 } else if (!ins_nr) { 5225 ins_start_slot = path->slots[0]; 5226 ins_nr = 1; 5227 goto next_slot; 5228 } 5229 5230 ret = copy_items(trans, inode, dst_path, path, ins_start_slot, 5231 ins_nr, inode_only, logged_isize); 5232 if (ret < 0) 5233 return ret; 5234 ins_nr = 1; 5235 ins_start_slot = path->slots[0]; 5236 next_slot: 5237 path->slots[0]++; 5238 if (path->slots[0] < btrfs_header_nritems(path->nodes[0])) { 5239 btrfs_item_key_to_cpu(path->nodes[0], min_key, 5240 path->slots[0]); 5241 goto again; 5242 } 5243 if (ins_nr) { 5244 ret = copy_items(trans, inode, dst_path, path, 5245 ins_start_slot, ins_nr, inode_only, 5246 logged_isize); 5247 if (ret < 0) 5248 return ret; 5249 ins_nr = 0; 5250 } 5251 btrfs_release_path(path); 5252 next_key: 5253 if (min_key->offset < (u64)-1) { 5254 min_key->offset++; 5255 } else if (min_key->type < max_key->type) { 5256 min_key->type++; 5257 min_key->offset = 0; 5258 } else { 5259 break; 5260 } 5261 } 5262 if (ins_nr) 5263 ret = copy_items(trans, inode, dst_path, path, ins_start_slot, 5264 ins_nr, inode_only, logged_isize); 5265 5266 return ret; 5267 } 5268 5269 /* log a single inode in the tree log. 5270 * At least one parent directory for this inode must exist in the tree 5271 * or be logged already. 5272 * 5273 * Any items from this inode changed by the current transaction are copied 5274 * to the log tree. An extra reference is taken on any extents in this 5275 * file, allowing us to avoid a whole pile of corner cases around logging 5276 * blocks that have been removed from the tree. 5277 * 5278 * See LOG_INODE_ALL and related defines for a description of what inode_only 5279 * does. 5280 * 5281 * This handles both files and directories. 5282 */ 5283 static int btrfs_log_inode(struct btrfs_trans_handle *trans, 5284 struct btrfs_root *root, struct btrfs_inode *inode, 5285 int inode_only, 5286 struct btrfs_log_ctx *ctx) 5287 { 5288 struct btrfs_path *path; 5289 struct btrfs_path *dst_path; 5290 struct btrfs_key min_key; 5291 struct btrfs_key max_key; 5292 struct btrfs_root *log = root->log_root; 5293 int err = 0; 5294 int ret = 0; 5295 bool fast_search = false; 5296 u64 ino = btrfs_ino(inode); 5297 struct extent_map_tree *em_tree = &inode->extent_tree; 5298 u64 logged_isize = 0; 5299 bool need_log_inode_item = true; 5300 bool xattrs_logged = false; 5301 bool recursive_logging = false; 5302 5303 path = btrfs_alloc_path(); 5304 if (!path) 5305 return -ENOMEM; 5306 dst_path = btrfs_alloc_path(); 5307 if (!dst_path) { 5308 btrfs_free_path(path); 5309 return -ENOMEM; 5310 } 5311 5312 min_key.objectid = ino; 5313 min_key.type = BTRFS_INODE_ITEM_KEY; 5314 min_key.offset = 0; 5315 5316 max_key.objectid = ino; 5317 5318 5319 /* today the code can only do partial logging of directories */ 5320 if (S_ISDIR(inode->vfs_inode.i_mode) || 5321 (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, 5322 &inode->runtime_flags) && 5323 inode_only >= LOG_INODE_EXISTS)) 5324 max_key.type = BTRFS_XATTR_ITEM_KEY; 5325 else 5326 max_key.type = (u8)-1; 5327 max_key.offset = (u64)-1; 5328 5329 /* 5330 * Only run delayed items if we are a directory. 
We want to make sure
5331 * all directory indexes hit the fs/subvolume tree so we can find them
5332 * and figure out which index ranges have to be logged.
5333 *
5334 * Otherwise commit the delayed inode only if the full sync flag is set,
5335 * as we want to make sure an up to date version is in the subvolume
5336 * tree so copy_inode_items_to_log() / copy_items() can find it and copy
5337 * it to the log tree. For a non full sync, we always log the inode item
5338 * based on the in-memory struct btrfs_inode which is always up to date.
5339 */
5340 if (S_ISDIR(inode->vfs_inode.i_mode))
5341 ret = btrfs_commit_inode_delayed_items(trans, inode);
5342 else if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags))
5343 ret = btrfs_commit_inode_delayed_inode(inode);
5344
5345 if (ret) {
5346 btrfs_free_path(path);
5347 btrfs_free_path(dst_path);
5348 return ret;
5349 }
5350
5351 if (inode_only == LOG_OTHER_INODE || inode_only == LOG_OTHER_INODE_ALL) {
5352 recursive_logging = true;
5353 if (inode_only == LOG_OTHER_INODE)
5354 inode_only = LOG_INODE_EXISTS;
5355 else
5356 inode_only = LOG_INODE_ALL;
5357 mutex_lock_nested(&inode->log_mutex, SINGLE_DEPTH_NESTING);
5358 } else {
5359 mutex_lock(&inode->log_mutex);
5360 }
5361
5362 /*
5363 * This is for cases where logging a directory could result in losing
5364 * a file after replaying the log. For example, if we move a file from a
5365 * directory A to a directory B, then fsync directory A, we have no way
5366 * to know the file was moved from A to B, so logging just A would
5367 * result in losing the file after a log replay.
5368 */
5369 if (S_ISDIR(inode->vfs_inode.i_mode) &&
5370 inode_only == LOG_INODE_ALL &&
5371 inode->last_unlink_trans >= trans->transid) {
5372 btrfs_set_log_full_commit(trans);
5373 err = 1;
5374 goto out_unlock;
5375 }
5376
5377 /*
5378 * a brute force approach to making sure we get the most uptodate
5379 * copies of everything.
5380 */
5381 if (S_ISDIR(inode->vfs_inode.i_mode)) {
5382 int max_key_type = BTRFS_DIR_LOG_INDEX_KEY;
5383
5384 clear_bit(BTRFS_INODE_COPY_EVERYTHING, &inode->runtime_flags);
5385 if (inode_only == LOG_INODE_EXISTS)
5386 max_key_type = BTRFS_XATTR_ITEM_KEY;
5387 ret = drop_objectid_items(trans, log, path, ino, max_key_type);
5388 } else {
5389 if (inode_only == LOG_INODE_EXISTS) {
5390 /*
5391 * Make sure the new inode item we write to the log has
5392 * the same isize as the current one (if it exists).
5393 * This is necessary to prevent data loss after log
5394 * replay, and also to prevent doing a wrong expanding
5395 * truncate - e.g. create file, write 4K into offset
5396 * 0, fsync, write 4K into offset 4096, add hard link,
5397 * fsync some other file (to sync log), power fail - if
5398 * we use the inode's current i_size, after log replay
5399 * we get an 8Kb file, with the last 4Kb extent as a hole
5400 * (zeroes), as if an expanding truncate happened,
5401 * instead of getting a file of 4Kb only.
5402 */
5403 err = logged_inode_size(log, inode, path, &logged_isize);
5404 if (err)
5405 goto out_unlock;
5406 }
5407 if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
5408 &inode->runtime_flags)) {
5409 if (inode_only == LOG_INODE_EXISTS) {
5410 max_key.type = BTRFS_XATTR_ITEM_KEY;
5411 ret = drop_objectid_items(trans, log, path, ino,
5412 max_key.type);
5413 } else {
5414 clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
5415 &inode->runtime_flags);
5416 clear_bit(BTRFS_INODE_COPY_EVERYTHING,
5417 &inode->runtime_flags);
5418 while (1) {
5419 ret = btrfs_truncate_inode_items(trans,
5420 log, inode, 0, 0, NULL);
5421 if (ret != -EAGAIN)
5422 break;
5423 }
5424 }
5425 } else if (test_and_clear_bit(BTRFS_INODE_COPY_EVERYTHING,
5426 &inode->runtime_flags) ||
5427 inode_only == LOG_INODE_EXISTS) {
5428 if (inode_only == LOG_INODE_ALL)
5429 fast_search = true;
5430 max_key.type = BTRFS_XATTR_ITEM_KEY;
5431 ret = drop_objectid_items(trans, log, path, ino,
5432 max_key.type);
5433 } else {
5434 if (inode_only == LOG_INODE_ALL)
5435 fast_search = true;
5436 goto log_extents;
5437 }
5438
5439 }
5440 if (ret) {
5441 err = ret;
5442 goto out_unlock;
5443 }
5444
5445 err = copy_inode_items_to_log(trans, inode, &min_key, &max_key,
5446 path, dst_path, logged_isize,
5447 recursive_logging, inode_only, ctx,
5448 &need_log_inode_item);
5449 if (err)
5450 goto out_unlock;
5451
5452 btrfs_release_path(path);
5453 btrfs_release_path(dst_path);
5454 err = btrfs_log_all_xattrs(trans, root, inode, path, dst_path);
5455 if (err)
5456 goto out_unlock;
5457 xattrs_logged = true;
5458 if (max_key.type >= BTRFS_EXTENT_DATA_KEY && !fast_search) {
5459 btrfs_release_path(path);
5460 btrfs_release_path(dst_path);
5461 err = btrfs_log_holes(trans, root, inode, path);
5462 if (err)
5463 goto out_unlock;
5464 }
5465 log_extents:
5466 btrfs_release_path(path);
5467 btrfs_release_path(dst_path);
5468 if (need_log_inode_item) {
5469 err = log_inode_item(trans, log, dst_path, inode);
5470 if (err)
5471 goto out_unlock;
5472 /*
5473 * If we are doing a fast fsync and the inode was logged before
5474 * in this transaction, we don't need to log the xattrs because
5475 * they were logged before. If xattrs were added, changed or
5476 * deleted since the last time we logged the inode, then we have
5477 * already logged them because the inode had the runtime flag
5478 * BTRFS_INODE_COPY_EVERYTHING set.
5479 */ 5480 if (!xattrs_logged && inode->logged_trans < trans->transid) { 5481 err = btrfs_log_all_xattrs(trans, root, inode, path, 5482 dst_path); 5483 if (err) 5484 goto out_unlock; 5485 btrfs_release_path(path); 5486 } 5487 } 5488 if (fast_search) { 5489 ret = btrfs_log_changed_extents(trans, root, inode, dst_path, 5490 ctx); 5491 if (ret) { 5492 err = ret; 5493 goto out_unlock; 5494 } 5495 } else if (inode_only == LOG_INODE_ALL) { 5496 struct extent_map *em, *n; 5497 5498 write_lock(&em_tree->lock); 5499 list_for_each_entry_safe(em, n, &em_tree->modified_extents, list) 5500 list_del_init(&em->list); 5501 write_unlock(&em_tree->lock); 5502 } 5503 5504 if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->vfs_inode.i_mode)) { 5505 ret = log_directory_changes(trans, root, inode, path, dst_path, 5506 ctx); 5507 if (ret) { 5508 err = ret; 5509 goto out_unlock; 5510 } 5511 } 5512 5513 /* 5514 * If we are logging that an ancestor inode exists as part of logging a 5515 * new name from a link or rename operation, don't mark the inode as 5516 * logged - otherwise if an explicit fsync is made against an ancestor, 5517 * the fsync considers the inode in the log and doesn't sync the log, 5518 * resulting in the ancestor missing after a power failure unless the 5519 * log was synced as part of an fsync against any other unrelated inode. 5520 * So keep it simple for this case and just don't flag the ancestors as 5521 * logged. 5522 */ 5523 if (!ctx || 5524 !(S_ISDIR(inode->vfs_inode.i_mode) && ctx->logging_new_name && 5525 &inode->vfs_inode != ctx->inode)) { 5526 spin_lock(&inode->lock); 5527 inode->logged_trans = trans->transid; 5528 /* 5529 * Don't update last_log_commit if we logged that an inode exists. 5530 * We do this for two reasons: 5531 * 5532 * 1) We might have had buffered writes to this inode that were 5533 * flushed and had their ordered extents completed in this 5534 * transaction, but we did not previously log the inode with 5535 * LOG_INODE_ALL. Later the inode was evicted and after that 5536 * it was loaded again and this LOG_INODE_EXISTS log operation 5537 * happened. We must make sure that if an explicit fsync against 5538 * the inode is performed later, it logs the new extents, an 5539 * updated inode item, etc, and syncs the log. The same logic 5540 * applies to direct IO writes instead of buffered writes. 5541 * 5542 * 2) When we log the inode with LOG_INODE_EXISTS, its inode item 5543 * is logged with an i_size of 0 or whatever value was logged 5544 * before. If later the i_size of the inode is increased by a 5545 * truncate operation, the log is synced through an fsync of 5546 * some other inode and then finally an explicit fsync against 5547 * this inode is made, we must make sure this fsync logs the 5548 * inode with the new i_size, the hole between old i_size and 5549 * the new i_size, and syncs the log. 5550 */ 5551 if (inode_only != LOG_INODE_EXISTS) 5552 inode->last_log_commit = inode->last_sub_trans; 5553 spin_unlock(&inode->lock); 5554 } 5555 out_unlock: 5556 mutex_unlock(&inode->log_mutex); 5557 5558 btrfs_free_path(path); 5559 btrfs_free_path(dst_path); 5560 return err; 5561 } 5562 5563 /* 5564 * Check if we need to log an inode. This is used in contexts where while 5565 * logging an inode we need to log another inode (either that it exists or in 5566 * full mode). 
This is used instead of btrfs_inode_in_log() because the latter
5567 * requires the inode to be in the log and have the log transaction committed,
5568 * while here we do not care if the log transaction was already committed - our
5569 * caller will commit the log later - and we want to avoid logging an inode
5570 * multiple times when multiple tasks have joined the same log transaction.
5571 */
5572 static bool need_log_inode(struct btrfs_trans_handle *trans,
5573 struct btrfs_inode *inode)
5574 {
5575 /*
5576 * If this inode does not have new/updated/deleted xattrs since the last
5577 * time it was logged and is flagged as logged in the current transaction,
5578 * we can skip logging it. As for new/deleted names, those are updated in
5579 * the log by link/unlink/rename operations.
5580 * In case the inode was logged and then evicted and reloaded, its
5581 * logged_trans will be 0, in which case we have to fully log it since
5582 * logged_trans is a transient field, not persisted.
5583 */
5584 if (inode->logged_trans == trans->transid &&
5585 !test_bit(BTRFS_INODE_COPY_EVERYTHING, &inode->runtime_flags))
5586 return false;
5587
5588 return true;
5589 }
5590
5591 struct btrfs_dir_list {
5592 u64 ino;
5593 struct list_head list;
5594 };
5595
5596 /*
5597 * Log the inodes of the new dentries of a directory. See log_dir_items() for
5598 * details about why it is needed.
5599 * This is a recursive operation - if an existing dentry corresponds to a
5600 * directory, that directory's new entries are logged too (same behaviour as
5601 * ext3/4, xfs, f2fs, reiserfs, nilfs2). Note that when logging the inodes
5602 * the dentries point to we do not lock their i_mutex, otherwise lockdep
5603 * complains about the following circular lock dependency / possible deadlock:
5604 *
5605 * CPU0 CPU1
5606 * ---- ----
5607 * lock(&type->i_mutex_dir_key#3/2);
5608 * lock(sb_internal#2);
5609 * lock(&type->i_mutex_dir_key#3/2);
5610 * lock(&sb->s_type->i_mutex_key#14);
5611 *
5612 * Where sb_internal is the lock (a counter that works as a lock) acquired by
5613 * sb_start_intwrite() in btrfs_start_transaction().
5614 * Not locking i_mutex of the inodes is still safe because:
5615 *
5616 * 1) For regular files we log with a mode of LOG_INODE_EXISTS. It's possible
5617 * that while logging the inode new references (names) are added or removed
5618 * from the inode, leaving the logged inode item with a link count that does
5619 * not match the number of logged inode reference items. This is fine because
5620 * at log replay time we compute the real number of links and correct the
5621 * link count in the inode item (see replay_one_buffer() and
5622 * link_to_fixup_dir());
5623 *
5624 * 2) For directories we log with a mode of LOG_INODE_ALL. It's possible that
5625 * while logging the inode's items new items with keys BTRFS_DIR_ITEM_KEY and
5626 * BTRFS_DIR_INDEX_KEY are added to fs/subvol tree and the logged inode item
5627 * has a size that doesn't match the sum of the lengths of all the logged
5628 * names. This does not result in a problem because if a dir_item key is
5629 * logged but its matching dir_index key is not logged, at log replay time we
5630 * don't use it to replay the respective name (see replay_one_name()). On the
5631 * other hand if only the dir_index key ends up being logged, the respective
5632 * name is added to the fs/subvol tree with both the dir_item and dir_index
5633 * keys created (see replay_one_name()).
5634 * The directory's inode item with a wrong i_size is not a problem either,
5635 * since we don't use it at log replay time to set the i_size in the inode
5636 * item of the fs/subvol tree (see overwrite_item()).
5637 */
5638 static int log_new_dir_dentries(struct btrfs_trans_handle *trans,
5639 struct btrfs_root *root,
5640 struct btrfs_inode *start_inode,
5641 struct btrfs_log_ctx *ctx)
5642 {
5643 struct btrfs_fs_info *fs_info = root->fs_info;
5644 struct btrfs_root *log = root->log_root;
5645 struct btrfs_path *path;
5646 LIST_HEAD(dir_list);
5647 struct btrfs_dir_list *dir_elem;
5648 int ret = 0;
5649
5650 path = btrfs_alloc_path();
5651 if (!path)
5652 return -ENOMEM;
5653
5654 dir_elem = kmalloc(sizeof(*dir_elem), GFP_NOFS);
5655 if (!dir_elem) {
5656 btrfs_free_path(path);
5657 return -ENOMEM;
5658 }
5659 dir_elem->ino = btrfs_ino(start_inode);
5660 list_add_tail(&dir_elem->list, &dir_list);
5661
5662 while (!list_empty(&dir_list)) {
5663 struct extent_buffer *leaf;
5664 struct btrfs_key min_key;
5665 int nritems;
5666 int i;
5667
5668 dir_elem = list_first_entry(&dir_list, struct btrfs_dir_list,
5669 list);
5670 if (ret)
5671 goto next_dir_inode;
5672
5673 min_key.objectid = dir_elem->ino;
5674 min_key.type = BTRFS_DIR_ITEM_KEY;
5675 min_key.offset = 0;
5676 again:
5677 btrfs_release_path(path);
5678 ret = btrfs_search_forward(log, &min_key, path, trans->transid);
5679 if (ret < 0) {
5680 goto next_dir_inode;
5681 } else if (ret > 0) {
5682 ret = 0;
5683 goto next_dir_inode;
5684 }
5685
5686 process_leaf:
5687 leaf = path->nodes[0];
5688 nritems = btrfs_header_nritems(leaf);
5689 for (i = path->slots[0]; i < nritems; i++) {
5690 struct btrfs_dir_item *di;
5691 struct btrfs_key di_key;
5692 struct inode *di_inode;
5693 struct btrfs_dir_list *new_dir_elem;
5694 int log_mode = LOG_INODE_EXISTS;
5695 int type;
5696
5697 btrfs_item_key_to_cpu(leaf, &min_key, i);
5698 if (min_key.objectid != dir_elem->ino ||
5699 min_key.type != BTRFS_DIR_ITEM_KEY)
5700 goto next_dir_inode;
5701
5702 di = btrfs_item_ptr(leaf, i, struct btrfs_dir_item);
5703 type = btrfs_dir_type(leaf, di);
5704 if (btrfs_dir_transid(leaf, di) < trans->transid &&
5705 type != BTRFS_FT_DIR)
5706 continue;
5707 btrfs_dir_item_key_to_cpu(leaf, di, &di_key);
5708 if (di_key.type == BTRFS_ROOT_ITEM_KEY)
5709 continue;
5710
5711 btrfs_release_path(path);
5712 di_inode = btrfs_iget(fs_info->sb, di_key.objectid, root);
5713 if (IS_ERR(di_inode)) {
5714 ret = PTR_ERR(di_inode);
5715 goto next_dir_inode;
5716 }
5717
5718 if (!need_log_inode(trans, BTRFS_I(di_inode))) {
5719 btrfs_add_delayed_iput(di_inode);
5720 break;
5721 }
5722
5723 ctx->log_new_dentries = false;
5724 if (type == BTRFS_FT_DIR || type == BTRFS_FT_SYMLINK)
5725 log_mode = LOG_INODE_ALL;
5726 ret = btrfs_log_inode(trans, root, BTRFS_I(di_inode),
5727 log_mode, ctx);
5728 btrfs_add_delayed_iput(di_inode);
5729 if (ret)
5730 goto next_dir_inode;
5731 if (ctx->log_new_dentries) {
5732 new_dir_elem = kmalloc(sizeof(*new_dir_elem),
5733 GFP_NOFS);
5734 if (!new_dir_elem) {
5735 ret = -ENOMEM;
5736 goto next_dir_inode;
5737 }
5738 new_dir_elem->ino = di_key.objectid;
5739 list_add_tail(&new_dir_elem->list, &dir_list);
5740 }
5741 break;
5742 }
5743 if (i == nritems) {
5744 ret = btrfs_next_leaf(log, path);
5745 if (ret < 0) {
5746 goto next_dir_inode;
5747 } else if (ret > 0) {
5748 ret = 0;
5749 goto next_dir_inode;
5750 }
5751 goto process_leaf;
5752 }
5753 if (min_key.offset < (u64)-1) {
5754 min_key.offset++;
5755 goto again;
5756 }
5757 next_dir_inode:
5758
		list_del(&dir_elem->list);
		kfree(dir_elem);
	}

	btrfs_free_path(path);
	return ret;
}

static int btrfs_log_all_parents(struct btrfs_trans_handle *trans,
				 struct btrfs_inode *inode,
				 struct btrfs_log_ctx *ctx)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int ret;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct btrfs_root *root = inode->root;
	const u64 ino = btrfs_ino(inode);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->skip_locking = 1;
	path->search_commit_root = 1;

	key.objectid = ino;
	key.type = BTRFS_INODE_REF_KEY;
	key.offset = 0;
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (true) {
		struct extent_buffer *leaf = path->nodes[0];
		int slot = path->slots[0];
		u32 cur_offset = 0;
		u32 item_size;
		unsigned long ptr;

		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			else if (ret > 0)
				break;
			continue;
		}

		btrfs_item_key_to_cpu(leaf, &key, slot);
		/* BTRFS_INODE_EXTREF_KEY is BTRFS_INODE_REF_KEY + 1 */
		if (key.objectid != ino || key.type > BTRFS_INODE_EXTREF_KEY)
			break;

		item_size = btrfs_item_size_nr(leaf, slot);
		ptr = btrfs_item_ptr_offset(leaf, slot);
		while (cur_offset < item_size) {
			struct btrfs_key inode_key;
			struct inode *dir_inode;

			inode_key.type = BTRFS_INODE_ITEM_KEY;
			inode_key.offset = 0;

			if (key.type == BTRFS_INODE_EXTREF_KEY) {
				struct btrfs_inode_extref *extref;

				extref = (struct btrfs_inode_extref *)
					(ptr + cur_offset);
				inode_key.objectid = btrfs_inode_extref_parent(
					leaf, extref);
				cur_offset += sizeof(*extref);
				cur_offset += btrfs_inode_extref_name_len(leaf,
					extref);
			} else {
				inode_key.objectid = key.offset;
				cur_offset = item_size;
			}

			dir_inode = btrfs_iget(fs_info->sb, inode_key.objectid,
					       root);
			/*
			 * If the parent inode was deleted, return an error to
			 * fall back to a transaction commit. This is to
			 * prevent an inode that was moved from a parent A to a
			 * parent B, had its former parent A deleted and was
			 * then fsync'ed, from existing at both parents after a
			 * log replay (with the old parent still existing).
			 * Example:
			 *
			 * mkdir /mnt/A
			 * mkdir /mnt/B
			 * touch /mnt/B/bar
			 * sync
			 * mv /mnt/B/bar /mnt/A/bar
			 * mv -T /mnt/A /mnt/B
			 * fsync /mnt/B/bar
			 * <power fail>
			 *
			 * If we ignore the old parent B which got deleted,
			 * after a log replay we would have file bar linked
			 * at both parents and the old parent B would still
			 * exist.
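			 *
			 * (Editorial note: "mv -T /mnt/A /mnt/B" renames A
			 * over B itself instead of moving A into B; this is
			 * allowed because B is empty by then, and it unlinks
			 * the old directory B as part of the rename.)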
			 */
			if (IS_ERR(dir_inode)) {
				ret = PTR_ERR(dir_inode);
				goto out;
			}

			if (!need_log_inode(trans, BTRFS_I(dir_inode))) {
				btrfs_add_delayed_iput(dir_inode);
				continue;
			}

			if (ctx)
				ctx->log_new_dentries = false;
			ret = btrfs_log_inode(trans, root, BTRFS_I(dir_inode),
					      LOG_INODE_ALL, ctx);
			if (!ret && ctx && ctx->log_new_dentries)
				ret = log_new_dir_dentries(trans, root,
							   BTRFS_I(dir_inode),
							   ctx);
			btrfs_add_delayed_iput(dir_inode);
			if (ret)
				goto out;
		}
		path->slots[0]++;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

static int log_new_ancestors(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path,
			     struct btrfs_log_ctx *ctx)
{
	struct btrfs_key found_key;

	btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]);

	while (true) {
		struct btrfs_fs_info *fs_info = root->fs_info;
		struct extent_buffer *leaf = path->nodes[0];
		int slot = path->slots[0];
		struct btrfs_key search_key;
		struct inode *inode;
		u64 ino;
		int ret = 0;

		btrfs_release_path(path);

		ino = found_key.offset;

		search_key.objectid = found_key.offset;
		search_key.type = BTRFS_INODE_ITEM_KEY;
		search_key.offset = 0;
		inode = btrfs_iget(fs_info->sb, ino, root);
		if (IS_ERR(inode))
			return PTR_ERR(inode);

		if (BTRFS_I(inode)->generation >= trans->transid &&
		    need_log_inode(trans, BTRFS_I(inode)))
			ret = btrfs_log_inode(trans, root, BTRFS_I(inode),
					      LOG_INODE_EXISTS, ctx);
		btrfs_add_delayed_iput(inode);
		if (ret)
			return ret;

		if (search_key.objectid == BTRFS_FIRST_FREE_OBJECTID)
			break;

		search_key.type = BTRFS_INODE_REF_KEY;
		ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0);
		if (ret < 0)
			return ret;

		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				return ret;
			else if (ret > 0)
				return -ENOENT;
			leaf = path->nodes[0];
			slot = path->slots[0];
		}

		btrfs_item_key_to_cpu(leaf, &found_key, slot);
		if (found_key.objectid != search_key.objectid ||
		    found_key.type != BTRFS_INODE_REF_KEY)
			return -ENOENT;
	}
	return 0;
}

static int log_new_ancestors_fast(struct btrfs_trans_handle *trans,
				  struct btrfs_inode *inode,
				  struct dentry *parent,
				  struct btrfs_log_ctx *ctx)
{
	struct btrfs_root *root = inode->root;
	struct dentry *old_parent = NULL;
	struct super_block *sb = inode->vfs_inode.i_sb;
	int ret = 0;

	while (true) {
		if (!parent || d_really_is_negative(parent) ||
		    sb != parent->d_sb)
			break;

		inode = BTRFS_I(d_inode(parent));
		if (root != inode->root)
			break;

		if (inode->generation >= trans->transid &&
		    need_log_inode(trans, inode)) {
			ret = btrfs_log_inode(trans, root, inode,
					      LOG_INODE_EXISTS, ctx);
			if (ret)
				break;
		}
		if (IS_ROOT(parent))
			break;

		parent = dget_parent(parent);
		dput(old_parent);
		old_parent = parent;
	}
	dput(old_parent);

	return ret;
}

static int log_all_new_ancestors(struct btrfs_trans_handle *trans,
				 struct btrfs_inode *inode,
				 struct dentry *parent,
				 struct btrfs_log_ctx *ctx)
5996 { 5997 struct btrfs_root *root = inode->root; 5998 const u64 ino = btrfs_ino(inode); 5999 struct btrfs_path *path; 6000 struct btrfs_key search_key; 6001 int ret; 6002 6003 /* 6004 * For a single hard link case, go through a fast path that does not 6005 * need to iterate the fs/subvolume tree. 6006 */ 6007 if (inode->vfs_inode.i_nlink < 2) 6008 return log_new_ancestors_fast(trans, inode, parent, ctx); 6009 6010 path = btrfs_alloc_path(); 6011 if (!path) 6012 return -ENOMEM; 6013 6014 search_key.objectid = ino; 6015 search_key.type = BTRFS_INODE_REF_KEY; 6016 search_key.offset = 0; 6017 again: 6018 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0); 6019 if (ret < 0) 6020 goto out; 6021 if (ret == 0) 6022 path->slots[0]++; 6023 6024 while (true) { 6025 struct extent_buffer *leaf = path->nodes[0]; 6026 int slot = path->slots[0]; 6027 struct btrfs_key found_key; 6028 6029 if (slot >= btrfs_header_nritems(leaf)) { 6030 ret = btrfs_next_leaf(root, path); 6031 if (ret < 0) 6032 goto out; 6033 else if (ret > 0) 6034 break; 6035 continue; 6036 } 6037 6038 btrfs_item_key_to_cpu(leaf, &found_key, slot); 6039 if (found_key.objectid != ino || 6040 found_key.type > BTRFS_INODE_EXTREF_KEY) 6041 break; 6042 6043 /* 6044 * Don't deal with extended references because they are rare 6045 * cases and too complex to deal with (we would need to keep 6046 * track of which subitem we are processing for each item in 6047 * this loop, etc). So just return some error to fallback to 6048 * a transaction commit. 6049 */ 6050 if (found_key.type == BTRFS_INODE_EXTREF_KEY) { 6051 ret = -EMLINK; 6052 goto out; 6053 } 6054 6055 /* 6056 * Logging ancestors needs to do more searches on the fs/subvol 6057 * tree, so it releases the path as needed to avoid deadlocks. 6058 * Keep track of the last inode ref key and resume from that key 6059 * after logging all new ancestors for the current hard link. 6060 */ 6061 memcpy(&search_key, &found_key, sizeof(search_key)); 6062 6063 ret = log_new_ancestors(trans, root, path, ctx); 6064 if (ret) 6065 goto out; 6066 btrfs_release_path(path); 6067 goto again; 6068 } 6069 ret = 0; 6070 out: 6071 btrfs_free_path(path); 6072 return ret; 6073 } 6074 6075 /* 6076 * helper function around btrfs_log_inode to make sure newly created 6077 * parent directories also end up in the log. A minimal inode and backref 6078 * only logging is done of any parent directories that are older than 6079 * the last committed transaction 6080 */ 6081 static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans, 6082 struct btrfs_inode *inode, 6083 struct dentry *parent, 6084 int inode_only, 6085 struct btrfs_log_ctx *ctx) 6086 { 6087 struct btrfs_root *root = inode->root; 6088 struct btrfs_fs_info *fs_info = root->fs_info; 6089 int ret = 0; 6090 bool log_dentries = false; 6091 6092 if (btrfs_test_opt(fs_info, NOTREELOG)) { 6093 ret = 1; 6094 goto end_no_trans; 6095 } 6096 6097 if (btrfs_root_refs(&root->root_item) == 0) { 6098 ret = 1; 6099 goto end_no_trans; 6100 } 6101 6102 /* 6103 * Skip already logged inodes or inodes corresponding to tmpfiles 6104 * (since logging them is pointless, a link count of 0 means they 6105 * will never be accessible). 
	 */
	if ((btrfs_inode_in_log(inode, trans->transid) &&
	     list_empty(&ctx->ordered_extents)) ||
	    inode->vfs_inode.i_nlink == 0) {
		ret = BTRFS_NO_LOG_SYNC;
		goto end_no_trans;
	}

	ret = start_log_trans(trans, root, ctx);
	if (ret)
		goto end_no_trans;

	ret = btrfs_log_inode(trans, root, inode, inode_only, ctx);
	if (ret)
		goto end_trans;

	/*
	 * For a regular file, if its inode is already on disk, we don't
	 * have to worry about the parents at all. This is because
	 * we can use the last_unlink_trans field to record renames
	 * and other fun in this file.
	 */
	if (S_ISREG(inode->vfs_inode.i_mode) &&
	    inode->generation < trans->transid &&
	    inode->last_unlink_trans < trans->transid) {
		ret = 0;
		goto end_trans;
	}

	if (S_ISDIR(inode->vfs_inode.i_mode) && ctx && ctx->log_new_dentries)
		log_dentries = true;

	/*
	 * On unlink we must make sure all our current and old parent directory
	 * inodes are fully logged. This is to prevent leaving dangling
	 * directory index entries in directories that were our parents but are
	 * not anymore. Not doing this results in the old parent directory
	 * being impossible to delete after log replay (rmdir will always fail
	 * with error -ENOTEMPTY).
	 *
	 * Example 1:
	 *
	 * mkdir testdir
	 * touch testdir/foo
	 * ln testdir/foo testdir/bar
	 * sync
	 * unlink testdir/bar
	 * xfs_io -c fsync testdir/foo
	 * <power failure>
	 * mount fs, triggers log replay
	 *
	 * If we don't log the parent directory (testdir), after log replay the
	 * directory still has an entry pointing to the file inode using the bar
	 * name, but a matching BTRFS_INODE_[REF|EXTREF]_KEY does not exist and
	 * the file inode has a link count of 1.
	 *
	 * Example 2:
	 *
	 * mkdir testdir
	 * touch foo
	 * ln foo testdir/foo2
	 * ln foo testdir/foo3
	 * sync
	 * unlink testdir/foo3
	 * xfs_io -c fsync foo
	 * <power failure>
	 * mount fs, triggers log replay
	 *
	 * Similar to the first example, after log replay the parent directory
	 * testdir still has an entry pointing to the file inode using the foo3
	 * name, but the file inode does not have a matching
	 * BTRFS_INODE_REF_KEY item and has a link count of 2.
	 */
	if (inode->last_unlink_trans >= trans->transid) {
		ret = btrfs_log_all_parents(trans, inode, ctx);
		if (ret)
			goto end_trans;
	}

	ret = log_all_new_ancestors(trans, inode, parent, ctx);
	if (ret)
		goto end_trans;

	if (log_dentries)
		ret = log_new_dir_dentries(trans, root, inode, ctx);
	else
		ret = 0;
end_trans:
	if (ret < 0) {
		btrfs_set_log_full_commit(trans);
		ret = 1;
	}

	if (ret)
		btrfs_remove_log_ctx(root, ctx);
	btrfs_end_log_trans(root);
end_no_trans:
	return ret;
}

/*
 * it is not safe to log a dentry if the chunk root has added new
 * chunks. This returns 0 if the dentry was logged, and 1 otherwise.
 * If this returns 1, you must commit the transaction to safely get your
 * data on disk.
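 *
 * Editorial sketch of the intended use from the fsync path (simplified, see
 * btrfs_sync_file() in file.c for the real sequence):
 *
 *	ret = btrfs_log_dentry_safe(trans, dentry, &ctx);
 *	if (ret == 0)
 *		ret = btrfs_sync_log(trans, root, &ctx);
 *	else if (ret != BTRFS_NO_LOG_SYNC)
 *		ret = btrfs_commit_transaction(trans);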
 */
int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
			  struct dentry *dentry,
			  struct btrfs_log_ctx *ctx)
{
	struct dentry *parent = dget_parent(dentry);
	int ret;

	ret = btrfs_log_inode_parent(trans, BTRFS_I(d_inode(dentry)), parent,
				     LOG_INODE_ALL, ctx);
	dput(parent);

	return ret;
}

/*
 * should be called during mount to recover and replay any log trees
 * from the FS
 */
int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_trans_handle *trans;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_root *log;
	struct btrfs_fs_info *fs_info = log_root_tree->fs_info;
	struct walk_control wc = {
		.process_func = process_one_buffer,
		.stage = LOG_WALK_PIN_ONLY,
	};

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	set_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);

	trans = btrfs_start_transaction(fs_info->tree_root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto error;
	}

	wc.trans = trans;
	wc.pin = 1;

	ret = walk_log_tree(trans, log_root_tree, &wc);
	if (ret) {
		btrfs_handle_fs_error(fs_info, ret,
			"Failed to pin buffers while recovering log root tree.");
		goto error;
	}

again:
	key.objectid = BTRFS_TREE_LOG_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_ROOT_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0);

		if (ret < 0) {
			btrfs_handle_fs_error(fs_info, ret,
				"Couldn't find tree log root.");
			goto error;
		}
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		btrfs_release_path(path);
		if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID)
			break;

		log = btrfs_read_tree_root(log_root_tree, &found_key);
		if (IS_ERR(log)) {
			ret = PTR_ERR(log);
			btrfs_handle_fs_error(fs_info, ret,
				"Couldn't read tree log root.");
			goto error;
		}

		wc.replay_dest = btrfs_get_fs_root(fs_info, found_key.offset,
						   true);
		if (IS_ERR(wc.replay_dest)) {
			ret = PTR_ERR(wc.replay_dest);

			/*
			 * We didn't find the subvol, likely because it was
			 * deleted. This is ok, simply skip this log and go to
			 * the next one.
			 *
			 * We need to exclude the root because we can't have
			 * other log replays overwriting this log as we'll read
			 * it back in a few more times. This will keep our
			 * block from being modified, and we'll just bail for
			 * each subsequent pass.
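			 *
			 * (Editorial note: on those later passes
			 * btrfs_get_fs_root() fails with -ENOENT again, the
			 * log root's blocks get pinned again and we simply
			 * move on to the next log root.)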
			 */
			if (ret == -ENOENT)
				ret = btrfs_pin_extent_for_log_replay(trans,
							log->node->start,
							log->node->len);
			btrfs_put_root(log);

			if (!ret)
				goto next;
			btrfs_handle_fs_error(fs_info, ret,
				"Couldn't read target root for tree log recovery.");
			goto error;
		}

		wc.replay_dest->log_root = log;
		ret = btrfs_record_root_in_trans(trans, wc.replay_dest);
		if (ret)
			/* The loop needs to continue due to the root refs */
			btrfs_handle_fs_error(fs_info, ret,
				"failed to record the log root in transaction");
		else
			ret = walk_log_tree(trans, log, &wc);

		if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
			ret = fixup_inode_link_counts(trans, wc.replay_dest,
						      path);
		}

		if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
			struct btrfs_root *root = wc.replay_dest;

			btrfs_release_path(path);

			/*
			 * We have just replayed everything, and the highest
			 * objectid of the fs roots probably has changed in
			 * case some inode items got replayed.
			 *
			 * root->objectid_mutex is not acquired as log replay
			 * can only happen during mount.
			 */
			ret = btrfs_init_root_free_objectid(root);
		}

		wc.replay_dest->log_root = NULL;
		btrfs_put_root(wc.replay_dest);
		btrfs_put_root(log);

		if (ret)
			goto error;
next:
		if (found_key.offset == 0)
			break;
		key.offset = found_key.offset - 1;
	}
	btrfs_release_path(path);

	/* step one is to pin it all, step two is to replay just inodes */
	if (wc.pin) {
		wc.pin = 0;
		wc.process_func = replay_one_buffer;
		wc.stage = LOG_WALK_REPLAY_INODES;
		goto again;
	}
	/* step three is to replay everything */
	if (wc.stage < LOG_WALK_REPLAY_ALL) {
		wc.stage++;
		goto again;
	}

	btrfs_free_path(path);

	/* step 4: commit the transaction, which also unpins the blocks */
	ret = btrfs_commit_transaction(trans);
	if (ret)
		return ret;

	log_root_tree->log_root = NULL;
	clear_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
	btrfs_put_root(log_root_tree);

	return 0;
error:
	if (wc.trans)
		btrfs_end_transaction(wc.trans);
	clear_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
	btrfs_free_path(path);
	return ret;
}

/*
 * there are some corner cases where we want to force a full
 * commit instead of allowing a directory to be logged.
 *
 * They revolve around files that were unlinked from the directory, and
 * this function updates the parent directory so that a full commit is
 * properly done if it is fsync'd later after the unlinks are done.
 *
 * Must be called before the unlink operations (updates to the subvolume tree,
 * inodes, etc) are done.
 */
void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
			     struct btrfs_inode *dir, struct btrfs_inode *inode,
			     int for_rename)
{
	/*
	 * when we're logging a file, if it hasn't been renamed
	 * or unlinked, and its inode is fully committed on disk,
	 * we don't have to worry about walking up the directory chain
	 * to log its parents.
	 *
	 * So, we use the last_unlink_trans field to put this transid
	 * into the file. When the file is logged we check it and
	 * don't log the parents if the file is fully on disk.
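	 *
	 * (Editorial cross-reference: this is the same last_unlink_trans
	 * that btrfs_log_inode_parent() compares against trans->transid to
	 * decide whether btrfs_log_all_parents() has to run.)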
	 */
	mutex_lock(&inode->log_mutex);
	inode->last_unlink_trans = trans->transid;
	mutex_unlock(&inode->log_mutex);

	/*
	 * if this directory was already logged, any new
	 * names for this file/dir will get recorded
	 */
	if (dir->logged_trans == trans->transid)
		return;

	/*
	 * if the inode we're about to unlink was logged,
	 * the log will be properly updated for any new names
	 */
	if (inode->logged_trans == trans->transid)
		return;

	/*
	 * when renaming files across directories, if the directory
	 * we're unlinking from gets fsync'd later on, there's
	 * no way to find the destination directory later and fsync it
	 * properly. So, we have to be conservative and force commits
	 * so the new name gets discovered.
	 */
	if (for_rename)
		goto record;

	/* we can safely do the unlink without any special recording */
	return;

record:
	mutex_lock(&dir->log_mutex);
	dir->last_unlink_trans = trans->transid;
	mutex_unlock(&dir->log_mutex);
}

/*
 * Make sure that if someone attempts to fsync the parent directory of a deleted
 * snapshot, it ends up triggering a transaction commit. This is to guarantee
 * that after replaying the log tree of the parent directory's root we will not
 * see the snapshot anymore and at log replay time we will not see any log tree
 * corresponding to the deleted snapshot's root, which could lead to replaying
 * it after replaying the log tree of the parent directory (which would replay
 * the snapshot delete operation).
 *
 * Must be called before the actual snapshot destroy operation (updates to the
 * parent root and tree of tree roots trees, etc) are done.
 */
void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans,
				   struct btrfs_inode *dir)
{
	mutex_lock(&dir->log_mutex);
	dir->last_unlink_trans = trans->transid;
	mutex_unlock(&dir->log_mutex);
}

/*
 * Call this after adding a new name for a file and it will properly
 * update the log to reflect the new name.
 */
void btrfs_log_new_name(struct btrfs_trans_handle *trans,
			struct btrfs_inode *inode, struct btrfs_inode *old_dir,
			struct dentry *parent)
{
	struct btrfs_log_ctx ctx;

	/*
	 * this will force the logging code to walk the dentry chain
	 * up for the file
	 */
	if (!S_ISDIR(inode->vfs_inode.i_mode))
		inode->last_unlink_trans = trans->transid;

	/*
	 * if this inode hasn't been logged and the directory we're renaming it
	 * from hasn't been logged, we don't need to log it
	 */
	if (inode->logged_trans < trans->transid &&
	    (!old_dir || old_dir->logged_trans < trans->transid))
		return;

	/*
	 * If we are doing a rename (old_dir is not NULL) from a directory that
	 * was previously logged, make sure the next log attempt on the
	 * directory is not skipped and logs the inode again. This is because
	 * the log may not currently be authoritative for a range including the
	 * old BTRFS_DIR_ITEM_KEY and BTRFS_DIR_INDEX_KEY keys, so we want to
	 * make sure after a log replay we do not end up with both the new and
	 * old dentries around (in case the inode is a directory we would have
	 * a directory with two hard links and 2 inode references for different
	 * parents).
	 * The next log attempt of old_dir will happen at
	 * btrfs_log_all_parents(), called through btrfs_log_inode_parent()
	 * below, because we have previously set inode->last_unlink_trans to
	 * the current transaction ID, either here or at
	 * btrfs_record_unlink_dir() in case the inode is a directory.
	 */
	if (old_dir)
		old_dir->logged_trans = 0;

	btrfs_init_log_ctx(&ctx, &inode->vfs_inode);
	ctx.logging_new_name = true;
	/*
	 * We don't care about the return value. If we fail to log the new name
	 * then we know the next attempt to sync the log will fall back to a
	 * full transaction commit (due to a call to
	 * btrfs_set_log_full_commit()), so we don't need to worry about
	 * getting a log committed that has an inconsistent state after a
	 * rename operation.
	 */
	btrfs_log_inode_parent(trans, inode, parent, LOG_INODE_EXISTS, &ctx);
}
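
/*
 * Editorial note (a sketch, not code from this file): btrfs_log_new_name()
 * is expected to be called once the new name is fully inserted in the
 * subvolume tree, e.g. from the tail of a link or rename operation, roughly:
 *
 *	ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
 *			     dentry->d_name.name, dentry->d_name.len,
 *			     1, index);
 *	...
 *	if (!ret)
 *		btrfs_log_new_name(trans, BTRFS_I(inode), old_dir,
 *				   dentry->d_parent);
 *
 * See the btrfs_link() and btrfs_rename() call sites in inode.c: old_dir is
 * NULL for a plain link and the source directory for a rename.
 */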