// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/list_sort.h>
#include <linux/iversion.h>
#include "ctree.h"
#include "tree-log.h"
#include "disk-io.h"
#include "locking.h"
#include "print-tree.h"
#include "backref.h"
#include "compression.h"
#include "qgroup.h"
#include "inode-map.h"

/* magic values for the inode_only field in btrfs_log_inode:
 *
 * LOG_INODE_ALL means to log everything
 * LOG_INODE_EXISTS means to log just enough to recreate the inode
 * during log replay
 */
#define LOG_INODE_ALL 0
#define LOG_INODE_EXISTS 1
#define LOG_OTHER_INODE 2
#define LOG_OTHER_INODE_ALL 3

/*
 * directory trouble cases
 *
 * 1) on rename or unlink, if the inode being unlinked isn't in the fsync
 * log, we must force a full commit before doing an fsync of the directory
 * where the unlink was done.
 * ---> record transid of last unlink/rename per directory
 *
 * mkdir foo/some_dir
 * normal commit
 * rename foo/some_dir foo2/some_dir
 * mkdir foo/some_dir
 * fsync foo/some_dir/some_file
 *
 * The fsync above will unlink the original some_dir without recording
 * it in its new location (foo2).  After a crash, some_dir will be gone
 * unless the fsync of some_file forces a full commit
 *
 * 2) we must log any new names for any file or dir that is in the fsync
 * log. ---> check inode while renaming/linking.
 *
 * 2a) we must log any new names for any file or dir during rename
 * when the directory they are being removed from was logged.
 * ---> check inode and old parent dir during rename
 *
 * 2a is actually the more important variant.  With the extra logging
 * a crash might unlink the old name without recreating the new one
 *
 * 3) after a crash, we must go through any directories with a link count
 * of zero and redo the rm -rf
 *
 * mkdir f1/foo
 * normal commit
 * rm -rf f1/foo
 * fsync(f1)
 *
 * The directory f1 was fully removed from the FS, but fsync was never
 * called on f1, only its parent dir.  After a crash the rm -rf must
 * be replayed.  This must be able to recurse down the entire
 * directory tree.  The inode link count fixup code takes care of the
 * ugly details.
 */

/*
 * stages for the tree walking.  The first
 * stage (0) is to only pin down the blocks we find,
 * the second stage (1) is to make sure that all the inodes
 * we find in the log are created in the subvolume.
 *
 * The last stage is to deal with directories and links and extents
 * and all the other fun semantics
 */
#define LOG_WALK_PIN_ONLY 0
#define LOG_WALK_REPLAY_INODES 1
#define LOG_WALK_REPLAY_DIR_INDEX 2
#define LOG_WALK_REPLAY_ALL 3

static int btrfs_log_inode(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root, struct btrfs_inode *inode,
			   int inode_only,
			   const loff_t start,
			   const loff_t end,
			   struct btrfs_log_ctx *ctx);
static int link_to_fixup_dir(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path, u64 objectid);
static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root,
				       struct btrfs_root *log,
				       struct btrfs_path *path,
				       u64 dirid, int del_all);

/*
 * tree logging is a special write ahead log used to make sure that
 * fsyncs and O_SYNCs can happen without doing full tree commits.
 *
 * Full tree commits are expensive because they require commonly
 * modified blocks to be recowed, creating many dirty pages in the
 * extent tree and a 4x-6x higher write load than ext3.
 *
 * Instead of doing a tree commit on every fsync, we use the
 * key ranges and transaction ids to find items for a given file or directory
 * that have changed in this transaction.  Those items are copied into
 * a special tree (one per subvolume root), that tree is written to disk
 * and then the fsync is considered complete.
 *
 * After a crash, items are copied out of the log-tree back into the
 * subvolume tree.  Any file data extents found are recorded in the extent
 * allocation tree, and the log-tree freed.
 *
 * The log tree is read three times: once to pin down all the extents it is
 * using in RAM, once to create all the inodes logged in the tree,
 * and once to do all the other items.
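 *
 * (These passes correspond roughly to the LOG_WALK_* stages defined above.)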
 */

/*
 * start a sub transaction and setup the log tree
 * this increments the log tree writer count to make the people
 * syncing the tree wait for us to finish
 */
static int start_log_trans(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_log_ctx *ctx)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret = 0;

	mutex_lock(&root->log_mutex);

	if (root->log_root) {
		if (btrfs_need_log_full_commit(fs_info, trans)) {
			ret = -EAGAIN;
			goto out;
		}

		if (!root->log_start_pid) {
			clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
			root->log_start_pid = current->pid;
		} else if (root->log_start_pid != current->pid) {
			set_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
		}
	} else {
		mutex_lock(&fs_info->tree_log_mutex);
		if (!fs_info->log_root_tree)
			ret = btrfs_init_log_root_tree(trans, fs_info);
		mutex_unlock(&fs_info->tree_log_mutex);
		if (ret)
			goto out;

		ret = btrfs_add_log_tree(trans, root);
		if (ret)
			goto out;

		clear_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state);
		root->log_start_pid = current->pid;
	}

	atomic_inc(&root->log_batch);
	atomic_inc(&root->log_writers);
	if (ctx) {
		int index = root->log_transid % 2;
		list_add_tail(&ctx->list, &root->log_ctxs[index]);
		ctx->log_transid = root->log_transid;
	}

out:
	mutex_unlock(&root->log_mutex);
	return ret;
}

/*
 * returns 0 if there was a log transaction running and we were able
 * to join, or returns -ENOENT if there was no transaction
 * in progress
 */
static int join_running_log_trans(struct btrfs_root *root)
{
	int ret = -ENOENT;

	smp_mb();
	if (!root->log_root)
		return -ENOENT;

	mutex_lock(&root->log_mutex);
	if (root->log_root) {
		ret = 0;
		atomic_inc(&root->log_writers);
	}
	mutex_unlock(&root->log_mutex);
	return ret;
}

/*
 * This either makes the current running log transaction wait
 * until you call btrfs_end_log_trans() or it makes any future
 * log transactions wait until you call btrfs_end_log_trans()
 */
void btrfs_pin_log_trans(struct btrfs_root *root)
{
	mutex_lock(&root->log_mutex);
	atomic_inc(&root->log_writers);
	mutex_unlock(&root->log_mutex);
}

/*
 * indicate we're done making changes to the log tree
 * and wake up anyone waiting to do a sync
 */
void btrfs_end_log_trans(struct btrfs_root *root)
{
	if (atomic_dec_and_test(&root->log_writers)) {
		/* atomic_dec_and_test implies a barrier */
		cond_wake_up_nomb(&root->log_writer_wait);
	}
}


/*
 * the walk control struct is used to pass state down the chain when
 * processing the log tree.  The stage field tells us which part
 * of the log tree processing we are currently doing.  The others
 * are state fields used for that specific part
 */
struct walk_control {
	/* should we free the extent on disk when done?  This is used
	 * at transaction commit time while freeing a log tree
	 */
	int free;

	/* should we write out the extent buffer?  This is used
	 * while flushing the log tree to disk during a sync
	 */
	int write;

	/* should we wait for the extent buffer io to finish?
Also used 247 * while flushing the log tree to disk for a sync 248 */ 249 int wait; 250 251 /* pin only walk, we record which extents on disk belong to the 252 * log trees 253 */ 254 int pin; 255 256 /* what stage of the replay code we're currently in */ 257 int stage; 258 259 /* 260 * Ignore any items from the inode currently being processed. Needs 261 * to be set every time we find a BTRFS_INODE_ITEM_KEY and we are in 262 * the LOG_WALK_REPLAY_INODES stage. 263 */ 264 bool ignore_cur_inode; 265 266 /* the root we are currently replaying */ 267 struct btrfs_root *replay_dest; 268 269 /* the trans handle for the current replay */ 270 struct btrfs_trans_handle *trans; 271 272 /* the function that gets used to process blocks we find in the 273 * tree. Note the extent_buffer might not be up to date when it is 274 * passed in, and it must be checked or read if you need the data 275 * inside it 276 */ 277 int (*process_func)(struct btrfs_root *log, struct extent_buffer *eb, 278 struct walk_control *wc, u64 gen, int level); 279 }; 280 281 /* 282 * process_func used to pin down extents, write them or wait on them 283 */ 284 static int process_one_buffer(struct btrfs_root *log, 285 struct extent_buffer *eb, 286 struct walk_control *wc, u64 gen, int level) 287 { 288 struct btrfs_fs_info *fs_info = log->fs_info; 289 int ret = 0; 290 291 /* 292 * If this fs is mixed then we need to be able to process the leaves to 293 * pin down any logged extents, so we have to read the block. 294 */ 295 if (btrfs_fs_incompat(fs_info, MIXED_GROUPS)) { 296 ret = btrfs_read_buffer(eb, gen, level, NULL); 297 if (ret) 298 return ret; 299 } 300 301 if (wc->pin) 302 ret = btrfs_pin_extent_for_log_replay(fs_info, eb->start, 303 eb->len); 304 305 if (!ret && btrfs_buffer_uptodate(eb, gen, 0)) { 306 if (wc->pin && btrfs_header_level(eb) == 0) 307 ret = btrfs_exclude_logged_extents(fs_info, eb); 308 if (wc->write) 309 btrfs_write_tree_block(eb); 310 if (wc->wait) 311 btrfs_wait_tree_block_writeback(eb); 312 } 313 return ret; 314 } 315 316 /* 317 * Item overwrite used by replay and tree logging. eb, slot and key all refer 318 * to the src data we are copying out. 319 * 320 * root is the tree we are copying into, and path is a scratch 321 * path for use in this function (it should be released on entry and 322 * will be released on exit). 323 * 324 * If the key is already in the destination tree the existing item is 325 * overwritten. If the existing item isn't big enough, it is extended. 326 * If it is too large, it is truncated. 327 * 328 * If the key isn't in the destination yet, a new item is inserted. 
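 *
 * Returns 0 on success, or a negative errno on failure.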
329 */ 330 static noinline int overwrite_item(struct btrfs_trans_handle *trans, 331 struct btrfs_root *root, 332 struct btrfs_path *path, 333 struct extent_buffer *eb, int slot, 334 struct btrfs_key *key) 335 { 336 struct btrfs_fs_info *fs_info = root->fs_info; 337 int ret; 338 u32 item_size; 339 u64 saved_i_size = 0; 340 int save_old_i_size = 0; 341 unsigned long src_ptr; 342 unsigned long dst_ptr; 343 int overwrite_root = 0; 344 bool inode_item = key->type == BTRFS_INODE_ITEM_KEY; 345 346 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) 347 overwrite_root = 1; 348 349 item_size = btrfs_item_size_nr(eb, slot); 350 src_ptr = btrfs_item_ptr_offset(eb, slot); 351 352 /* look for the key in the destination tree */ 353 ret = btrfs_search_slot(NULL, root, key, path, 0, 0); 354 if (ret < 0) 355 return ret; 356 357 if (ret == 0) { 358 char *src_copy; 359 char *dst_copy; 360 u32 dst_size = btrfs_item_size_nr(path->nodes[0], 361 path->slots[0]); 362 if (dst_size != item_size) 363 goto insert; 364 365 if (item_size == 0) { 366 btrfs_release_path(path); 367 return 0; 368 } 369 dst_copy = kmalloc(item_size, GFP_NOFS); 370 src_copy = kmalloc(item_size, GFP_NOFS); 371 if (!dst_copy || !src_copy) { 372 btrfs_release_path(path); 373 kfree(dst_copy); 374 kfree(src_copy); 375 return -ENOMEM; 376 } 377 378 read_extent_buffer(eb, src_copy, src_ptr, item_size); 379 380 dst_ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]); 381 read_extent_buffer(path->nodes[0], dst_copy, dst_ptr, 382 item_size); 383 ret = memcmp(dst_copy, src_copy, item_size); 384 385 kfree(dst_copy); 386 kfree(src_copy); 387 /* 388 * they have the same contents, just return, this saves 389 * us from cowing blocks in the destination tree and doing 390 * extra writes that may not have been done by a previous 391 * sync 392 */ 393 if (ret == 0) { 394 btrfs_release_path(path); 395 return 0; 396 } 397 398 /* 399 * We need to load the old nbytes into the inode so when we 400 * replay the extents we've logged we get the right nbytes. 401 */ 402 if (inode_item) { 403 struct btrfs_inode_item *item; 404 u64 nbytes; 405 u32 mode; 406 407 item = btrfs_item_ptr(path->nodes[0], path->slots[0], 408 struct btrfs_inode_item); 409 nbytes = btrfs_inode_nbytes(path->nodes[0], item); 410 item = btrfs_item_ptr(eb, slot, 411 struct btrfs_inode_item); 412 btrfs_set_inode_nbytes(eb, item, nbytes); 413 414 /* 415 * If this is a directory we need to reset the i_size to 416 * 0 so that we can set it up properly when replaying 417 * the rest of the items in this log. 418 */ 419 mode = btrfs_inode_mode(eb, item); 420 if (S_ISDIR(mode)) 421 btrfs_set_inode_size(eb, item, 0); 422 } 423 } else if (inode_item) { 424 struct btrfs_inode_item *item; 425 u32 mode; 426 427 /* 428 * New inode, set nbytes to 0 so that the nbytes comes out 429 * properly when we replay the extents. 430 */ 431 item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item); 432 btrfs_set_inode_nbytes(eb, item, 0); 433 434 /* 435 * If this is a directory we need to reset the i_size to 0 so 436 * that we can set it up properly when replaying the rest of 437 * the items in this log. 
438 */ 439 mode = btrfs_inode_mode(eb, item); 440 if (S_ISDIR(mode)) 441 btrfs_set_inode_size(eb, item, 0); 442 } 443 insert: 444 btrfs_release_path(path); 445 /* try to insert the key into the destination tree */ 446 path->skip_release_on_error = 1; 447 ret = btrfs_insert_empty_item(trans, root, path, 448 key, item_size); 449 path->skip_release_on_error = 0; 450 451 /* make sure any existing item is the correct size */ 452 if (ret == -EEXIST || ret == -EOVERFLOW) { 453 u32 found_size; 454 found_size = btrfs_item_size_nr(path->nodes[0], 455 path->slots[0]); 456 if (found_size > item_size) 457 btrfs_truncate_item(fs_info, path, item_size, 1); 458 else if (found_size < item_size) 459 btrfs_extend_item(fs_info, path, 460 item_size - found_size); 461 } else if (ret) { 462 return ret; 463 } 464 dst_ptr = btrfs_item_ptr_offset(path->nodes[0], 465 path->slots[0]); 466 467 /* don't overwrite an existing inode if the generation number 468 * was logged as zero. This is done when the tree logging code 469 * is just logging an inode to make sure it exists after recovery. 470 * 471 * Also, don't overwrite i_size on directories during replay. 472 * log replay inserts and removes directory items based on the 473 * state of the tree found in the subvolume, and i_size is modified 474 * as it goes 475 */ 476 if (key->type == BTRFS_INODE_ITEM_KEY && ret == -EEXIST) { 477 struct btrfs_inode_item *src_item; 478 struct btrfs_inode_item *dst_item; 479 480 src_item = (struct btrfs_inode_item *)src_ptr; 481 dst_item = (struct btrfs_inode_item *)dst_ptr; 482 483 if (btrfs_inode_generation(eb, src_item) == 0) { 484 struct extent_buffer *dst_eb = path->nodes[0]; 485 const u64 ino_size = btrfs_inode_size(eb, src_item); 486 487 /* 488 * For regular files an ino_size == 0 is used only when 489 * logging that an inode exists, as part of a directory 490 * fsync, and the inode wasn't fsynced before. In this 491 * case don't set the size of the inode in the fs/subvol 492 * tree, otherwise we would be throwing valid data away. 
493 */ 494 if (S_ISREG(btrfs_inode_mode(eb, src_item)) && 495 S_ISREG(btrfs_inode_mode(dst_eb, dst_item)) && 496 ino_size != 0) { 497 struct btrfs_map_token token; 498 499 btrfs_init_map_token(&token); 500 btrfs_set_token_inode_size(dst_eb, dst_item, 501 ino_size, &token); 502 } 503 goto no_copy; 504 } 505 506 if (overwrite_root && 507 S_ISDIR(btrfs_inode_mode(eb, src_item)) && 508 S_ISDIR(btrfs_inode_mode(path->nodes[0], dst_item))) { 509 save_old_i_size = 1; 510 saved_i_size = btrfs_inode_size(path->nodes[0], 511 dst_item); 512 } 513 } 514 515 copy_extent_buffer(path->nodes[0], eb, dst_ptr, 516 src_ptr, item_size); 517 518 if (save_old_i_size) { 519 struct btrfs_inode_item *dst_item; 520 dst_item = (struct btrfs_inode_item *)dst_ptr; 521 btrfs_set_inode_size(path->nodes[0], dst_item, saved_i_size); 522 } 523 524 /* make sure the generation is filled in */ 525 if (key->type == BTRFS_INODE_ITEM_KEY) { 526 struct btrfs_inode_item *dst_item; 527 dst_item = (struct btrfs_inode_item *)dst_ptr; 528 if (btrfs_inode_generation(path->nodes[0], dst_item) == 0) { 529 btrfs_set_inode_generation(path->nodes[0], dst_item, 530 trans->transid); 531 } 532 } 533 no_copy: 534 btrfs_mark_buffer_dirty(path->nodes[0]); 535 btrfs_release_path(path); 536 return 0; 537 } 538 539 /* 540 * simple helper to read an inode off the disk from a given root 541 * This can only be called for subvolume roots and not for the log 542 */ 543 static noinline struct inode *read_one_inode(struct btrfs_root *root, 544 u64 objectid) 545 { 546 struct btrfs_key key; 547 struct inode *inode; 548 549 key.objectid = objectid; 550 key.type = BTRFS_INODE_ITEM_KEY; 551 key.offset = 0; 552 inode = btrfs_iget(root->fs_info->sb, &key, root, NULL); 553 if (IS_ERR(inode)) 554 inode = NULL; 555 return inode; 556 } 557 558 /* replays a single extent in 'eb' at 'slot' with 'key' into the 559 * subvolume 'root'. path is released on entry and should be released 560 * on exit. 561 * 562 * extents in the log tree have not been allocated out of the extent 563 * tree yet. So, this completes the allocation, taking a reference 564 * as required if the extent already exists or creating a new extent 565 * if it isn't in the extent allocation tree yet. 566 * 567 * The extent is inserted into the file, dropping any existing extents 568 * from the file that overlap the new one. 569 */ 570 static noinline int replay_one_extent(struct btrfs_trans_handle *trans, 571 struct btrfs_root *root, 572 struct btrfs_path *path, 573 struct extent_buffer *eb, int slot, 574 struct btrfs_key *key) 575 { 576 struct btrfs_fs_info *fs_info = root->fs_info; 577 int found_type; 578 u64 extent_end; 579 u64 start = key->offset; 580 u64 nbytes = 0; 581 struct btrfs_file_extent_item *item; 582 struct inode *inode = NULL; 583 unsigned long size; 584 int ret = 0; 585 586 item = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item); 587 found_type = btrfs_file_extent_type(eb, item); 588 589 if (found_type == BTRFS_FILE_EXTENT_REG || 590 found_type == BTRFS_FILE_EXTENT_PREALLOC) { 591 nbytes = btrfs_file_extent_num_bytes(eb, item); 592 extent_end = start + nbytes; 593 594 /* 595 * We don't add to the inodes nbytes if we are prealloc or a 596 * hole. 
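		 * A file extent item with a disk_bytenr of zero describes a
		 * hole, so it contributes nothing to nbytes.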
597 */ 598 if (btrfs_file_extent_disk_bytenr(eb, item) == 0) 599 nbytes = 0; 600 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) { 601 size = btrfs_file_extent_ram_bytes(eb, item); 602 nbytes = btrfs_file_extent_ram_bytes(eb, item); 603 extent_end = ALIGN(start + size, 604 fs_info->sectorsize); 605 } else { 606 ret = 0; 607 goto out; 608 } 609 610 inode = read_one_inode(root, key->objectid); 611 if (!inode) { 612 ret = -EIO; 613 goto out; 614 } 615 616 /* 617 * first check to see if we already have this extent in the 618 * file. This must be done before the btrfs_drop_extents run 619 * so we don't try to drop this extent. 620 */ 621 ret = btrfs_lookup_file_extent(trans, root, path, 622 btrfs_ino(BTRFS_I(inode)), start, 0); 623 624 if (ret == 0 && 625 (found_type == BTRFS_FILE_EXTENT_REG || 626 found_type == BTRFS_FILE_EXTENT_PREALLOC)) { 627 struct btrfs_file_extent_item cmp1; 628 struct btrfs_file_extent_item cmp2; 629 struct btrfs_file_extent_item *existing; 630 struct extent_buffer *leaf; 631 632 leaf = path->nodes[0]; 633 existing = btrfs_item_ptr(leaf, path->slots[0], 634 struct btrfs_file_extent_item); 635 636 read_extent_buffer(eb, &cmp1, (unsigned long)item, 637 sizeof(cmp1)); 638 read_extent_buffer(leaf, &cmp2, (unsigned long)existing, 639 sizeof(cmp2)); 640 641 /* 642 * we already have a pointer to this exact extent, 643 * we don't have to do anything 644 */ 645 if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) { 646 btrfs_release_path(path); 647 goto out; 648 } 649 } 650 btrfs_release_path(path); 651 652 /* drop any overlapping extents */ 653 ret = btrfs_drop_extents(trans, root, inode, start, extent_end, 1); 654 if (ret) 655 goto out; 656 657 if (found_type == BTRFS_FILE_EXTENT_REG || 658 found_type == BTRFS_FILE_EXTENT_PREALLOC) { 659 u64 offset; 660 unsigned long dest_offset; 661 struct btrfs_key ins; 662 663 if (btrfs_file_extent_disk_bytenr(eb, item) == 0 && 664 btrfs_fs_incompat(fs_info, NO_HOLES)) 665 goto update_inode; 666 667 ret = btrfs_insert_empty_item(trans, root, path, key, 668 sizeof(*item)); 669 if (ret) 670 goto out; 671 dest_offset = btrfs_item_ptr_offset(path->nodes[0], 672 path->slots[0]); 673 copy_extent_buffer(path->nodes[0], eb, dest_offset, 674 (unsigned long)item, sizeof(*item)); 675 676 ins.objectid = btrfs_file_extent_disk_bytenr(eb, item); 677 ins.offset = btrfs_file_extent_disk_num_bytes(eb, item); 678 ins.type = BTRFS_EXTENT_ITEM_KEY; 679 offset = key->offset - btrfs_file_extent_offset(eb, item); 680 681 /* 682 * Manually record dirty extent, as here we did a shallow 683 * file extent item copy and skip normal backref update, 684 * but modifying extent tree all by ourselves. 685 * So need to manually record dirty extent for qgroup, 686 * as the owner of the file extent changed from log tree 687 * (doesn't affect qgroup) to fs/file tree(affects qgroup) 688 */ 689 ret = btrfs_qgroup_trace_extent(trans, 690 btrfs_file_extent_disk_bytenr(eb, item), 691 btrfs_file_extent_disk_num_bytes(eb, item), 692 GFP_NOFS); 693 if (ret < 0) 694 goto out; 695 696 if (ins.objectid > 0) { 697 u64 csum_start; 698 u64 csum_end; 699 LIST_HEAD(ordered_sums); 700 /* 701 * is this extent already allocated in the extent 702 * allocation tree? 
If so, just add a reference
			 */
			ret = btrfs_lookup_data_extent(fs_info, ins.objectid,
						ins.offset);
			if (ret == 0) {
				ret = btrfs_inc_extent_ref(trans, root,
						ins.objectid, ins.offset,
						0, root->root_key.objectid,
						key->objectid, offset);
				if (ret)
					goto out;
			} else {
				/*
				 * insert the extent pointer in the extent
				 * allocation tree
				 */
				ret = btrfs_alloc_logged_file_extent(trans,
						root->root_key.objectid,
						key->objectid, offset, &ins);
				if (ret)
					goto out;
			}
			btrfs_release_path(path);

			if (btrfs_file_extent_compression(eb, item)) {
				csum_start = ins.objectid;
				csum_end = csum_start + ins.offset;
			} else {
				csum_start = ins.objectid +
					btrfs_file_extent_offset(eb, item);
				csum_end = csum_start +
					btrfs_file_extent_num_bytes(eb, item);
			}

			ret = btrfs_lookup_csums_range(root->log_root,
						csum_start, csum_end - 1,
						&ordered_sums, 0);
			if (ret)
				goto out;
			/*
			 * Now delete all existing csums in the csum root that
			 * cover our range. We do this because we can have an
			 * extent that is completely referenced by one file
			 * extent item and partially referenced by another
			 * file extent item (like after using the clone or
			 * extent_same ioctls). In this case if we end up doing
			 * the replay of the one that partially references the
			 * extent first, and we do not do the csum deletion
			 * below, we can get 2 csum items in the csum tree that
			 * overlap each other. For example, imagine our log has
			 * the two following file extent items:
			 *
			 * key (257 EXTENT_DATA 409600)
			 *     extent data disk byte 12845056 nr 102400
			 *     extent data offset 20480 nr 20480 ram 102400
			 *
			 * key (257 EXTENT_DATA 819200)
			 *     extent data disk byte 12845056 nr 102400
			 *     extent data offset 0 nr 102400 ram 102400
			 *
			 * Where the second one fully references the 100K extent
			 * that starts at disk byte 12845056, and the log tree
			 * has a single csum item that covers the entire range
			 * of the extent:
			 *
			 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
			 *
			 * After the first file extent item is replayed, the
			 * csum tree gets the following csum item:
			 *
			 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
			 *
			 * Which covers the 20K sub-range starting at offset 20K
			 * of our extent. Now when we replay the second file
			 * extent item, if we do not delete existing csum items
			 * that cover any of its blocks, we end up getting two
			 * csum items in our csum tree that overlap each other:
			 *
			 * key (EXTENT_CSUM EXTENT_CSUM 12845056) itemsize 100
			 * key (EXTENT_CSUM EXTENT_CSUM 12865536) itemsize 20
			 *
			 * Which is a problem, because after this anyone trying
			 * to look up the checksum of any block of our
			 * extent starting at an offset of 40K or higher, will
			 * end up looking at the second csum item only, which
			 * does not contain the checksum for any block starting
			 * at offset 40K or higher of our extent.
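			 *
			 * So, before re-inserting the csums found in the log,
			 * delete any existing csum items that overlap this
			 * extent's range.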
789 */ 790 while (!list_empty(&ordered_sums)) { 791 struct btrfs_ordered_sum *sums; 792 sums = list_entry(ordered_sums.next, 793 struct btrfs_ordered_sum, 794 list); 795 if (!ret) 796 ret = btrfs_del_csums(trans, fs_info, 797 sums->bytenr, 798 sums->len); 799 if (!ret) 800 ret = btrfs_csum_file_blocks(trans, 801 fs_info->csum_root, sums); 802 list_del(&sums->list); 803 kfree(sums); 804 } 805 if (ret) 806 goto out; 807 } else { 808 btrfs_release_path(path); 809 } 810 } else if (found_type == BTRFS_FILE_EXTENT_INLINE) { 811 /* inline extents are easy, we just overwrite them */ 812 ret = overwrite_item(trans, root, path, eb, slot, key); 813 if (ret) 814 goto out; 815 } 816 817 inode_add_bytes(inode, nbytes); 818 update_inode: 819 ret = btrfs_update_inode(trans, root, inode); 820 out: 821 if (inode) 822 iput(inode); 823 return ret; 824 } 825 826 /* 827 * when cleaning up conflicts between the directory names in the 828 * subvolume, directory names in the log and directory names in the 829 * inode back references, we may have to unlink inodes from directories. 830 * 831 * This is a helper function to do the unlink of a specific directory 832 * item 833 */ 834 static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans, 835 struct btrfs_root *root, 836 struct btrfs_path *path, 837 struct btrfs_inode *dir, 838 struct btrfs_dir_item *di) 839 { 840 struct inode *inode; 841 char *name; 842 int name_len; 843 struct extent_buffer *leaf; 844 struct btrfs_key location; 845 int ret; 846 847 leaf = path->nodes[0]; 848 849 btrfs_dir_item_key_to_cpu(leaf, di, &location); 850 name_len = btrfs_dir_name_len(leaf, di); 851 name = kmalloc(name_len, GFP_NOFS); 852 if (!name) 853 return -ENOMEM; 854 855 read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len); 856 btrfs_release_path(path); 857 858 inode = read_one_inode(root, location.objectid); 859 if (!inode) { 860 ret = -EIO; 861 goto out; 862 } 863 864 ret = link_to_fixup_dir(trans, root, path, location.objectid); 865 if (ret) 866 goto out; 867 868 ret = btrfs_unlink_inode(trans, root, dir, BTRFS_I(inode), name, 869 name_len); 870 if (ret) 871 goto out; 872 else 873 ret = btrfs_run_delayed_items(trans); 874 out: 875 kfree(name); 876 iput(inode); 877 return ret; 878 } 879 880 /* 881 * helper function to see if a given name and sequence number found 882 * in an inode back reference are already in a directory and correctly 883 * point to this inode 884 */ 885 static noinline int inode_in_dir(struct btrfs_root *root, 886 struct btrfs_path *path, 887 u64 dirid, u64 objectid, u64 index, 888 const char *name, int name_len) 889 { 890 struct btrfs_dir_item *di; 891 struct btrfs_key location; 892 int match = 0; 893 894 di = btrfs_lookup_dir_index_item(NULL, root, path, dirid, 895 index, name, name_len, 0); 896 if (di && !IS_ERR(di)) { 897 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location); 898 if (location.objectid != objectid) 899 goto out; 900 } else 901 goto out; 902 btrfs_release_path(path); 903 904 di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0); 905 if (di && !IS_ERR(di)) { 906 btrfs_dir_item_key_to_cpu(path->nodes[0], di, &location); 907 if (location.objectid != objectid) 908 goto out; 909 } else 910 goto out; 911 match = 1; 912 out: 913 btrfs_release_path(path); 914 return match; 915 } 916 917 /* 918 * helper function to check a log tree for a named back reference in 919 * an inode. This is used to decide if a back reference that is 920 * found in the subvolume conflicts with what we find in the log. 
921 * 922 * inode backreferences may have multiple refs in a single item, 923 * during replay we process one reference at a time, and we don't 924 * want to delete valid links to a file from the subvolume if that 925 * link is also in the log. 926 */ 927 static noinline int backref_in_log(struct btrfs_root *log, 928 struct btrfs_key *key, 929 u64 ref_objectid, 930 const char *name, int namelen) 931 { 932 struct btrfs_path *path; 933 struct btrfs_inode_ref *ref; 934 unsigned long ptr; 935 unsigned long ptr_end; 936 unsigned long name_ptr; 937 int found_name_len; 938 int item_size; 939 int ret; 940 int match = 0; 941 942 path = btrfs_alloc_path(); 943 if (!path) 944 return -ENOMEM; 945 946 ret = btrfs_search_slot(NULL, log, key, path, 0, 0); 947 if (ret != 0) 948 goto out; 949 950 ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]); 951 952 if (key->type == BTRFS_INODE_EXTREF_KEY) { 953 if (btrfs_find_name_in_ext_backref(path->nodes[0], 954 path->slots[0], 955 ref_objectid, 956 name, namelen, NULL)) 957 match = 1; 958 959 goto out; 960 } 961 962 item_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]); 963 ptr_end = ptr + item_size; 964 while (ptr < ptr_end) { 965 ref = (struct btrfs_inode_ref *)ptr; 966 found_name_len = btrfs_inode_ref_name_len(path->nodes[0], ref); 967 if (found_name_len == namelen) { 968 name_ptr = (unsigned long)(ref + 1); 969 ret = memcmp_extent_buffer(path->nodes[0], name, 970 name_ptr, namelen); 971 if (ret == 0) { 972 match = 1; 973 goto out; 974 } 975 } 976 ptr = (unsigned long)(ref + 1) + found_name_len; 977 } 978 out: 979 btrfs_free_path(path); 980 return match; 981 } 982 983 static inline int __add_inode_ref(struct btrfs_trans_handle *trans, 984 struct btrfs_root *root, 985 struct btrfs_path *path, 986 struct btrfs_root *log_root, 987 struct btrfs_inode *dir, 988 struct btrfs_inode *inode, 989 u64 inode_objectid, u64 parent_objectid, 990 u64 ref_index, char *name, int namelen, 991 int *search_done) 992 { 993 int ret; 994 char *victim_name; 995 int victim_name_len; 996 struct extent_buffer *leaf; 997 struct btrfs_dir_item *di; 998 struct btrfs_key search_key; 999 struct btrfs_inode_extref *extref; 1000 1001 again: 1002 /* Search old style refs */ 1003 search_key.objectid = inode_objectid; 1004 search_key.type = BTRFS_INODE_REF_KEY; 1005 search_key.offset = parent_objectid; 1006 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0); 1007 if (ret == 0) { 1008 struct btrfs_inode_ref *victim_ref; 1009 unsigned long ptr; 1010 unsigned long ptr_end; 1011 1012 leaf = path->nodes[0]; 1013 1014 /* are we trying to overwrite a back ref for the root directory 1015 * if so, just jump out, we're done 1016 */ 1017 if (search_key.objectid == search_key.offset) 1018 return 1; 1019 1020 /* check all the names in this back reference to see 1021 * if they are in the log. 
if so, we allow them to stay 1022 * otherwise they must be unlinked as a conflict 1023 */ 1024 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); 1025 ptr_end = ptr + btrfs_item_size_nr(leaf, path->slots[0]); 1026 while (ptr < ptr_end) { 1027 victim_ref = (struct btrfs_inode_ref *)ptr; 1028 victim_name_len = btrfs_inode_ref_name_len(leaf, 1029 victim_ref); 1030 victim_name = kmalloc(victim_name_len, GFP_NOFS); 1031 if (!victim_name) 1032 return -ENOMEM; 1033 1034 read_extent_buffer(leaf, victim_name, 1035 (unsigned long)(victim_ref + 1), 1036 victim_name_len); 1037 1038 if (!backref_in_log(log_root, &search_key, 1039 parent_objectid, 1040 victim_name, 1041 victim_name_len)) { 1042 inc_nlink(&inode->vfs_inode); 1043 btrfs_release_path(path); 1044 1045 ret = btrfs_unlink_inode(trans, root, dir, inode, 1046 victim_name, victim_name_len); 1047 kfree(victim_name); 1048 if (ret) 1049 return ret; 1050 ret = btrfs_run_delayed_items(trans); 1051 if (ret) 1052 return ret; 1053 *search_done = 1; 1054 goto again; 1055 } 1056 kfree(victim_name); 1057 1058 ptr = (unsigned long)(victim_ref + 1) + victim_name_len; 1059 } 1060 1061 /* 1062 * NOTE: we have searched root tree and checked the 1063 * corresponding ref, it does not need to check again. 1064 */ 1065 *search_done = 1; 1066 } 1067 btrfs_release_path(path); 1068 1069 /* Same search but for extended refs */ 1070 extref = btrfs_lookup_inode_extref(NULL, root, path, name, namelen, 1071 inode_objectid, parent_objectid, 0, 1072 0); 1073 if (!IS_ERR_OR_NULL(extref)) { 1074 u32 item_size; 1075 u32 cur_offset = 0; 1076 unsigned long base; 1077 struct inode *victim_parent; 1078 1079 leaf = path->nodes[0]; 1080 1081 item_size = btrfs_item_size_nr(leaf, path->slots[0]); 1082 base = btrfs_item_ptr_offset(leaf, path->slots[0]); 1083 1084 while (cur_offset < item_size) { 1085 extref = (struct btrfs_inode_extref *)(base + cur_offset); 1086 1087 victim_name_len = btrfs_inode_extref_name_len(leaf, extref); 1088 1089 if (btrfs_inode_extref_parent(leaf, extref) != parent_objectid) 1090 goto next; 1091 1092 victim_name = kmalloc(victim_name_len, GFP_NOFS); 1093 if (!victim_name) 1094 return -ENOMEM; 1095 read_extent_buffer(leaf, victim_name, (unsigned long)&extref->name, 1096 victim_name_len); 1097 1098 search_key.objectid = inode_objectid; 1099 search_key.type = BTRFS_INODE_EXTREF_KEY; 1100 search_key.offset = btrfs_extref_hash(parent_objectid, 1101 victim_name, 1102 victim_name_len); 1103 ret = 0; 1104 if (!backref_in_log(log_root, &search_key, 1105 parent_objectid, victim_name, 1106 victim_name_len)) { 1107 ret = -ENOENT; 1108 victim_parent = read_one_inode(root, 1109 parent_objectid); 1110 if (victim_parent) { 1111 inc_nlink(&inode->vfs_inode); 1112 btrfs_release_path(path); 1113 1114 ret = btrfs_unlink_inode(trans, root, 1115 BTRFS_I(victim_parent), 1116 inode, 1117 victim_name, 1118 victim_name_len); 1119 if (!ret) 1120 ret = btrfs_run_delayed_items( 1121 trans); 1122 } 1123 iput(victim_parent); 1124 kfree(victim_name); 1125 if (ret) 1126 return ret; 1127 *search_done = 1; 1128 goto again; 1129 } 1130 kfree(victim_name); 1131 next: 1132 cur_offset += victim_name_len + sizeof(*extref); 1133 } 1134 *search_done = 1; 1135 } 1136 btrfs_release_path(path); 1137 1138 /* look for a conflicting sequence number */ 1139 di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir), 1140 ref_index, name, namelen, 0); 1141 if (di && !IS_ERR(di)) { 1142 ret = drop_one_dir_item(trans, root, path, dir, di); 1143 if (ret) 1144 return ret; 1145 } 1146 
btrfs_release_path(path); 1147 1148 /* look for a conflicting name */ 1149 di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir), 1150 name, namelen, 0); 1151 if (di && !IS_ERR(di)) { 1152 ret = drop_one_dir_item(trans, root, path, dir, di); 1153 if (ret) 1154 return ret; 1155 } 1156 btrfs_release_path(path); 1157 1158 return 0; 1159 } 1160 1161 static int extref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr, 1162 u32 *namelen, char **name, u64 *index, 1163 u64 *parent_objectid) 1164 { 1165 struct btrfs_inode_extref *extref; 1166 1167 extref = (struct btrfs_inode_extref *)ref_ptr; 1168 1169 *namelen = btrfs_inode_extref_name_len(eb, extref); 1170 *name = kmalloc(*namelen, GFP_NOFS); 1171 if (*name == NULL) 1172 return -ENOMEM; 1173 1174 read_extent_buffer(eb, *name, (unsigned long)&extref->name, 1175 *namelen); 1176 1177 if (index) 1178 *index = btrfs_inode_extref_index(eb, extref); 1179 if (parent_objectid) 1180 *parent_objectid = btrfs_inode_extref_parent(eb, extref); 1181 1182 return 0; 1183 } 1184 1185 static int ref_get_fields(struct extent_buffer *eb, unsigned long ref_ptr, 1186 u32 *namelen, char **name, u64 *index) 1187 { 1188 struct btrfs_inode_ref *ref; 1189 1190 ref = (struct btrfs_inode_ref *)ref_ptr; 1191 1192 *namelen = btrfs_inode_ref_name_len(eb, ref); 1193 *name = kmalloc(*namelen, GFP_NOFS); 1194 if (*name == NULL) 1195 return -ENOMEM; 1196 1197 read_extent_buffer(eb, *name, (unsigned long)(ref + 1), *namelen); 1198 1199 if (index) 1200 *index = btrfs_inode_ref_index(eb, ref); 1201 1202 return 0; 1203 } 1204 1205 /* 1206 * Take an inode reference item from the log tree and iterate all names from the 1207 * inode reference item in the subvolume tree with the same key (if it exists). 1208 * For any name that is not in the inode reference item from the log tree, do a 1209 * proper unlink of that name (that is, remove its entry from the inode 1210 * reference item and both dir index keys). 
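 *
 * (btrfs_unlink_inode() removes the inode reference as well as the dir item
 * and dir index entries for each such name.)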
1211 */ 1212 static int unlink_old_inode_refs(struct btrfs_trans_handle *trans, 1213 struct btrfs_root *root, 1214 struct btrfs_path *path, 1215 struct btrfs_inode *inode, 1216 struct extent_buffer *log_eb, 1217 int log_slot, 1218 struct btrfs_key *key) 1219 { 1220 int ret; 1221 unsigned long ref_ptr; 1222 unsigned long ref_end; 1223 struct extent_buffer *eb; 1224 1225 again: 1226 btrfs_release_path(path); 1227 ret = btrfs_search_slot(NULL, root, key, path, 0, 0); 1228 if (ret > 0) { 1229 ret = 0; 1230 goto out; 1231 } 1232 if (ret < 0) 1233 goto out; 1234 1235 eb = path->nodes[0]; 1236 ref_ptr = btrfs_item_ptr_offset(eb, path->slots[0]); 1237 ref_end = ref_ptr + btrfs_item_size_nr(eb, path->slots[0]); 1238 while (ref_ptr < ref_end) { 1239 char *name = NULL; 1240 int namelen; 1241 u64 parent_id; 1242 1243 if (key->type == BTRFS_INODE_EXTREF_KEY) { 1244 ret = extref_get_fields(eb, ref_ptr, &namelen, &name, 1245 NULL, &parent_id); 1246 } else { 1247 parent_id = key->offset; 1248 ret = ref_get_fields(eb, ref_ptr, &namelen, &name, 1249 NULL); 1250 } 1251 if (ret) 1252 goto out; 1253 1254 if (key->type == BTRFS_INODE_EXTREF_KEY) 1255 ret = btrfs_find_name_in_ext_backref(log_eb, log_slot, 1256 parent_id, name, 1257 namelen, NULL); 1258 else 1259 ret = btrfs_find_name_in_backref(log_eb, log_slot, name, 1260 namelen, NULL); 1261 1262 if (!ret) { 1263 struct inode *dir; 1264 1265 btrfs_release_path(path); 1266 dir = read_one_inode(root, parent_id); 1267 if (!dir) { 1268 ret = -ENOENT; 1269 kfree(name); 1270 goto out; 1271 } 1272 ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir), 1273 inode, name, namelen); 1274 kfree(name); 1275 iput(dir); 1276 if (ret) 1277 goto out; 1278 goto again; 1279 } 1280 1281 kfree(name); 1282 ref_ptr += namelen; 1283 if (key->type == BTRFS_INODE_EXTREF_KEY) 1284 ref_ptr += sizeof(struct btrfs_inode_extref); 1285 else 1286 ref_ptr += sizeof(struct btrfs_inode_ref); 1287 } 1288 ret = 0; 1289 out: 1290 btrfs_release_path(path); 1291 return ret; 1292 } 1293 1294 static int btrfs_inode_ref_exists(struct inode *inode, struct inode *dir, 1295 const u8 ref_type, const char *name, 1296 const int namelen) 1297 { 1298 struct btrfs_key key; 1299 struct btrfs_path *path; 1300 const u64 parent_id = btrfs_ino(BTRFS_I(dir)); 1301 int ret; 1302 1303 path = btrfs_alloc_path(); 1304 if (!path) 1305 return -ENOMEM; 1306 1307 key.objectid = btrfs_ino(BTRFS_I(inode)); 1308 key.type = ref_type; 1309 if (key.type == BTRFS_INODE_REF_KEY) 1310 key.offset = parent_id; 1311 else 1312 key.offset = btrfs_extref_hash(parent_id, name, namelen); 1313 1314 ret = btrfs_search_slot(NULL, BTRFS_I(inode)->root, &key, path, 0, 0); 1315 if (ret < 0) 1316 goto out; 1317 if (ret > 0) { 1318 ret = 0; 1319 goto out; 1320 } 1321 if (key.type == BTRFS_INODE_EXTREF_KEY) 1322 ret = btrfs_find_name_in_ext_backref(path->nodes[0], 1323 path->slots[0], parent_id, 1324 name, namelen, NULL); 1325 else 1326 ret = btrfs_find_name_in_backref(path->nodes[0], path->slots[0], 1327 name, namelen, NULL); 1328 1329 out: 1330 btrfs_free_path(path); 1331 return ret; 1332 } 1333 1334 static int add_link(struct btrfs_trans_handle *trans, struct btrfs_root *root, 1335 struct inode *dir, struct inode *inode, const char *name, 1336 int namelen, u64 ref_index) 1337 { 1338 struct btrfs_dir_item *dir_item; 1339 struct btrfs_key key; 1340 struct btrfs_path *path; 1341 struct inode *other_inode = NULL; 1342 int ret; 1343 1344 path = btrfs_alloc_path(); 1345 if (!path) 1346 return -ENOMEM; 1347 1348 dir_item = btrfs_lookup_dir_item(NULL, root, 
path, 1349 btrfs_ino(BTRFS_I(dir)), 1350 name, namelen, 0); 1351 if (!dir_item) { 1352 btrfs_release_path(path); 1353 goto add_link; 1354 } else if (IS_ERR(dir_item)) { 1355 ret = PTR_ERR(dir_item); 1356 goto out; 1357 } 1358 1359 /* 1360 * Our inode's dentry collides with the dentry of another inode which is 1361 * in the log but not yet processed since it has a higher inode number. 1362 * So delete that other dentry. 1363 */ 1364 btrfs_dir_item_key_to_cpu(path->nodes[0], dir_item, &key); 1365 btrfs_release_path(path); 1366 other_inode = read_one_inode(root, key.objectid); 1367 if (!other_inode) { 1368 ret = -ENOENT; 1369 goto out; 1370 } 1371 ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir), BTRFS_I(other_inode), 1372 name, namelen); 1373 if (ret) 1374 goto out; 1375 /* 1376 * If we dropped the link count to 0, bump it so that later the iput() 1377 * on the inode will not free it. We will fixup the link count later. 1378 */ 1379 if (other_inode->i_nlink == 0) 1380 inc_nlink(other_inode); 1381 1382 ret = btrfs_run_delayed_items(trans); 1383 if (ret) 1384 goto out; 1385 add_link: 1386 ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), 1387 name, namelen, 0, ref_index); 1388 out: 1389 iput(other_inode); 1390 btrfs_free_path(path); 1391 1392 return ret; 1393 } 1394 1395 /* 1396 * replay one inode back reference item found in the log tree. 1397 * eb, slot and key refer to the buffer and key found in the log tree. 1398 * root is the destination we are replaying into, and path is for temp 1399 * use by this function. (it should be released on return). 1400 */ 1401 static noinline int add_inode_ref(struct btrfs_trans_handle *trans, 1402 struct btrfs_root *root, 1403 struct btrfs_root *log, 1404 struct btrfs_path *path, 1405 struct extent_buffer *eb, int slot, 1406 struct btrfs_key *key) 1407 { 1408 struct inode *dir = NULL; 1409 struct inode *inode = NULL; 1410 unsigned long ref_ptr; 1411 unsigned long ref_end; 1412 char *name = NULL; 1413 int namelen; 1414 int ret; 1415 int search_done = 0; 1416 int log_ref_ver = 0; 1417 u64 parent_objectid; 1418 u64 inode_objectid; 1419 u64 ref_index = 0; 1420 int ref_struct_size; 1421 1422 ref_ptr = btrfs_item_ptr_offset(eb, slot); 1423 ref_end = ref_ptr + btrfs_item_size_nr(eb, slot); 1424 1425 if (key->type == BTRFS_INODE_EXTREF_KEY) { 1426 struct btrfs_inode_extref *r; 1427 1428 ref_struct_size = sizeof(struct btrfs_inode_extref); 1429 log_ref_ver = 1; 1430 r = (struct btrfs_inode_extref *)ref_ptr; 1431 parent_objectid = btrfs_inode_extref_parent(eb, r); 1432 } else { 1433 ref_struct_size = sizeof(struct btrfs_inode_ref); 1434 parent_objectid = key->offset; 1435 } 1436 inode_objectid = key->objectid; 1437 1438 /* 1439 * it is possible that we didn't log all the parent directories 1440 * for a given inode. If we don't find the dir, just don't 1441 * copy the back ref in. The link count fixup code will take 1442 * care of the rest 1443 */ 1444 dir = read_one_inode(root, parent_objectid); 1445 if (!dir) { 1446 ret = -ENOENT; 1447 goto out; 1448 } 1449 1450 inode = read_one_inode(root, inode_objectid); 1451 if (!inode) { 1452 ret = -EIO; 1453 goto out; 1454 } 1455 1456 while (ref_ptr < ref_end) { 1457 if (log_ref_ver) { 1458 ret = extref_get_fields(eb, ref_ptr, &namelen, &name, 1459 &ref_index, &parent_objectid); 1460 /* 1461 * parent object can change from one array 1462 * item to another. 
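			 * When that happens we look up the new parent
			 * directory below before processing the name.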
1463 */ 1464 if (!dir) 1465 dir = read_one_inode(root, parent_objectid); 1466 if (!dir) { 1467 ret = -ENOENT; 1468 goto out; 1469 } 1470 } else { 1471 ret = ref_get_fields(eb, ref_ptr, &namelen, &name, 1472 &ref_index); 1473 } 1474 if (ret) 1475 goto out; 1476 1477 /* if we already have a perfect match, we're done */ 1478 if (!inode_in_dir(root, path, btrfs_ino(BTRFS_I(dir)), 1479 btrfs_ino(BTRFS_I(inode)), ref_index, 1480 name, namelen)) { 1481 /* 1482 * look for a conflicting back reference in the 1483 * metadata. if we find one we have to unlink that name 1484 * of the file before we add our new link. Later on, we 1485 * overwrite any existing back reference, and we don't 1486 * want to create dangling pointers in the directory. 1487 */ 1488 1489 if (!search_done) { 1490 ret = __add_inode_ref(trans, root, path, log, 1491 BTRFS_I(dir), 1492 BTRFS_I(inode), 1493 inode_objectid, 1494 parent_objectid, 1495 ref_index, name, namelen, 1496 &search_done); 1497 if (ret) { 1498 if (ret == 1) 1499 ret = 0; 1500 goto out; 1501 } 1502 } 1503 1504 /* 1505 * If a reference item already exists for this inode 1506 * with the same parent and name, but different index, 1507 * drop it and the corresponding directory index entries 1508 * from the parent before adding the new reference item 1509 * and dir index entries, otherwise we would fail with 1510 * -EEXIST returned from btrfs_add_link() below. 1511 */ 1512 ret = btrfs_inode_ref_exists(inode, dir, key->type, 1513 name, namelen); 1514 if (ret > 0) { 1515 ret = btrfs_unlink_inode(trans, root, 1516 BTRFS_I(dir), 1517 BTRFS_I(inode), 1518 name, namelen); 1519 /* 1520 * If we dropped the link count to 0, bump it so 1521 * that later the iput() on the inode will not 1522 * free it. We will fixup the link count later. 1523 */ 1524 if (!ret && inode->i_nlink == 0) 1525 inc_nlink(inode); 1526 } 1527 if (ret < 0) 1528 goto out; 1529 1530 /* insert our name */ 1531 ret = add_link(trans, root, dir, inode, name, namelen, 1532 ref_index); 1533 if (ret) 1534 goto out; 1535 1536 btrfs_update_inode(trans, root, inode); 1537 } 1538 1539 ref_ptr = (unsigned long)(ref_ptr + ref_struct_size) + namelen; 1540 kfree(name); 1541 name = NULL; 1542 if (log_ref_ver) { 1543 iput(dir); 1544 dir = NULL; 1545 } 1546 } 1547 1548 /* 1549 * Before we overwrite the inode reference item in the subvolume tree 1550 * with the item from the log tree, we must unlink all names from the 1551 * parent directory that are in the subvolume's tree inode reference 1552 * item, otherwise we end up with an inconsistent subvolume tree where 1553 * dir index entries exist for a name but there is no inode reference 1554 * item with the same name. 
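	 *
	 * unlink_old_inode_refs() below takes care of that cleanup.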
1555 */ 1556 ret = unlink_old_inode_refs(trans, root, path, BTRFS_I(inode), eb, slot, 1557 key); 1558 if (ret) 1559 goto out; 1560 1561 /* finally write the back reference in the inode */ 1562 ret = overwrite_item(trans, root, path, eb, slot, key); 1563 out: 1564 btrfs_release_path(path); 1565 kfree(name); 1566 iput(dir); 1567 iput(inode); 1568 return ret; 1569 } 1570 1571 static int insert_orphan_item(struct btrfs_trans_handle *trans, 1572 struct btrfs_root *root, u64 ino) 1573 { 1574 int ret; 1575 1576 ret = btrfs_insert_orphan_item(trans, root, ino); 1577 if (ret == -EEXIST) 1578 ret = 0; 1579 1580 return ret; 1581 } 1582 1583 static int count_inode_extrefs(struct btrfs_root *root, 1584 struct btrfs_inode *inode, struct btrfs_path *path) 1585 { 1586 int ret = 0; 1587 int name_len; 1588 unsigned int nlink = 0; 1589 u32 item_size; 1590 u32 cur_offset = 0; 1591 u64 inode_objectid = btrfs_ino(inode); 1592 u64 offset = 0; 1593 unsigned long ptr; 1594 struct btrfs_inode_extref *extref; 1595 struct extent_buffer *leaf; 1596 1597 while (1) { 1598 ret = btrfs_find_one_extref(root, inode_objectid, offset, path, 1599 &extref, &offset); 1600 if (ret) 1601 break; 1602 1603 leaf = path->nodes[0]; 1604 item_size = btrfs_item_size_nr(leaf, path->slots[0]); 1605 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]); 1606 cur_offset = 0; 1607 1608 while (cur_offset < item_size) { 1609 extref = (struct btrfs_inode_extref *) (ptr + cur_offset); 1610 name_len = btrfs_inode_extref_name_len(leaf, extref); 1611 1612 nlink++; 1613 1614 cur_offset += name_len + sizeof(*extref); 1615 } 1616 1617 offset++; 1618 btrfs_release_path(path); 1619 } 1620 btrfs_release_path(path); 1621 1622 if (ret < 0 && ret != -ENOENT) 1623 return ret; 1624 return nlink; 1625 } 1626 1627 static int count_inode_refs(struct btrfs_root *root, 1628 struct btrfs_inode *inode, struct btrfs_path *path) 1629 { 1630 int ret; 1631 struct btrfs_key key; 1632 unsigned int nlink = 0; 1633 unsigned long ptr; 1634 unsigned long ptr_end; 1635 int name_len; 1636 u64 ino = btrfs_ino(inode); 1637 1638 key.objectid = ino; 1639 key.type = BTRFS_INODE_REF_KEY; 1640 key.offset = (u64)-1; 1641 1642 while (1) { 1643 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 1644 if (ret < 0) 1645 break; 1646 if (ret > 0) { 1647 if (path->slots[0] == 0) 1648 break; 1649 path->slots[0]--; 1650 } 1651 process_slot: 1652 btrfs_item_key_to_cpu(path->nodes[0], &key, 1653 path->slots[0]); 1654 if (key.objectid != ino || 1655 key.type != BTRFS_INODE_REF_KEY) 1656 break; 1657 ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]); 1658 ptr_end = ptr + btrfs_item_size_nr(path->nodes[0], 1659 path->slots[0]); 1660 while (ptr < ptr_end) { 1661 struct btrfs_inode_ref *ref; 1662 1663 ref = (struct btrfs_inode_ref *)ptr; 1664 name_len = btrfs_inode_ref_name_len(path->nodes[0], 1665 ref); 1666 ptr = (unsigned long)(ref + 1) + name_len; 1667 nlink++; 1668 } 1669 1670 if (key.offset == 0) 1671 break; 1672 if (path->slots[0] > 0) { 1673 path->slots[0]--; 1674 goto process_slot; 1675 } 1676 key.offset--; 1677 btrfs_release_path(path); 1678 } 1679 btrfs_release_path(path); 1680 1681 return nlink; 1682 } 1683 1684 /* 1685 * There are a few corners where the link count of the file can't 1686 * be properly maintained during replay. So, instead of adding 1687 * lots of complexity to the log code, we just scan the backrefs 1688 * for any file that has been through replay. 1689 * 1690 * The scan will update the link count on the inode to reflect the 1691 * number of back refs found. 
If it goes down to zero, the iput
 * will free the inode.
 */
static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct inode *inode)
{
	struct btrfs_path *path;
	int ret;
	u64 nlink = 0;
	u64 ino = btrfs_ino(BTRFS_I(inode));

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = count_inode_refs(root, BTRFS_I(inode), path);
	if (ret < 0)
		goto out;

	nlink = ret;

	ret = count_inode_extrefs(root, BTRFS_I(inode), path);
	if (ret < 0)
		goto out;

	nlink += ret;

	ret = 0;

	if (nlink != inode->i_nlink) {
		set_nlink(inode, nlink);
		btrfs_update_inode(trans, root, inode);
	}
	BTRFS_I(inode)->index_cnt = (u64)-1;

	if (inode->i_nlink == 0) {
		if (S_ISDIR(inode->i_mode)) {
			ret = replay_dir_deletes(trans, root, NULL, path,
						 ino, 1);
			if (ret)
				goto out;
		}
		ret = insert_orphan_item(trans, root, ino);
	}

out:
	btrfs_free_path(path);
	return ret;
}

static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    struct btrfs_path *path)
{
	int ret;
	struct btrfs_key key;
	struct inode *inode;

	key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = (u64)-1;
	while (1) {
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0)
			break;

		if (ret == 1) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}

		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid != BTRFS_TREE_LOG_FIXUP_OBJECTID ||
		    key.type != BTRFS_ORPHAN_ITEM_KEY)
			break;

		ret = btrfs_del_item(trans, root, path);
		if (ret)
			goto out;

		btrfs_release_path(path);
		inode = read_one_inode(root, key.offset);
		if (!inode)
			return -EIO;

		ret = fixup_inode_link_count(trans, root, inode);
		iput(inode);
		if (ret)
			goto out;

		/*
		 * fixup on a directory may create new entries,
		 * make sure we always look for the highest possible
		 * offset
		 */
		key.offset = (u64)-1;
	}
	ret = 0;
out:
	btrfs_release_path(path);
	return ret;
}


/*
 * record a given inode in the fixup dir so we can check its link
 * count when replay is done.
The link count is incremented here 1800 * so the inode won't go away until we check it 1801 */ 1802 static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans, 1803 struct btrfs_root *root, 1804 struct btrfs_path *path, 1805 u64 objectid) 1806 { 1807 struct btrfs_key key; 1808 int ret = 0; 1809 struct inode *inode; 1810 1811 inode = read_one_inode(root, objectid); 1812 if (!inode) 1813 return -EIO; 1814 1815 key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID; 1816 key.type = BTRFS_ORPHAN_ITEM_KEY; 1817 key.offset = objectid; 1818 1819 ret = btrfs_insert_empty_item(trans, root, path, &key, 0); 1820 1821 btrfs_release_path(path); 1822 if (ret == 0) { 1823 if (!inode->i_nlink) 1824 set_nlink(inode, 1); 1825 else 1826 inc_nlink(inode); 1827 ret = btrfs_update_inode(trans, root, inode); 1828 } else if (ret == -EEXIST) { 1829 ret = 0; 1830 } else { 1831 BUG(); /* Logic Error */ 1832 } 1833 iput(inode); 1834 1835 return ret; 1836 } 1837 1838 /* 1839 * when replaying the log for a directory, we only insert names 1840 * for inodes that actually exist. This means an fsync on a directory 1841 * does not implicitly fsync all the new files in it 1842 */ 1843 static noinline int insert_one_name(struct btrfs_trans_handle *trans, 1844 struct btrfs_root *root, 1845 u64 dirid, u64 index, 1846 char *name, int name_len, 1847 struct btrfs_key *location) 1848 { 1849 struct inode *inode; 1850 struct inode *dir; 1851 int ret; 1852 1853 inode = read_one_inode(root, location->objectid); 1854 if (!inode) 1855 return -ENOENT; 1856 1857 dir = read_one_inode(root, dirid); 1858 if (!dir) { 1859 iput(inode); 1860 return -EIO; 1861 } 1862 1863 ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name, 1864 name_len, 1, index); 1865 1866 /* FIXME, put inode into FIXUP list */ 1867 1868 iput(inode); 1869 iput(dir); 1870 return ret; 1871 } 1872 1873 /* 1874 * Return true if an inode reference exists in the log for the given name, 1875 * inode and parent inode. 1876 */ 1877 static bool name_in_log_ref(struct btrfs_root *log_root, 1878 const char *name, const int name_len, 1879 const u64 dirid, const u64 ino) 1880 { 1881 struct btrfs_key search_key; 1882 1883 search_key.objectid = ino; 1884 search_key.type = BTRFS_INODE_REF_KEY; 1885 search_key.offset = dirid; 1886 if (backref_in_log(log_root, &search_key, dirid, name, name_len)) 1887 return true; 1888 1889 search_key.type = BTRFS_INODE_EXTREF_KEY; 1890 search_key.offset = btrfs_extref_hash(dirid, name, name_len); 1891 if (backref_in_log(log_root, &search_key, dirid, name, name_len)) 1892 return true; 1893 1894 return false; 1895 } 1896 1897 /* 1898 * take a single entry in a log directory item and replay it into 1899 * the subvolume. 1900 * 1901 * if a conflicting item exists in the subdirectory already, 1902 * the inode it points to is unlinked and put into the link count 1903 * fix up tree. 1904 * 1905 * If a name from the log points to a file or directory that does 1906 * not exist in the FS, it is skipped. fsyncs on directories 1907 * do not force down inodes inside that directory, just changes to the 1908 * names or unlinks in a directory. 1909 * 1910 * Returns < 0 on error, 0 if the name wasn't replayed (dentry points to a 1911 * non-existing inode) and 1 if the name was replayed. 
1912 */ 1913 static noinline int replay_one_name(struct btrfs_trans_handle *trans, 1914 struct btrfs_root *root, 1915 struct btrfs_path *path, 1916 struct extent_buffer *eb, 1917 struct btrfs_dir_item *di, 1918 struct btrfs_key *key) 1919 { 1920 char *name; 1921 int name_len; 1922 struct btrfs_dir_item *dst_di; 1923 struct btrfs_key found_key; 1924 struct btrfs_key log_key; 1925 struct inode *dir; 1926 u8 log_type; 1927 int exists; 1928 int ret = 0; 1929 bool update_size = (key->type == BTRFS_DIR_INDEX_KEY); 1930 bool name_added = false; 1931 1932 dir = read_one_inode(root, key->objectid); 1933 if (!dir) 1934 return -EIO; 1935 1936 name_len = btrfs_dir_name_len(eb, di); 1937 name = kmalloc(name_len, GFP_NOFS); 1938 if (!name) { 1939 ret = -ENOMEM; 1940 goto out; 1941 } 1942 1943 log_type = btrfs_dir_type(eb, di); 1944 read_extent_buffer(eb, name, (unsigned long)(di + 1), 1945 name_len); 1946 1947 btrfs_dir_item_key_to_cpu(eb, di, &log_key); 1948 exists = btrfs_lookup_inode(trans, root, path, &log_key, 0); 1949 if (exists == 0) 1950 exists = 1; 1951 else 1952 exists = 0; 1953 btrfs_release_path(path); 1954 1955 if (key->type == BTRFS_DIR_ITEM_KEY) { 1956 dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid, 1957 name, name_len, 1); 1958 } else if (key->type == BTRFS_DIR_INDEX_KEY) { 1959 dst_di = btrfs_lookup_dir_index_item(trans, root, path, 1960 key->objectid, 1961 key->offset, name, 1962 name_len, 1); 1963 } else { 1964 /* Corruption */ 1965 ret = -EINVAL; 1966 goto out; 1967 } 1968 if (IS_ERR_OR_NULL(dst_di)) { 1969 /* we need a sequence number to insert, so we only 1970 * do inserts for the BTRFS_DIR_INDEX_KEY types 1971 */ 1972 if (key->type != BTRFS_DIR_INDEX_KEY) 1973 goto out; 1974 goto insert; 1975 } 1976 1977 btrfs_dir_item_key_to_cpu(path->nodes[0], dst_di, &found_key); 1978 /* the existing item matches the logged item */ 1979 if (found_key.objectid == log_key.objectid && 1980 found_key.type == log_key.type && 1981 found_key.offset == log_key.offset && 1982 btrfs_dir_type(path->nodes[0], dst_di) == log_type) { 1983 update_size = false; 1984 goto out; 1985 } 1986 1987 /* 1988 * don't drop the conflicting directory entry if the inode 1989 * for the new entry doesn't exist 1990 */ 1991 if (!exists) 1992 goto out; 1993 1994 ret = drop_one_dir_item(trans, root, path, BTRFS_I(dir), dst_di); 1995 if (ret) 1996 goto out; 1997 1998 if (key->type == BTRFS_DIR_INDEX_KEY) 1999 goto insert; 2000 out: 2001 btrfs_release_path(path); 2002 if (!ret && update_size) { 2003 btrfs_i_size_write(BTRFS_I(dir), dir->i_size + name_len * 2); 2004 ret = btrfs_update_inode(trans, root, dir); 2005 } 2006 kfree(name); 2007 iput(dir); 2008 if (!ret && name_added) 2009 ret = 1; 2010 return ret; 2011 2012 insert: 2013 if (name_in_log_ref(root->log_root, name, name_len, 2014 key->objectid, log_key.objectid)) { 2015 /* The dentry will be added later. */ 2016 ret = 0; 2017 update_size = false; 2018 goto out; 2019 } 2020 btrfs_release_path(path); 2021 ret = insert_one_name(trans, root, key->objectid, key->offset, 2022 name, name_len, &log_key); 2023 if (ret && ret != -ENOENT && ret != -EEXIST) 2024 goto out; 2025 if (!ret) 2026 name_added = true; 2027 update_size = false; 2028 ret = 0; 2029 goto out; 2030 } 2031 2032 /* 2033 * find all the names in a directory item and reconcile them into 2034 * the subvolume. 
Only BTRFS_DIR_ITEM_KEY types will have more than 2035 * one name in a directory item, but the same code gets used for 2036 * both directory index types 2037 */ 2038 static noinline int replay_one_dir_item(struct btrfs_trans_handle *trans, 2039 struct btrfs_root *root, 2040 struct btrfs_path *path, 2041 struct extent_buffer *eb, int slot, 2042 struct btrfs_key *key) 2043 { 2044 int ret = 0; 2045 u32 item_size = btrfs_item_size_nr(eb, slot); 2046 struct btrfs_dir_item *di; 2047 int name_len; 2048 unsigned long ptr; 2049 unsigned long ptr_end; 2050 struct btrfs_path *fixup_path = NULL; 2051 2052 ptr = btrfs_item_ptr_offset(eb, slot); 2053 ptr_end = ptr + item_size; 2054 while (ptr < ptr_end) { 2055 di = (struct btrfs_dir_item *)ptr; 2056 name_len = btrfs_dir_name_len(eb, di); 2057 ret = replay_one_name(trans, root, path, eb, di, key); 2058 if (ret < 0) 2059 break; 2060 ptr = (unsigned long)(di + 1); 2061 ptr += name_len; 2062 2063 /* 2064 * If this entry refers to a non-directory (directories can not 2065 * have a link count > 1) and it was added in the transaction 2066 * that was not committed, make sure we fixup the link count of 2067 * the inode it the entry points to. Otherwise something like 2068 * the following would result in a directory pointing to an 2069 * inode with a wrong link that does not account for this dir 2070 * entry: 2071 * 2072 * mkdir testdir 2073 * touch testdir/foo 2074 * touch testdir/bar 2075 * sync 2076 * 2077 * ln testdir/bar testdir/bar_link 2078 * ln testdir/foo testdir/foo_link 2079 * xfs_io -c "fsync" testdir/bar 2080 * 2081 * <power failure> 2082 * 2083 * mount fs, log replay happens 2084 * 2085 * File foo would remain with a link count of 1 when it has two 2086 * entries pointing to it in the directory testdir. This would 2087 * make it impossible to ever delete the parent directory has 2088 * it would result in stale dentries that can never be deleted. 2089 */ 2090 if (ret == 1 && btrfs_dir_type(eb, di) != BTRFS_FT_DIR) { 2091 struct btrfs_key di_key; 2092 2093 if (!fixup_path) { 2094 fixup_path = btrfs_alloc_path(); 2095 if (!fixup_path) { 2096 ret = -ENOMEM; 2097 break; 2098 } 2099 } 2100 2101 btrfs_dir_item_key_to_cpu(eb, di, &di_key); 2102 ret = link_to_fixup_dir(trans, root, fixup_path, 2103 di_key.objectid); 2104 if (ret) 2105 break; 2106 } 2107 ret = 0; 2108 } 2109 btrfs_free_path(fixup_path); 2110 return ret; 2111 } 2112 2113 /* 2114 * directory replay has two parts. There are the standard directory 2115 * items in the log copied from the subvolume, and range items 2116 * created in the log while the subvolume was logged. 2117 * 2118 * The range items tell us which parts of the key space the log 2119 * is authoritative for. During replay, if a key in the subvolume 2120 * directory is in a logged range item, but not actually in the log 2121 * that means it was deleted from the directory before the fsync 2122 * and should be removed. 
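 *
 * Illustrative example: if the log holds a dir log item covering
 * index offsets [0, 100] for this directory, a subvolume entry at
 * index 50 with no matching item in the log was deleted before the
 * fsync and is removed during replay, while an entry at index 200
 * lies outside every logged range and is left untouched.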
2123 */ 2124 static noinline int find_dir_range(struct btrfs_root *root, 2125 struct btrfs_path *path, 2126 u64 dirid, int key_type, 2127 u64 *start_ret, u64 *end_ret) 2128 { 2129 struct btrfs_key key; 2130 u64 found_end; 2131 struct btrfs_dir_log_item *item; 2132 int ret; 2133 int nritems; 2134 2135 if (*start_ret == (u64)-1) 2136 return 1; 2137 2138 key.objectid = dirid; 2139 key.type = key_type; 2140 key.offset = *start_ret; 2141 2142 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 2143 if (ret < 0) 2144 goto out; 2145 if (ret > 0) { 2146 if (path->slots[0] == 0) 2147 goto out; 2148 path->slots[0]--; 2149 } 2150 if (ret != 0) 2151 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); 2152 2153 if (key.type != key_type || key.objectid != dirid) { 2154 ret = 1; 2155 goto next; 2156 } 2157 item = btrfs_item_ptr(path->nodes[0], path->slots[0], 2158 struct btrfs_dir_log_item); 2159 found_end = btrfs_dir_log_end(path->nodes[0], item); 2160 2161 if (*start_ret >= key.offset && *start_ret <= found_end) { 2162 ret = 0; 2163 *start_ret = key.offset; 2164 *end_ret = found_end; 2165 goto out; 2166 } 2167 ret = 1; 2168 next: 2169 /* check the next slot in the tree to see if it is a valid item */ 2170 nritems = btrfs_header_nritems(path->nodes[0]); 2171 path->slots[0]++; 2172 if (path->slots[0] >= nritems) { 2173 ret = btrfs_next_leaf(root, path); 2174 if (ret) 2175 goto out; 2176 } 2177 2178 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); 2179 2180 if (key.type != key_type || key.objectid != dirid) { 2181 ret = 1; 2182 goto out; 2183 } 2184 item = btrfs_item_ptr(path->nodes[0], path->slots[0], 2185 struct btrfs_dir_log_item); 2186 found_end = btrfs_dir_log_end(path->nodes[0], item); 2187 *start_ret = key.offset; 2188 *end_ret = found_end; 2189 ret = 0; 2190 out: 2191 btrfs_release_path(path); 2192 return ret; 2193 } 2194 2195 /* 2196 * this looks for a given directory item in the log. 
If the directory 2197 * item is not in the log, the item is removed and the inode it points 2198 * to is unlinked 2199 */ 2200 static noinline int check_item_in_log(struct btrfs_trans_handle *trans, 2201 struct btrfs_root *root, 2202 struct btrfs_root *log, 2203 struct btrfs_path *path, 2204 struct btrfs_path *log_path, 2205 struct inode *dir, 2206 struct btrfs_key *dir_key) 2207 { 2208 int ret; 2209 struct extent_buffer *eb; 2210 int slot; 2211 u32 item_size; 2212 struct btrfs_dir_item *di; 2213 struct btrfs_dir_item *log_di; 2214 int name_len; 2215 unsigned long ptr; 2216 unsigned long ptr_end; 2217 char *name; 2218 struct inode *inode; 2219 struct btrfs_key location; 2220 2221 again: 2222 eb = path->nodes[0]; 2223 slot = path->slots[0]; 2224 item_size = btrfs_item_size_nr(eb, slot); 2225 ptr = btrfs_item_ptr_offset(eb, slot); 2226 ptr_end = ptr + item_size; 2227 while (ptr < ptr_end) { 2228 di = (struct btrfs_dir_item *)ptr; 2229 name_len = btrfs_dir_name_len(eb, di); 2230 name = kmalloc(name_len, GFP_NOFS); 2231 if (!name) { 2232 ret = -ENOMEM; 2233 goto out; 2234 } 2235 read_extent_buffer(eb, name, (unsigned long)(di + 1), 2236 name_len); 2237 log_di = NULL; 2238 if (log && dir_key->type == BTRFS_DIR_ITEM_KEY) { 2239 log_di = btrfs_lookup_dir_item(trans, log, log_path, 2240 dir_key->objectid, 2241 name, name_len, 0); 2242 } else if (log && dir_key->type == BTRFS_DIR_INDEX_KEY) { 2243 log_di = btrfs_lookup_dir_index_item(trans, log, 2244 log_path, 2245 dir_key->objectid, 2246 dir_key->offset, 2247 name, name_len, 0); 2248 } 2249 if (!log_di || log_di == ERR_PTR(-ENOENT)) { 2250 btrfs_dir_item_key_to_cpu(eb, di, &location); 2251 btrfs_release_path(path); 2252 btrfs_release_path(log_path); 2253 inode = read_one_inode(root, location.objectid); 2254 if (!inode) { 2255 kfree(name); 2256 return -EIO; 2257 } 2258 2259 ret = link_to_fixup_dir(trans, root, 2260 path, location.objectid); 2261 if (ret) { 2262 kfree(name); 2263 iput(inode); 2264 goto out; 2265 } 2266 2267 inc_nlink(inode); 2268 ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir), 2269 BTRFS_I(inode), name, name_len); 2270 if (!ret) 2271 ret = btrfs_run_delayed_items(trans); 2272 kfree(name); 2273 iput(inode); 2274 if (ret) 2275 goto out; 2276 2277 /* there might still be more names under this key 2278 * check and repeat if required 2279 */ 2280 ret = btrfs_search_slot(NULL, root, dir_key, path, 2281 0, 0); 2282 if (ret == 0) 2283 goto again; 2284 ret = 0; 2285 goto out; 2286 } else if (IS_ERR(log_di)) { 2287 kfree(name); 2288 return PTR_ERR(log_di); 2289 } 2290 btrfs_release_path(log_path); 2291 kfree(name); 2292 2293 ptr = (unsigned long)(di + 1); 2294 ptr += name_len; 2295 } 2296 ret = 0; 2297 out: 2298 btrfs_release_path(path); 2299 btrfs_release_path(log_path); 2300 return ret; 2301 } 2302 2303 static int replay_xattr_deletes(struct btrfs_trans_handle *trans, 2304 struct btrfs_root *root, 2305 struct btrfs_root *log, 2306 struct btrfs_path *path, 2307 const u64 ino) 2308 { 2309 struct btrfs_key search_key; 2310 struct btrfs_path *log_path; 2311 int i; 2312 int nritems; 2313 int ret; 2314 2315 log_path = btrfs_alloc_path(); 2316 if (!log_path) 2317 return -ENOMEM; 2318 2319 search_key.objectid = ino; 2320 search_key.type = BTRFS_XATTR_ITEM_KEY; 2321 search_key.offset = 0; 2322 again: 2323 ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0); 2324 if (ret < 0) 2325 goto out; 2326 process_leaf: 2327 nritems = btrfs_header_nritems(path->nodes[0]); 2328 for (i = path->slots[0]; i < nritems; i++) { 2329 struct btrfs_key key; 
2330 struct btrfs_dir_item *di; 2331 struct btrfs_dir_item *log_di; 2332 u32 total_size; 2333 u32 cur; 2334 2335 btrfs_item_key_to_cpu(path->nodes[0], &key, i); 2336 if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY) { 2337 ret = 0; 2338 goto out; 2339 } 2340 2341 di = btrfs_item_ptr(path->nodes[0], i, struct btrfs_dir_item); 2342 total_size = btrfs_item_size_nr(path->nodes[0], i); 2343 cur = 0; 2344 while (cur < total_size) { 2345 u16 name_len = btrfs_dir_name_len(path->nodes[0], di); 2346 u16 data_len = btrfs_dir_data_len(path->nodes[0], di); 2347 u32 this_len = sizeof(*di) + name_len + data_len; 2348 char *name; 2349 2350 name = kmalloc(name_len, GFP_NOFS); 2351 if (!name) { 2352 ret = -ENOMEM; 2353 goto out; 2354 } 2355 read_extent_buffer(path->nodes[0], name, 2356 (unsigned long)(di + 1), name_len); 2357 2358 log_di = btrfs_lookup_xattr(NULL, log, log_path, ino, 2359 name, name_len, 0); 2360 btrfs_release_path(log_path); 2361 if (!log_di) { 2362 /* Doesn't exist in log tree, so delete it. */ 2363 btrfs_release_path(path); 2364 di = btrfs_lookup_xattr(trans, root, path, ino, 2365 name, name_len, -1); 2366 kfree(name); 2367 if (IS_ERR(di)) { 2368 ret = PTR_ERR(di); 2369 goto out; 2370 } 2371 ASSERT(di); 2372 ret = btrfs_delete_one_dir_name(trans, root, 2373 path, di); 2374 if (ret) 2375 goto out; 2376 btrfs_release_path(path); 2377 search_key = key; 2378 goto again; 2379 } 2380 kfree(name); 2381 if (IS_ERR(log_di)) { 2382 ret = PTR_ERR(log_di); 2383 goto out; 2384 } 2385 cur += this_len; 2386 di = (struct btrfs_dir_item *)((char *)di + this_len); 2387 } 2388 } 2389 ret = btrfs_next_leaf(root, path); 2390 if (ret > 0) 2391 ret = 0; 2392 else if (ret == 0) 2393 goto process_leaf; 2394 out: 2395 btrfs_free_path(log_path); 2396 btrfs_release_path(path); 2397 return ret; 2398 } 2399 2400 2401 /* 2402 * deletion replay happens before we copy any new directory items 2403 * out of the log or out of backreferences from inodes. It 2404 * scans the log to find ranges of keys that log is authoritative for, 2405 * and then scans the directory to find items in those ranges that are 2406 * not present in the log. 2407 * 2408 * Anything we don't find in the log is unlinked and removed from the 2409 * directory. 
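 *
 * Note: the loop below runs twice, once over BTRFS_DIR_LOG_ITEM_KEY
 * ranges for the hashed BTRFS_DIR_ITEM_KEY namespace and once over
 * BTRFS_DIR_LOG_INDEX_KEY ranges for the BTRFS_DIR_INDEX_KEY
 * namespace, so deletions are replayed against both ways a name is
 * indexed in the directory.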
2410 */ 2411 static noinline int replay_dir_deletes(struct btrfs_trans_handle *trans, 2412 struct btrfs_root *root, 2413 struct btrfs_root *log, 2414 struct btrfs_path *path, 2415 u64 dirid, int del_all) 2416 { 2417 u64 range_start; 2418 u64 range_end; 2419 int key_type = BTRFS_DIR_LOG_ITEM_KEY; 2420 int ret = 0; 2421 struct btrfs_key dir_key; 2422 struct btrfs_key found_key; 2423 struct btrfs_path *log_path; 2424 struct inode *dir; 2425 2426 dir_key.objectid = dirid; 2427 dir_key.type = BTRFS_DIR_ITEM_KEY; 2428 log_path = btrfs_alloc_path(); 2429 if (!log_path) 2430 return -ENOMEM; 2431 2432 dir = read_one_inode(root, dirid); 2433 /* it isn't an error if the inode isn't there, that can happen 2434 * because we replay the deletes before we copy in the inode item 2435 * from the log 2436 */ 2437 if (!dir) { 2438 btrfs_free_path(log_path); 2439 return 0; 2440 } 2441 again: 2442 range_start = 0; 2443 range_end = 0; 2444 while (1) { 2445 if (del_all) 2446 range_end = (u64)-1; 2447 else { 2448 ret = find_dir_range(log, path, dirid, key_type, 2449 &range_start, &range_end); 2450 if (ret != 0) 2451 break; 2452 } 2453 2454 dir_key.offset = range_start; 2455 while (1) { 2456 int nritems; 2457 ret = btrfs_search_slot(NULL, root, &dir_key, path, 2458 0, 0); 2459 if (ret < 0) 2460 goto out; 2461 2462 nritems = btrfs_header_nritems(path->nodes[0]); 2463 if (path->slots[0] >= nritems) { 2464 ret = btrfs_next_leaf(root, path); 2465 if (ret == 1) 2466 break; 2467 else if (ret < 0) 2468 goto out; 2469 } 2470 btrfs_item_key_to_cpu(path->nodes[0], &found_key, 2471 path->slots[0]); 2472 if (found_key.objectid != dirid || 2473 found_key.type != dir_key.type) 2474 goto next_type; 2475 2476 if (found_key.offset > range_end) 2477 break; 2478 2479 ret = check_item_in_log(trans, root, log, path, 2480 log_path, dir, 2481 &found_key); 2482 if (ret) 2483 goto out; 2484 if (found_key.offset == (u64)-1) 2485 break; 2486 dir_key.offset = found_key.offset + 1; 2487 } 2488 btrfs_release_path(path); 2489 if (range_end == (u64)-1) 2490 break; 2491 range_start = range_end + 1; 2492 } 2493 2494 next_type: 2495 ret = 0; 2496 if (key_type == BTRFS_DIR_LOG_ITEM_KEY) { 2497 key_type = BTRFS_DIR_LOG_INDEX_KEY; 2498 dir_key.type = BTRFS_DIR_INDEX_KEY; 2499 btrfs_release_path(path); 2500 goto again; 2501 } 2502 out: 2503 btrfs_release_path(path); 2504 btrfs_free_path(log_path); 2505 iput(dir); 2506 return ret; 2507 } 2508 2509 /* 2510 * the process_func used to replay items from the log tree. This 2511 * gets called in two different stages. The first stage just looks 2512 * for inodes and makes sure they are all copied into the subvolume. 2513 * 2514 * The second stage copies all the other item types from the log into 2515 * the subvolume. The two stage approach is slower, but gets rid of 2516 * lots of complexity around inodes referencing other inodes that exist 2517 * only in the log (references come from either directory items or inode 2518 * back refs). 
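 *
 * For example, a logged dentry may point at an inode that itself only
 * exists in the log. If directory items were replayed before all
 * inode items, that dentry would reference an inode the subvolume
 * does not have yet; creating every logged inode first
 * (LOG_WALK_REPLAY_INODES) avoids such dangling references.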
2519 */ 2520 static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb, 2521 struct walk_control *wc, u64 gen, int level) 2522 { 2523 int nritems; 2524 struct btrfs_path *path; 2525 struct btrfs_root *root = wc->replay_dest; 2526 struct btrfs_key key; 2527 int i; 2528 int ret; 2529 2530 ret = btrfs_read_buffer(eb, gen, level, NULL); 2531 if (ret) 2532 return ret; 2533 2534 level = btrfs_header_level(eb); 2535 2536 if (level != 0) 2537 return 0; 2538 2539 path = btrfs_alloc_path(); 2540 if (!path) 2541 return -ENOMEM; 2542 2543 nritems = btrfs_header_nritems(eb); 2544 for (i = 0; i < nritems; i++) { 2545 btrfs_item_key_to_cpu(eb, &key, i); 2546 2547 /* inode keys are done during the first stage */ 2548 if (key.type == BTRFS_INODE_ITEM_KEY && 2549 wc->stage == LOG_WALK_REPLAY_INODES) { 2550 struct btrfs_inode_item *inode_item; 2551 u32 mode; 2552 2553 inode_item = btrfs_item_ptr(eb, i, 2554 struct btrfs_inode_item); 2555 /* 2556 * If we have a tmpfile (O_TMPFILE) that got fsync'ed 2557 * and never got linked before the fsync, skip it, as 2558 * replaying it is pointless since it would be deleted 2559 * later. We skip logging tmpfiles, but it's always 2560 * possible we are replaying a log created with a kernel 2561 * that used to log tmpfiles. 2562 */ 2563 if (btrfs_inode_nlink(eb, inode_item) == 0) { 2564 wc->ignore_cur_inode = true; 2565 continue; 2566 } else { 2567 wc->ignore_cur_inode = false; 2568 } 2569 ret = replay_xattr_deletes(wc->trans, root, log, 2570 path, key.objectid); 2571 if (ret) 2572 break; 2573 mode = btrfs_inode_mode(eb, inode_item); 2574 if (S_ISDIR(mode)) { 2575 ret = replay_dir_deletes(wc->trans, 2576 root, log, path, key.objectid, 0); 2577 if (ret) 2578 break; 2579 } 2580 ret = overwrite_item(wc->trans, root, path, 2581 eb, i, &key); 2582 if (ret) 2583 break; 2584 2585 /* 2586 * Before replaying extents, truncate the inode to its 2587 * size. We need to do it now and not after log replay 2588 * because before an fsync we can have prealloc extents 2589 * added beyond the inode's i_size. If we did it after, 2590 * through orphan cleanup for example, we would drop 2591 * those prealloc extents just after replaying them. 2592 */ 2593 if (S_ISREG(mode)) { 2594 struct inode *inode; 2595 u64 from; 2596 2597 inode = read_one_inode(root, key.objectid); 2598 if (!inode) { 2599 ret = -EIO; 2600 break; 2601 } 2602 from = ALIGN(i_size_read(inode), 2603 root->fs_info->sectorsize); 2604 ret = btrfs_drop_extents(wc->trans, root, inode, 2605 from, (u64)-1, 1); 2606 if (!ret) { 2607 /* Update the inode's nbytes. 
*/ 2608 ret = btrfs_update_inode(wc->trans, 2609 root, inode); 2610 } 2611 iput(inode); 2612 if (ret) 2613 break; 2614 } 2615 2616 ret = link_to_fixup_dir(wc->trans, root, 2617 path, key.objectid); 2618 if (ret) 2619 break; 2620 } 2621 2622 if (wc->ignore_cur_inode) 2623 continue; 2624 2625 if (key.type == BTRFS_DIR_INDEX_KEY && 2626 wc->stage == LOG_WALK_REPLAY_DIR_INDEX) { 2627 ret = replay_one_dir_item(wc->trans, root, path, 2628 eb, i, &key); 2629 if (ret) 2630 break; 2631 } 2632 2633 if (wc->stage < LOG_WALK_REPLAY_ALL) 2634 continue; 2635 2636 /* these keys are simply copied */ 2637 if (key.type == BTRFS_XATTR_ITEM_KEY) { 2638 ret = overwrite_item(wc->trans, root, path, 2639 eb, i, &key); 2640 if (ret) 2641 break; 2642 } else if (key.type == BTRFS_INODE_REF_KEY || 2643 key.type == BTRFS_INODE_EXTREF_KEY) { 2644 ret = add_inode_ref(wc->trans, root, log, path, 2645 eb, i, &key); 2646 if (ret && ret != -ENOENT) 2647 break; 2648 ret = 0; 2649 } else if (key.type == BTRFS_EXTENT_DATA_KEY) { 2650 ret = replay_one_extent(wc->trans, root, path, 2651 eb, i, &key); 2652 if (ret) 2653 break; 2654 } else if (key.type == BTRFS_DIR_ITEM_KEY) { 2655 ret = replay_one_dir_item(wc->trans, root, path, 2656 eb, i, &key); 2657 if (ret) 2658 break; 2659 } 2660 } 2661 btrfs_free_path(path); 2662 return ret; 2663 } 2664 2665 static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans, 2666 struct btrfs_root *root, 2667 struct btrfs_path *path, int *level, 2668 struct walk_control *wc) 2669 { 2670 struct btrfs_fs_info *fs_info = root->fs_info; 2671 u64 root_owner; 2672 u64 bytenr; 2673 u64 ptr_gen; 2674 struct extent_buffer *next; 2675 struct extent_buffer *cur; 2676 struct extent_buffer *parent; 2677 u32 blocksize; 2678 int ret = 0; 2679 2680 WARN_ON(*level < 0); 2681 WARN_ON(*level >= BTRFS_MAX_LEVEL); 2682 2683 while (*level > 0) { 2684 struct btrfs_key first_key; 2685 2686 WARN_ON(*level < 0); 2687 WARN_ON(*level >= BTRFS_MAX_LEVEL); 2688 cur = path->nodes[*level]; 2689 2690 WARN_ON(btrfs_header_level(cur) != *level); 2691 2692 if (path->slots[*level] >= 2693 btrfs_header_nritems(cur)) 2694 break; 2695 2696 bytenr = btrfs_node_blockptr(cur, path->slots[*level]); 2697 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]); 2698 btrfs_node_key_to_cpu(cur, &first_key, path->slots[*level]); 2699 blocksize = fs_info->nodesize; 2700 2701 parent = path->nodes[*level]; 2702 root_owner = btrfs_header_owner(parent); 2703 2704 next = btrfs_find_create_tree_block(fs_info, bytenr); 2705 if (IS_ERR(next)) 2706 return PTR_ERR(next); 2707 2708 if (*level == 1) { 2709 ret = wc->process_func(root, next, wc, ptr_gen, 2710 *level - 1); 2711 if (ret) { 2712 free_extent_buffer(next); 2713 return ret; 2714 } 2715 2716 path->slots[*level]++; 2717 if (wc->free) { 2718 ret = btrfs_read_buffer(next, ptr_gen, 2719 *level - 1, &first_key); 2720 if (ret) { 2721 free_extent_buffer(next); 2722 return ret; 2723 } 2724 2725 if (trans) { 2726 btrfs_tree_lock(next); 2727 btrfs_set_lock_blocking_write(next); 2728 clean_tree_block(fs_info, next); 2729 btrfs_wait_tree_block_writeback(next); 2730 btrfs_tree_unlock(next); 2731 } else { 2732 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags)) 2733 clear_extent_buffer_dirty(next); 2734 } 2735 2736 WARN_ON(root_owner != 2737 BTRFS_TREE_LOG_OBJECTID); 2738 ret = btrfs_free_and_pin_reserved_extent( 2739 fs_info, bytenr, 2740 blocksize); 2741 if (ret) { 2742 free_extent_buffer(next); 2743 return ret; 2744 } 2745 } 2746 free_extent_buffer(next); 2747 continue; 2748 } 2749 
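/*
 * Not at the level just above the leaves: read the child node and
 * descend one level, the next loop iteration continues from there.
 */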
ret = btrfs_read_buffer(next, ptr_gen, *level - 1, &first_key); 2750 if (ret) { 2751 free_extent_buffer(next); 2752 return ret; 2753 } 2754 2755 WARN_ON(*level <= 0); 2756 if (path->nodes[*level-1]) 2757 free_extent_buffer(path->nodes[*level-1]); 2758 path->nodes[*level-1] = next; 2759 *level = btrfs_header_level(next); 2760 path->slots[*level] = 0; 2761 cond_resched(); 2762 } 2763 WARN_ON(*level < 0); 2764 WARN_ON(*level >= BTRFS_MAX_LEVEL); 2765 2766 path->slots[*level] = btrfs_header_nritems(path->nodes[*level]); 2767 2768 cond_resched(); 2769 return 0; 2770 } 2771 2772 static noinline int walk_up_log_tree(struct btrfs_trans_handle *trans, 2773 struct btrfs_root *root, 2774 struct btrfs_path *path, int *level, 2775 struct walk_control *wc) 2776 { 2777 struct btrfs_fs_info *fs_info = root->fs_info; 2778 u64 root_owner; 2779 int i; 2780 int slot; 2781 int ret; 2782 2783 for (i = *level; i < BTRFS_MAX_LEVEL - 1 && path->nodes[i]; i++) { 2784 slot = path->slots[i]; 2785 if (slot + 1 < btrfs_header_nritems(path->nodes[i])) { 2786 path->slots[i]++; 2787 *level = i; 2788 WARN_ON(*level == 0); 2789 return 0; 2790 } else { 2791 struct extent_buffer *parent; 2792 if (path->nodes[*level] == root->node) 2793 parent = path->nodes[*level]; 2794 else 2795 parent = path->nodes[*level + 1]; 2796 2797 root_owner = btrfs_header_owner(parent); 2798 ret = wc->process_func(root, path->nodes[*level], wc, 2799 btrfs_header_generation(path->nodes[*level]), 2800 *level); 2801 if (ret) 2802 return ret; 2803 2804 if (wc->free) { 2805 struct extent_buffer *next; 2806 2807 next = path->nodes[*level]; 2808 2809 if (trans) { 2810 btrfs_tree_lock(next); 2811 btrfs_set_lock_blocking_write(next); 2812 clean_tree_block(fs_info, next); 2813 btrfs_wait_tree_block_writeback(next); 2814 btrfs_tree_unlock(next); 2815 } else { 2816 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags)) 2817 clear_extent_buffer_dirty(next); 2818 } 2819 2820 WARN_ON(root_owner != BTRFS_TREE_LOG_OBJECTID); 2821 ret = btrfs_free_and_pin_reserved_extent( 2822 fs_info, 2823 path->nodes[*level]->start, 2824 path->nodes[*level]->len); 2825 if (ret) 2826 return ret; 2827 } 2828 free_extent_buffer(path->nodes[*level]); 2829 path->nodes[*level] = NULL; 2830 *level = i + 1; 2831 } 2832 } 2833 return 1; 2834 } 2835 2836 /* 2837 * drop the reference count on the tree rooted at 'snap'. This traverses 2838 * the tree freeing any blocks that have a ref count of zero after being 2839 * decremented. 2840 */ 2841 static int walk_log_tree(struct btrfs_trans_handle *trans, 2842 struct btrfs_root *log, struct walk_control *wc) 2843 { 2844 struct btrfs_fs_info *fs_info = log->fs_info; 2845 int ret = 0; 2846 int wret; 2847 int level; 2848 struct btrfs_path *path; 2849 int orig_level; 2850 2851 path = btrfs_alloc_path(); 2852 if (!path) 2853 return -ENOMEM; 2854 2855 level = btrfs_header_level(log->node); 2856 orig_level = level; 2857 path->nodes[level] = log->node; 2858 extent_buffer_get(log->node); 2859 path->slots[level] = 0; 2860 2861 while (1) { 2862 wret = walk_down_log_tree(trans, log, path, &level, wc); 2863 if (wret > 0) 2864 break; 2865 if (wret < 0) { 2866 ret = wret; 2867 goto out; 2868 } 2869 2870 wret = walk_up_log_tree(trans, log, path, &level, wc); 2871 if (wret > 0) 2872 break; 2873 if (wret < 0) { 2874 ret = wret; 2875 goto out; 2876 } 2877 } 2878 2879 /* was the root node processed? 
if not, catch it here */ 2880 if (path->nodes[orig_level]) { 2881 ret = wc->process_func(log, path->nodes[orig_level], wc, 2882 btrfs_header_generation(path->nodes[orig_level]), 2883 orig_level); 2884 if (ret) 2885 goto out; 2886 if (wc->free) { 2887 struct extent_buffer *next; 2888 2889 next = path->nodes[orig_level]; 2890 2891 if (trans) { 2892 btrfs_tree_lock(next); 2893 btrfs_set_lock_blocking_write(next); 2894 clean_tree_block(fs_info, next); 2895 btrfs_wait_tree_block_writeback(next); 2896 btrfs_tree_unlock(next); 2897 } else { 2898 if (test_and_clear_bit(EXTENT_BUFFER_DIRTY, &next->bflags)) 2899 clear_extent_buffer_dirty(next); 2900 } 2901 2902 WARN_ON(log->root_key.objectid != 2903 BTRFS_TREE_LOG_OBJECTID); 2904 ret = btrfs_free_and_pin_reserved_extent(fs_info, 2905 next->start, next->len); 2906 if (ret) 2907 goto out; 2908 } 2909 } 2910 2911 out: 2912 btrfs_free_path(path); 2913 return ret; 2914 } 2915 2916 /* 2917 * helper function to update the item for a given subvolumes log root 2918 * in the tree of log roots 2919 */ 2920 static int update_log_root(struct btrfs_trans_handle *trans, 2921 struct btrfs_root *log) 2922 { 2923 struct btrfs_fs_info *fs_info = log->fs_info; 2924 int ret; 2925 2926 if (log->log_transid == 1) { 2927 /* insert root item on the first sync */ 2928 ret = btrfs_insert_root(trans, fs_info->log_root_tree, 2929 &log->root_key, &log->root_item); 2930 } else { 2931 ret = btrfs_update_root(trans, fs_info->log_root_tree, 2932 &log->root_key, &log->root_item); 2933 } 2934 return ret; 2935 } 2936 2937 static void wait_log_commit(struct btrfs_root *root, int transid) 2938 { 2939 DEFINE_WAIT(wait); 2940 int index = transid % 2; 2941 2942 /* 2943 * we only allow two pending log transactions at a time, 2944 * so we know that if ours is more than 2 older than the 2945 * current transaction, we're done 2946 */ 2947 for (;;) { 2948 prepare_to_wait(&root->log_commit_wait[index], 2949 &wait, TASK_UNINTERRUPTIBLE); 2950 2951 if (!(root->log_transid_committed < transid && 2952 atomic_read(&root->log_commit[index]))) 2953 break; 2954 2955 mutex_unlock(&root->log_mutex); 2956 schedule(); 2957 mutex_lock(&root->log_mutex); 2958 } 2959 finish_wait(&root->log_commit_wait[index], &wait); 2960 } 2961 2962 static void wait_for_writer(struct btrfs_root *root) 2963 { 2964 DEFINE_WAIT(wait); 2965 2966 for (;;) { 2967 prepare_to_wait(&root->log_writer_wait, &wait, 2968 TASK_UNINTERRUPTIBLE); 2969 if (!atomic_read(&root->log_writers)) 2970 break; 2971 2972 mutex_unlock(&root->log_mutex); 2973 schedule(); 2974 mutex_lock(&root->log_mutex); 2975 } 2976 finish_wait(&root->log_writer_wait, &wait); 2977 } 2978 2979 static inline void btrfs_remove_log_ctx(struct btrfs_root *root, 2980 struct btrfs_log_ctx *ctx) 2981 { 2982 if (!ctx) 2983 return; 2984 2985 mutex_lock(&root->log_mutex); 2986 list_del_init(&ctx->list); 2987 mutex_unlock(&root->log_mutex); 2988 } 2989 2990 /* 2991 * Invoked in log mutex context, or be sure there is no other task which 2992 * can access the list. 2993 */ 2994 static inline void btrfs_remove_all_log_ctxs(struct btrfs_root *root, 2995 int index, int error) 2996 { 2997 struct btrfs_log_ctx *ctx; 2998 struct btrfs_log_ctx *safe; 2999 3000 list_for_each_entry_safe(ctx, safe, &root->log_ctxs[index], list) { 3001 list_del_init(&ctx->list); 3002 ctx->log_ret = error; 3003 } 3004 3005 INIT_LIST_HEAD(&root->log_ctxs[index]); 3006 } 3007 3008 /* 3009 * btrfs_sync_log does sends a given tree log down to the disk and 3010 * updates the super blocks to record it. 
When this call is done, 3011 * you know that any inodes previously logged are safely on disk only 3012 * if it returns 0. 3013 * 3014 * Any other return value means you need to call btrfs_commit_transaction. 3015 * Some of the edge cases for fsyncing directories that have had unlinks 3016 * or renames done in the past mean that sometimes the only safe 3017 * fsync is to commit the whole FS. When btrfs_sync_log returns -EAGAIN, 3018 * that has happened. 3019 */ 3020 int btrfs_sync_log(struct btrfs_trans_handle *trans, 3021 struct btrfs_root *root, struct btrfs_log_ctx *ctx) 3022 { 3023 int index1; 3024 int index2; 3025 int mark; 3026 int ret; 3027 struct btrfs_fs_info *fs_info = root->fs_info; 3028 struct btrfs_root *log = root->log_root; 3029 struct btrfs_root *log_root_tree = fs_info->log_root_tree; 3030 int log_transid = 0; 3031 struct btrfs_log_ctx root_log_ctx; 3032 struct blk_plug plug; 3033 3034 mutex_lock(&root->log_mutex); 3035 log_transid = ctx->log_transid; 3036 if (root->log_transid_committed >= log_transid) { 3037 mutex_unlock(&root->log_mutex); 3038 return ctx->log_ret; 3039 } 3040 3041 index1 = log_transid % 2; 3042 if (atomic_read(&root->log_commit[index1])) { 3043 wait_log_commit(root, log_transid); 3044 mutex_unlock(&root->log_mutex); 3045 return ctx->log_ret; 3046 } 3047 ASSERT(log_transid == root->log_transid); 3048 atomic_set(&root->log_commit[index1], 1); 3049 3050 /* wait for previous tree log sync to complete */ 3051 if (atomic_read(&root->log_commit[(index1 + 1) % 2])) 3052 wait_log_commit(root, log_transid - 1); 3053 3054 while (1) { 3055 int batch = atomic_read(&root->log_batch); 3056 /* when we're on an ssd, just kick the log commit out */ 3057 if (!btrfs_test_opt(fs_info, SSD) && 3058 test_bit(BTRFS_ROOT_MULTI_LOG_TASKS, &root->state)) { 3059 mutex_unlock(&root->log_mutex); 3060 schedule_timeout_uninterruptible(1); 3061 mutex_lock(&root->log_mutex); 3062 } 3063 wait_for_writer(root); 3064 if (batch == atomic_read(&root->log_batch)) 3065 break; 3066 } 3067 3068 /* bail out if we need to do a full commit */ 3069 if (btrfs_need_log_full_commit(fs_info, trans)) { 3070 ret = -EAGAIN; 3071 mutex_unlock(&root->log_mutex); 3072 goto out; 3073 } 3074 3075 if (log_transid % 2 == 0) 3076 mark = EXTENT_DIRTY; 3077 else 3078 mark = EXTENT_NEW; 3079 3080 /* we start IO on all the marked extents here, but we don't actually 3081 * wait for them until later. 3082 */ 3083 blk_start_plug(&plug); 3084 ret = btrfs_write_marked_extents(fs_info, &log->dirty_log_pages, mark); 3085 if (ret) { 3086 blk_finish_plug(&plug); 3087 btrfs_abort_transaction(trans, ret); 3088 btrfs_set_log_full_commit(fs_info, trans); 3089 mutex_unlock(&root->log_mutex); 3090 goto out; 3091 } 3092 3093 btrfs_set_root_node(&log->root_item, log->node); 3094 3095 root->log_transid++; 3096 log->log_transid = root->log_transid; 3097 root->log_start_pid = 0; 3098 /* 3099 * IO has been started, blocks of the log tree have WRITTEN flag set 3100 * in their headers. new modifications of the log will be written to 3101 * new positions. so it's safe to allow log writers to go in. 
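 * (root->log_transid was bumped just above, so writers that join from
 * here on are accounted to the next log commit.)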
3102 */ 3103 mutex_unlock(&root->log_mutex); 3104 3105 btrfs_init_log_ctx(&root_log_ctx, NULL); 3106 3107 mutex_lock(&log_root_tree->log_mutex); 3108 atomic_inc(&log_root_tree->log_batch); 3109 atomic_inc(&log_root_tree->log_writers); 3110 3111 index2 = log_root_tree->log_transid % 2; 3112 list_add_tail(&root_log_ctx.list, &log_root_tree->log_ctxs[index2]); 3113 root_log_ctx.log_transid = log_root_tree->log_transid; 3114 3115 mutex_unlock(&log_root_tree->log_mutex); 3116 3117 ret = update_log_root(trans, log); 3118 3119 mutex_lock(&log_root_tree->log_mutex); 3120 if (atomic_dec_and_test(&log_root_tree->log_writers)) { 3121 /* atomic_dec_and_test implies a barrier */ 3122 cond_wake_up_nomb(&log_root_tree->log_writer_wait); 3123 } 3124 3125 if (ret) { 3126 if (!list_empty(&root_log_ctx.list)) 3127 list_del_init(&root_log_ctx.list); 3128 3129 blk_finish_plug(&plug); 3130 btrfs_set_log_full_commit(fs_info, trans); 3131 3132 if (ret != -ENOSPC) { 3133 btrfs_abort_transaction(trans, ret); 3134 mutex_unlock(&log_root_tree->log_mutex); 3135 goto out; 3136 } 3137 btrfs_wait_tree_log_extents(log, mark); 3138 mutex_unlock(&log_root_tree->log_mutex); 3139 ret = -EAGAIN; 3140 goto out; 3141 } 3142 3143 if (log_root_tree->log_transid_committed >= root_log_ctx.log_transid) { 3144 blk_finish_plug(&plug); 3145 list_del_init(&root_log_ctx.list); 3146 mutex_unlock(&log_root_tree->log_mutex); 3147 ret = root_log_ctx.log_ret; 3148 goto out; 3149 } 3150 3151 index2 = root_log_ctx.log_transid % 2; 3152 if (atomic_read(&log_root_tree->log_commit[index2])) { 3153 blk_finish_plug(&plug); 3154 ret = btrfs_wait_tree_log_extents(log, mark); 3155 wait_log_commit(log_root_tree, 3156 root_log_ctx.log_transid); 3157 mutex_unlock(&log_root_tree->log_mutex); 3158 if (!ret) 3159 ret = root_log_ctx.log_ret; 3160 goto out; 3161 } 3162 ASSERT(root_log_ctx.log_transid == log_root_tree->log_transid); 3163 atomic_set(&log_root_tree->log_commit[index2], 1); 3164 3165 if (atomic_read(&log_root_tree->log_commit[(index2 + 1) % 2])) { 3166 wait_log_commit(log_root_tree, 3167 root_log_ctx.log_transid - 1); 3168 } 3169 3170 wait_for_writer(log_root_tree); 3171 3172 /* 3173 * now that we've moved on to the tree of log tree roots, 3174 * check the full commit flag again 3175 */ 3176 if (btrfs_need_log_full_commit(fs_info, trans)) { 3177 blk_finish_plug(&plug); 3178 btrfs_wait_tree_log_extents(log, mark); 3179 mutex_unlock(&log_root_tree->log_mutex); 3180 ret = -EAGAIN; 3181 goto out_wake_log_root; 3182 } 3183 3184 ret = btrfs_write_marked_extents(fs_info, 3185 &log_root_tree->dirty_log_pages, 3186 EXTENT_DIRTY | EXTENT_NEW); 3187 blk_finish_plug(&plug); 3188 if (ret) { 3189 btrfs_set_log_full_commit(fs_info, trans); 3190 btrfs_abort_transaction(trans, ret); 3191 mutex_unlock(&log_root_tree->log_mutex); 3192 goto out_wake_log_root; 3193 } 3194 ret = btrfs_wait_tree_log_extents(log, mark); 3195 if (!ret) 3196 ret = btrfs_wait_tree_log_extents(log_root_tree, 3197 EXTENT_NEW | EXTENT_DIRTY); 3198 if (ret) { 3199 btrfs_set_log_full_commit(fs_info, trans); 3200 mutex_unlock(&log_root_tree->log_mutex); 3201 goto out_wake_log_root; 3202 } 3203 3204 btrfs_set_super_log_root(fs_info->super_for_commit, 3205 log_root_tree->node->start); 3206 btrfs_set_super_log_root_level(fs_info->super_for_commit, 3207 btrfs_header_level(log_root_tree->node)); 3208 3209 log_root_tree->log_transid++; 3210 mutex_unlock(&log_root_tree->log_mutex); 3211 3212 /* 3213 * Nobody else is going to jump in and write the ctree 3214 * super here because the log_commit atomic 
below is protecting 3215 * us. We must be called with a transaction handle pinning 3216 * the running transaction open, so a full commit can't hop 3217 * in and cause problems either. 3218 */ 3219 ret = write_all_supers(fs_info, 1); 3220 if (ret) { 3221 btrfs_set_log_full_commit(fs_info, trans); 3222 btrfs_abort_transaction(trans, ret); 3223 goto out_wake_log_root; 3224 } 3225 3226 mutex_lock(&root->log_mutex); 3227 if (root->last_log_commit < log_transid) 3228 root->last_log_commit = log_transid; 3229 mutex_unlock(&root->log_mutex); 3230 3231 out_wake_log_root: 3232 mutex_lock(&log_root_tree->log_mutex); 3233 btrfs_remove_all_log_ctxs(log_root_tree, index2, ret); 3234 3235 log_root_tree->log_transid_committed++; 3236 atomic_set(&log_root_tree->log_commit[index2], 0); 3237 mutex_unlock(&log_root_tree->log_mutex); 3238 3239 /* 3240 * The barrier before waitqueue_active (in cond_wake_up) is needed so 3241 * all the updates above are seen by the woken threads. It might not be 3242 * necessary, but proving that seems to be hard. 3243 */ 3244 cond_wake_up(&log_root_tree->log_commit_wait[index2]); 3245 out: 3246 mutex_lock(&root->log_mutex); 3247 btrfs_remove_all_log_ctxs(root, index1, ret); 3248 root->log_transid_committed++; 3249 atomic_set(&root->log_commit[index1], 0); 3250 mutex_unlock(&root->log_mutex); 3251 3252 /* 3253 * The barrier before waitqueue_active (in cond_wake_up) is needed so 3254 * all the updates above are seen by the woken threads. It might not be 3255 * necessary, but proving that seems to be hard. 3256 */ 3257 cond_wake_up(&root->log_commit_wait[index1]); 3258 return ret; 3259 } 3260 3261 static void free_log_tree(struct btrfs_trans_handle *trans, 3262 struct btrfs_root *log) 3263 { 3264 int ret; 3265 struct walk_control wc = { 3266 .free = 1, 3267 .process_func = process_one_buffer 3268 }; 3269 3270 ret = walk_log_tree(trans, log, &wc); 3271 if (ret) { 3272 if (trans) 3273 btrfs_abort_transaction(trans, ret); 3274 else 3275 btrfs_handle_fs_error(log->fs_info, ret, NULL); 3276 } 3277 3278 clear_extent_bits(&log->dirty_log_pages, 0, (u64)-1, 3279 EXTENT_DIRTY | EXTENT_NEW | EXTENT_NEED_WAIT); 3280 free_extent_buffer(log->node); 3281 kfree(log); 3282 } 3283 3284 /* 3285 * free all the extents used by the tree log. This should be called 3286 * at commit time of the full transaction 3287 */ 3288 int btrfs_free_log(struct btrfs_trans_handle *trans, struct btrfs_root *root) 3289 { 3290 if (root->log_root) { 3291 free_log_tree(trans, root->log_root); 3292 root->log_root = NULL; 3293 } 3294 return 0; 3295 } 3296 3297 int btrfs_free_log_root_tree(struct btrfs_trans_handle *trans, 3298 struct btrfs_fs_info *fs_info) 3299 { 3300 if (fs_info->log_root_tree) { 3301 free_log_tree(trans, fs_info->log_root_tree); 3302 fs_info->log_root_tree = NULL; 3303 } 3304 return 0; 3305 } 3306 3307 /* 3308 * If both a file and directory are logged, and unlinks or renames are 3309 * mixed in, we have a few interesting corners: 3310 * 3311 * create file X in dir Y 3312 * link file X to X.link in dir Y 3313 * fsync file X 3314 * unlink file X but leave X.link 3315 * fsync dir Y 3316 * 3317 * After a crash we would expect only X.link to exist. But file X 3318 * didn't get fsync'd again so the log has back refs for X and X.link. 3319 * 3320 * We solve this by removing directory entries and inode backrefs from the 3321 * log when a file that was logged in the current transaction is 3322 * unlinked. 
Any later fsync will include the updated log entries, and 3323 * we'll be able to reconstruct the proper directory items from backrefs. 3324 * 3325 * This optimizations allows us to avoid relogging the entire inode 3326 * or the entire directory. 3327 */ 3328 int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans, 3329 struct btrfs_root *root, 3330 const char *name, int name_len, 3331 struct btrfs_inode *dir, u64 index) 3332 { 3333 struct btrfs_root *log; 3334 struct btrfs_dir_item *di; 3335 struct btrfs_path *path; 3336 int ret; 3337 int err = 0; 3338 int bytes_del = 0; 3339 u64 dir_ino = btrfs_ino(dir); 3340 3341 if (dir->logged_trans < trans->transid) 3342 return 0; 3343 3344 ret = join_running_log_trans(root); 3345 if (ret) 3346 return 0; 3347 3348 mutex_lock(&dir->log_mutex); 3349 3350 log = root->log_root; 3351 path = btrfs_alloc_path(); 3352 if (!path) { 3353 err = -ENOMEM; 3354 goto out_unlock; 3355 } 3356 3357 di = btrfs_lookup_dir_item(trans, log, path, dir_ino, 3358 name, name_len, -1); 3359 if (IS_ERR(di)) { 3360 err = PTR_ERR(di); 3361 goto fail; 3362 } 3363 if (di) { 3364 ret = btrfs_delete_one_dir_name(trans, log, path, di); 3365 bytes_del += name_len; 3366 if (ret) { 3367 err = ret; 3368 goto fail; 3369 } 3370 } 3371 btrfs_release_path(path); 3372 di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino, 3373 index, name, name_len, -1); 3374 if (IS_ERR(di)) { 3375 err = PTR_ERR(di); 3376 goto fail; 3377 } 3378 if (di) { 3379 ret = btrfs_delete_one_dir_name(trans, log, path, di); 3380 bytes_del += name_len; 3381 if (ret) { 3382 err = ret; 3383 goto fail; 3384 } 3385 } 3386 3387 /* update the directory size in the log to reflect the names 3388 * we have removed 3389 */ 3390 if (bytes_del) { 3391 struct btrfs_key key; 3392 3393 key.objectid = dir_ino; 3394 key.offset = 0; 3395 key.type = BTRFS_INODE_ITEM_KEY; 3396 btrfs_release_path(path); 3397 3398 ret = btrfs_search_slot(trans, log, &key, path, 0, 1); 3399 if (ret < 0) { 3400 err = ret; 3401 goto fail; 3402 } 3403 if (ret == 0) { 3404 struct btrfs_inode_item *item; 3405 u64 i_size; 3406 3407 item = btrfs_item_ptr(path->nodes[0], path->slots[0], 3408 struct btrfs_inode_item); 3409 i_size = btrfs_inode_size(path->nodes[0], item); 3410 if (i_size > bytes_del) 3411 i_size -= bytes_del; 3412 else 3413 i_size = 0; 3414 btrfs_set_inode_size(path->nodes[0], item, i_size); 3415 btrfs_mark_buffer_dirty(path->nodes[0]); 3416 } else 3417 ret = 0; 3418 btrfs_release_path(path); 3419 } 3420 fail: 3421 btrfs_free_path(path); 3422 out_unlock: 3423 mutex_unlock(&dir->log_mutex); 3424 if (ret == -ENOSPC) { 3425 btrfs_set_log_full_commit(root->fs_info, trans); 3426 ret = 0; 3427 } else if (ret < 0) 3428 btrfs_abort_transaction(trans, ret); 3429 3430 btrfs_end_log_trans(root); 3431 3432 return err; 3433 } 3434 3435 /* see comments for btrfs_del_dir_entries_in_log */ 3436 int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans, 3437 struct btrfs_root *root, 3438 const char *name, int name_len, 3439 struct btrfs_inode *inode, u64 dirid) 3440 { 3441 struct btrfs_fs_info *fs_info = root->fs_info; 3442 struct btrfs_root *log; 3443 u64 index; 3444 int ret; 3445 3446 if (inode->logged_trans < trans->transid) 3447 return 0; 3448 3449 ret = join_running_log_trans(root); 3450 if (ret) 3451 return 0; 3452 log = root->log_root; 3453 mutex_lock(&inode->log_mutex); 3454 3455 ret = btrfs_del_inode_ref(trans, log, name, name_len, btrfs_ino(inode), 3456 dirid, &index); 3457 mutex_unlock(&inode->log_mutex); 3458 if (ret == -ENOSPC) { 3459 
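/*
 * The log tree ran out of space: flag a full transaction commit
 * and report success, so the unlink itself still goes through and
 * the next fsync falls back to committing the whole transaction.
 */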
btrfs_set_log_full_commit(fs_info, trans); 3460 ret = 0; 3461 } else if (ret < 0 && ret != -ENOENT) 3462 btrfs_abort_transaction(trans, ret); 3463 btrfs_end_log_trans(root); 3464 3465 return ret; 3466 } 3467 3468 /* 3469 * creates a range item in the log for 'dirid'. first_offset and 3470 * last_offset tell us which parts of the key space the log should 3471 * be considered authoritative for. 3472 */ 3473 static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans, 3474 struct btrfs_root *log, 3475 struct btrfs_path *path, 3476 int key_type, u64 dirid, 3477 u64 first_offset, u64 last_offset) 3478 { 3479 int ret; 3480 struct btrfs_key key; 3481 struct btrfs_dir_log_item *item; 3482 3483 key.objectid = dirid; 3484 key.offset = first_offset; 3485 if (key_type == BTRFS_DIR_ITEM_KEY) 3486 key.type = BTRFS_DIR_LOG_ITEM_KEY; 3487 else 3488 key.type = BTRFS_DIR_LOG_INDEX_KEY; 3489 ret = btrfs_insert_empty_item(trans, log, path, &key, sizeof(*item)); 3490 if (ret) 3491 return ret; 3492 3493 item = btrfs_item_ptr(path->nodes[0], path->slots[0], 3494 struct btrfs_dir_log_item); 3495 btrfs_set_dir_log_end(path->nodes[0], item, last_offset); 3496 btrfs_mark_buffer_dirty(path->nodes[0]); 3497 btrfs_release_path(path); 3498 return 0; 3499 } 3500 3501 /* 3502 * log all the items included in the current transaction for a given 3503 * directory. This also creates the range items in the log tree required 3504 * to replay anything deleted before the fsync 3505 */ 3506 static noinline int log_dir_items(struct btrfs_trans_handle *trans, 3507 struct btrfs_root *root, struct btrfs_inode *inode, 3508 struct btrfs_path *path, 3509 struct btrfs_path *dst_path, int key_type, 3510 struct btrfs_log_ctx *ctx, 3511 u64 min_offset, u64 *last_offset_ret) 3512 { 3513 struct btrfs_key min_key; 3514 struct btrfs_root *log = root->log_root; 3515 struct extent_buffer *src; 3516 int err = 0; 3517 int ret; 3518 int i; 3519 int nritems; 3520 u64 first_offset = min_offset; 3521 u64 last_offset = (u64)-1; 3522 u64 ino = btrfs_ino(inode); 3523 3524 log = root->log_root; 3525 3526 min_key.objectid = ino; 3527 min_key.type = key_type; 3528 min_key.offset = min_offset; 3529 3530 ret = btrfs_search_forward(root, &min_key, path, trans->transid); 3531 3532 /* 3533 * we didn't find anything from this transaction, see if there 3534 * is anything at all 3535 */ 3536 if (ret != 0 || min_key.objectid != ino || min_key.type != key_type) { 3537 min_key.objectid = ino; 3538 min_key.type = key_type; 3539 min_key.offset = (u64)-1; 3540 btrfs_release_path(path); 3541 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0); 3542 if (ret < 0) { 3543 btrfs_release_path(path); 3544 return ret; 3545 } 3546 ret = btrfs_previous_item(root, path, ino, key_type); 3547 3548 /* if ret == 0 there are items for this type, 3549 * create a range to tell us the last key of this type. 3550 * otherwise, there are no items in this directory after 3551 * *min_offset, and we create a range to indicate that. 
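 *
 * For example (illustrative offsets): if no items of this type in the
 * directory changed in the current transaction, *min_offset is 0 and
 * the last existing key of this type is at offset 10, we log the
 * range [11, (u64)-1], telling replay that nothing beyond offset 10
 * existed at fsync time.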
3552 */ 3553 if (ret == 0) { 3554 struct btrfs_key tmp; 3555 btrfs_item_key_to_cpu(path->nodes[0], &tmp, 3556 path->slots[0]); 3557 if (key_type == tmp.type) 3558 first_offset = max(min_offset, tmp.offset) + 1; 3559 } 3560 goto done; 3561 } 3562 3563 /* go backward to find any previous key */ 3564 ret = btrfs_previous_item(root, path, ino, key_type); 3565 if (ret == 0) { 3566 struct btrfs_key tmp; 3567 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]); 3568 if (key_type == tmp.type) { 3569 first_offset = tmp.offset; 3570 ret = overwrite_item(trans, log, dst_path, 3571 path->nodes[0], path->slots[0], 3572 &tmp); 3573 if (ret) { 3574 err = ret; 3575 goto done; 3576 } 3577 } 3578 } 3579 btrfs_release_path(path); 3580 3581 /* 3582 * Find the first key from this transaction again. See the note for 3583 * log_new_dir_dentries, if we're logging a directory recursively we 3584 * won't be holding its i_mutex, which means we can modify the directory 3585 * while we're logging it. If we remove an entry between our first 3586 * search and this search we'll not find the key again and can just 3587 * bail. 3588 */ 3589 ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0); 3590 if (ret != 0) 3591 goto done; 3592 3593 /* 3594 * we have a block from this transaction, log every item in it 3595 * from our directory 3596 */ 3597 while (1) { 3598 struct btrfs_key tmp; 3599 src = path->nodes[0]; 3600 nritems = btrfs_header_nritems(src); 3601 for (i = path->slots[0]; i < nritems; i++) { 3602 struct btrfs_dir_item *di; 3603 3604 btrfs_item_key_to_cpu(src, &min_key, i); 3605 3606 if (min_key.objectid != ino || min_key.type != key_type) 3607 goto done; 3608 ret = overwrite_item(trans, log, dst_path, src, i, 3609 &min_key); 3610 if (ret) { 3611 err = ret; 3612 goto done; 3613 } 3614 3615 /* 3616 * We must make sure that when we log a directory entry, 3617 * the corresponding inode, after log replay, has a 3618 * matching link count. For example: 3619 * 3620 * touch foo 3621 * mkdir mydir 3622 * sync 3623 * ln foo mydir/bar 3624 * xfs_io -c "fsync" mydir 3625 * <crash> 3626 * <mount fs and log replay> 3627 * 3628 * Would result in a fsync log that when replayed, our 3629 * file inode would have a link count of 1, but we get 3630 * two directory entries pointing to the same inode. 3631 * After removing one of the names, it would not be 3632 * possible to remove the other name, which resulted 3633 * always in stale file handle errors, and would not 3634 * be possible to rmdir the parent directory, since 3635 * its i_size could never decrement to the value 3636 * BTRFS_EMPTY_DIR_SIZE, resulting in -ENOTEMPTY errors. 
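 *
 * Setting ctx->log_new_dentries below is what makes the caller log
 * the inodes these new dentries point to as well, so their link
 * counts match the directory after replay.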
3637 */ 3638 di = btrfs_item_ptr(src, i, struct btrfs_dir_item); 3639 btrfs_dir_item_key_to_cpu(src, di, &tmp); 3640 if (ctx && 3641 (btrfs_dir_transid(src, di) == trans->transid || 3642 btrfs_dir_type(src, di) == BTRFS_FT_DIR) && 3643 tmp.type != BTRFS_ROOT_ITEM_KEY) 3644 ctx->log_new_dentries = true; 3645 } 3646 path->slots[0] = nritems; 3647 3648 /* 3649 * look ahead to the next item and see if it is also 3650 * from this directory and from this transaction 3651 */ 3652 ret = btrfs_next_leaf(root, path); 3653 if (ret) { 3654 if (ret == 1) 3655 last_offset = (u64)-1; 3656 else 3657 err = ret; 3658 goto done; 3659 } 3660 btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]); 3661 if (tmp.objectid != ino || tmp.type != key_type) { 3662 last_offset = (u64)-1; 3663 goto done; 3664 } 3665 if (btrfs_header_generation(path->nodes[0]) != trans->transid) { 3666 ret = overwrite_item(trans, log, dst_path, 3667 path->nodes[0], path->slots[0], 3668 &tmp); 3669 if (ret) 3670 err = ret; 3671 else 3672 last_offset = tmp.offset; 3673 goto done; 3674 } 3675 } 3676 done: 3677 btrfs_release_path(path); 3678 btrfs_release_path(dst_path); 3679 3680 if (err == 0) { 3681 *last_offset_ret = last_offset; 3682 /* 3683 * insert the log range keys to indicate where the log 3684 * is valid 3685 */ 3686 ret = insert_dir_log_key(trans, log, path, key_type, 3687 ino, first_offset, last_offset); 3688 if (ret) 3689 err = ret; 3690 } 3691 return err; 3692 } 3693 3694 /* 3695 * logging directories is very similar to logging inodes, We find all the items 3696 * from the current transaction and write them to the log. 3697 * 3698 * The recovery code scans the directory in the subvolume, and if it finds a 3699 * key in the range logged that is not present in the log tree, then it means 3700 * that dir entry was unlinked during the transaction. 3701 * 3702 * In order for that scan to work, we must include one key smaller than 3703 * the smallest logged by this transaction and one key larger than the largest 3704 * key logged by this transaction. 3705 */ 3706 static noinline int log_directory_changes(struct btrfs_trans_handle *trans, 3707 struct btrfs_root *root, struct btrfs_inode *inode, 3708 struct btrfs_path *path, 3709 struct btrfs_path *dst_path, 3710 struct btrfs_log_ctx *ctx) 3711 { 3712 u64 min_key; 3713 u64 max_key; 3714 int ret; 3715 int key_type = BTRFS_DIR_ITEM_KEY; 3716 3717 again: 3718 min_key = 0; 3719 max_key = 0; 3720 while (1) { 3721 ret = log_dir_items(trans, root, inode, path, dst_path, key_type, 3722 ctx, min_key, &max_key); 3723 if (ret) 3724 return ret; 3725 if (max_key == (u64)-1) 3726 break; 3727 min_key = max_key + 1; 3728 } 3729 3730 if (key_type == BTRFS_DIR_ITEM_KEY) { 3731 key_type = BTRFS_DIR_INDEX_KEY; 3732 goto again; 3733 } 3734 return 0; 3735 } 3736 3737 /* 3738 * a helper function to drop items from the log before we relog an 3739 * inode. max_key_type indicates the highest item type to remove. 3740 * This cannot be run for file data extents because it does not 3741 * free the extents they point to. 
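 *
 * For example, a max_key_type of BTRFS_XATTR_ITEM_KEY drops the
 * previously logged inode item, inode refs and xattrs for this
 * objectid, while BTRFS_EXTENT_DATA_KEY items, which sort after it,
 * are left in place.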
3742 */ 3743 static int drop_objectid_items(struct btrfs_trans_handle *trans, 3744 struct btrfs_root *log, 3745 struct btrfs_path *path, 3746 u64 objectid, int max_key_type) 3747 { 3748 int ret; 3749 struct btrfs_key key; 3750 struct btrfs_key found_key; 3751 int start_slot; 3752 3753 key.objectid = objectid; 3754 key.type = max_key_type; 3755 key.offset = (u64)-1; 3756 3757 while (1) { 3758 ret = btrfs_search_slot(trans, log, &key, path, -1, 1); 3759 BUG_ON(ret == 0); /* Logic error */ 3760 if (ret < 0) 3761 break; 3762 3763 if (path->slots[0] == 0) 3764 break; 3765 3766 path->slots[0]--; 3767 btrfs_item_key_to_cpu(path->nodes[0], &found_key, 3768 path->slots[0]); 3769 3770 if (found_key.objectid != objectid) 3771 break; 3772 3773 found_key.offset = 0; 3774 found_key.type = 0; 3775 ret = btrfs_bin_search(path->nodes[0], &found_key, 0, 3776 &start_slot); 3777 if (ret < 0) 3778 break; 3779 3780 ret = btrfs_del_items(trans, log, path, start_slot, 3781 path->slots[0] - start_slot + 1); 3782 /* 3783 * If start slot isn't 0 then we don't need to re-search, we've 3784 * found the last guy with the objectid in this tree. 3785 */ 3786 if (ret || start_slot != 0) 3787 break; 3788 btrfs_release_path(path); 3789 } 3790 btrfs_release_path(path); 3791 if (ret > 0) 3792 ret = 0; 3793 return ret; 3794 } 3795 3796 static void fill_inode_item(struct btrfs_trans_handle *trans, 3797 struct extent_buffer *leaf, 3798 struct btrfs_inode_item *item, 3799 struct inode *inode, int log_inode_only, 3800 u64 logged_isize) 3801 { 3802 struct btrfs_map_token token; 3803 3804 btrfs_init_map_token(&token); 3805 3806 if (log_inode_only) { 3807 /* set the generation to zero so the recover code 3808 * can tell the difference between an logging 3809 * just to say 'this inode exists' and a logging 3810 * to say 'update this inode with these values' 3811 */ 3812 btrfs_set_token_inode_generation(leaf, item, 0, &token); 3813 btrfs_set_token_inode_size(leaf, item, logged_isize, &token); 3814 } else { 3815 btrfs_set_token_inode_generation(leaf, item, 3816 BTRFS_I(inode)->generation, 3817 &token); 3818 btrfs_set_token_inode_size(leaf, item, inode->i_size, &token); 3819 } 3820 3821 btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token); 3822 btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token); 3823 btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token); 3824 btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token); 3825 3826 btrfs_set_token_timespec_sec(leaf, &item->atime, 3827 inode->i_atime.tv_sec, &token); 3828 btrfs_set_token_timespec_nsec(leaf, &item->atime, 3829 inode->i_atime.tv_nsec, &token); 3830 3831 btrfs_set_token_timespec_sec(leaf, &item->mtime, 3832 inode->i_mtime.tv_sec, &token); 3833 btrfs_set_token_timespec_nsec(leaf, &item->mtime, 3834 inode->i_mtime.tv_nsec, &token); 3835 3836 btrfs_set_token_timespec_sec(leaf, &item->ctime, 3837 inode->i_ctime.tv_sec, &token); 3838 btrfs_set_token_timespec_nsec(leaf, &item->ctime, 3839 inode->i_ctime.tv_nsec, &token); 3840 3841 btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode), 3842 &token); 3843 3844 btrfs_set_token_inode_sequence(leaf, item, 3845 inode_peek_iversion(inode), &token); 3846 btrfs_set_token_inode_transid(leaf, item, trans->transid, &token); 3847 btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token); 3848 btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token); 3849 btrfs_set_token_inode_block_group(leaf, item, 0, &token); 3850 } 3851 3852 static int log_inode_item(struct btrfs_trans_handle 
*trans, 3853 struct btrfs_root *log, struct btrfs_path *path, 3854 struct btrfs_inode *inode) 3855 { 3856 struct btrfs_inode_item *inode_item; 3857 int ret; 3858 3859 ret = btrfs_insert_empty_item(trans, log, path, 3860 &inode->location, sizeof(*inode_item)); 3861 if (ret && ret != -EEXIST) 3862 return ret; 3863 inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0], 3864 struct btrfs_inode_item); 3865 fill_inode_item(trans, path->nodes[0], inode_item, &inode->vfs_inode, 3866 0, 0); 3867 btrfs_release_path(path); 3868 return 0; 3869 } 3870 3871 static noinline int copy_items(struct btrfs_trans_handle *trans, 3872 struct btrfs_inode *inode, 3873 struct btrfs_path *dst_path, 3874 struct btrfs_path *src_path, u64 *last_extent, 3875 int start_slot, int nr, int inode_only, 3876 u64 logged_isize) 3877 { 3878 struct btrfs_fs_info *fs_info = trans->fs_info; 3879 unsigned long src_offset; 3880 unsigned long dst_offset; 3881 struct btrfs_root *log = inode->root->log_root; 3882 struct btrfs_file_extent_item *extent; 3883 struct btrfs_inode_item *inode_item; 3884 struct extent_buffer *src = src_path->nodes[0]; 3885 struct btrfs_key first_key, last_key, key; 3886 int ret; 3887 struct btrfs_key *ins_keys; 3888 u32 *ins_sizes; 3889 char *ins_data; 3890 int i; 3891 struct list_head ordered_sums; 3892 int skip_csum = inode->flags & BTRFS_INODE_NODATASUM; 3893 bool has_extents = false; 3894 bool need_find_last_extent = true; 3895 bool done = false; 3896 3897 INIT_LIST_HEAD(&ordered_sums); 3898 3899 ins_data = kmalloc(nr * sizeof(struct btrfs_key) + 3900 nr * sizeof(u32), GFP_NOFS); 3901 if (!ins_data) 3902 return -ENOMEM; 3903 3904 first_key.objectid = (u64)-1; 3905 3906 ins_sizes = (u32 *)ins_data; 3907 ins_keys = (struct btrfs_key *)(ins_data + nr * sizeof(u32)); 3908 3909 for (i = 0; i < nr; i++) { 3910 ins_sizes[i] = btrfs_item_size_nr(src, i + start_slot); 3911 btrfs_item_key_to_cpu(src, ins_keys + i, i + start_slot); 3912 } 3913 ret = btrfs_insert_empty_items(trans, log, dst_path, 3914 ins_keys, ins_sizes, nr); 3915 if (ret) { 3916 kfree(ins_data); 3917 return ret; 3918 } 3919 3920 for (i = 0; i < nr; i++, dst_path->slots[0]++) { 3921 dst_offset = btrfs_item_ptr_offset(dst_path->nodes[0], 3922 dst_path->slots[0]); 3923 3924 src_offset = btrfs_item_ptr_offset(src, start_slot + i); 3925 3926 if (i == nr - 1) 3927 last_key = ins_keys[i]; 3928 3929 if (ins_keys[i].type == BTRFS_INODE_ITEM_KEY) { 3930 inode_item = btrfs_item_ptr(dst_path->nodes[0], 3931 dst_path->slots[0], 3932 struct btrfs_inode_item); 3933 fill_inode_item(trans, dst_path->nodes[0], inode_item, 3934 &inode->vfs_inode, 3935 inode_only == LOG_INODE_EXISTS, 3936 logged_isize); 3937 } else { 3938 copy_extent_buffer(dst_path->nodes[0], src, dst_offset, 3939 src_offset, ins_sizes[i]); 3940 } 3941 3942 /* 3943 * We set need_find_last_extent here in case we know we were 3944 * processing other items and then walk into the first extent in 3945 * the inode. If we don't hit an extent then nothing changes, 3946 * we'll do the last search the next time around. 
3947 */ 3948 if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY) { 3949 has_extents = true; 3950 if (first_key.objectid == (u64)-1) 3951 first_key = ins_keys[i]; 3952 } else { 3953 need_find_last_extent = false; 3954 } 3955 3956 /* take a reference on file data extents so that truncates 3957 * or deletes of this inode don't have to relog the inode 3958 * again 3959 */ 3960 if (ins_keys[i].type == BTRFS_EXTENT_DATA_KEY && 3961 !skip_csum) { 3962 int found_type; 3963 extent = btrfs_item_ptr(src, start_slot + i, 3964 struct btrfs_file_extent_item); 3965 3966 if (btrfs_file_extent_generation(src, extent) < trans->transid) 3967 continue; 3968 3969 found_type = btrfs_file_extent_type(src, extent); 3970 if (found_type == BTRFS_FILE_EXTENT_REG) { 3971 u64 ds, dl, cs, cl; 3972 ds = btrfs_file_extent_disk_bytenr(src, 3973 extent); 3974 /* ds == 0 is a hole */ 3975 if (ds == 0) 3976 continue; 3977 3978 dl = btrfs_file_extent_disk_num_bytes(src, 3979 extent); 3980 cs = btrfs_file_extent_offset(src, extent); 3981 cl = btrfs_file_extent_num_bytes(src, 3982 extent); 3983 if (btrfs_file_extent_compression(src, 3984 extent)) { 3985 cs = 0; 3986 cl = dl; 3987 } 3988 3989 ret = btrfs_lookup_csums_range( 3990 fs_info->csum_root, 3991 ds + cs, ds + cs + cl - 1, 3992 &ordered_sums, 0); 3993 if (ret) { 3994 btrfs_release_path(dst_path); 3995 kfree(ins_data); 3996 return ret; 3997 } 3998 } 3999 } 4000 } 4001 4002 btrfs_mark_buffer_dirty(dst_path->nodes[0]); 4003 btrfs_release_path(dst_path); 4004 kfree(ins_data); 4005 4006 /* 4007 * we have to do this after the loop above to avoid changing the 4008 * log tree while trying to change the log tree. 4009 */ 4010 ret = 0; 4011 while (!list_empty(&ordered_sums)) { 4012 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next, 4013 struct btrfs_ordered_sum, 4014 list); 4015 if (!ret) 4016 ret = btrfs_csum_file_blocks(trans, log, sums); 4017 list_del(&sums->list); 4018 kfree(sums); 4019 } 4020 4021 if (!has_extents) 4022 return ret; 4023 4024 if (need_find_last_extent && *last_extent == first_key.offset) { 4025 /* 4026 * We don't have any leafs between our current one and the one 4027 * we processed before that can have file extent items for our 4028 * inode (and have a generation number smaller than our current 4029 * transaction id). 4030 */ 4031 need_find_last_extent = false; 4032 } 4033 4034 /* 4035 * Because we use btrfs_search_forward we could skip leaves that were 4036 * not modified and then assume *last_extent is valid when it really 4037 * isn't. So back up to the previous leaf and read the end of the last 4038 * extent before we go and fill in holes. 
4039 */ 4040 if (need_find_last_extent) { 4041 u64 len; 4042 4043 ret = btrfs_prev_leaf(inode->root, src_path); 4044 if (ret < 0) 4045 return ret; 4046 if (ret) 4047 goto fill_holes; 4048 if (src_path->slots[0]) 4049 src_path->slots[0]--; 4050 src = src_path->nodes[0]; 4051 btrfs_item_key_to_cpu(src, &key, src_path->slots[0]); 4052 if (key.objectid != btrfs_ino(inode) || 4053 key.type != BTRFS_EXTENT_DATA_KEY) 4054 goto fill_holes; 4055 extent = btrfs_item_ptr(src, src_path->slots[0], 4056 struct btrfs_file_extent_item); 4057 if (btrfs_file_extent_type(src, extent) == 4058 BTRFS_FILE_EXTENT_INLINE) { 4059 len = btrfs_file_extent_ram_bytes(src, extent); 4060 *last_extent = ALIGN(key.offset + len, 4061 fs_info->sectorsize); 4062 } else { 4063 len = btrfs_file_extent_num_bytes(src, extent); 4064 *last_extent = key.offset + len; 4065 } 4066 } 4067 fill_holes: 4068 /* So we did prev_leaf, now we need to move to the next leaf, but a few 4069 * things could have happened 4070 * 4071 * 1) A merge could have happened, so we could currently be on a leaf 4072 * that holds what we were copying in the first place. 4073 * 2) A split could have happened, and now not all of the items we want 4074 * are on the same leaf. 4075 * 4076 * So we need to adjust how we search for holes, we need to drop the 4077 * path and re-search for the first extent key we found, and then walk 4078 * forward until we hit the last one we copied. 4079 */ 4080 if (need_find_last_extent) { 4081 /* btrfs_prev_leaf could return 1 without releasing the path */ 4082 btrfs_release_path(src_path); 4083 ret = btrfs_search_slot(NULL, inode->root, &first_key, 4084 src_path, 0, 0); 4085 if (ret < 0) 4086 return ret; 4087 ASSERT(ret == 0); 4088 src = src_path->nodes[0]; 4089 i = src_path->slots[0]; 4090 } else { 4091 i = start_slot; 4092 } 4093 4094 /* 4095 * Ok so here we need to go through and fill in any holes we may have 4096 * to make sure that holes are punched for those areas in case they had 4097 * extents previously. 4098 */ 4099 while (!done) { 4100 u64 offset, len; 4101 u64 extent_end; 4102 4103 if (i >= btrfs_header_nritems(src_path->nodes[0])) { 4104 ret = btrfs_next_leaf(inode->root, src_path); 4105 if (ret < 0) 4106 return ret; 4107 ASSERT(ret == 0); 4108 src = src_path->nodes[0]; 4109 i = 0; 4110 need_find_last_extent = true; 4111 } 4112 4113 btrfs_item_key_to_cpu(src, &key, i); 4114 if (!btrfs_comp_cpu_keys(&key, &last_key)) 4115 done = true; 4116 if (key.objectid != btrfs_ino(inode) || 4117 key.type != BTRFS_EXTENT_DATA_KEY) { 4118 i++; 4119 continue; 4120 } 4121 extent = btrfs_item_ptr(src, i, struct btrfs_file_extent_item); 4122 if (btrfs_file_extent_type(src, extent) == 4123 BTRFS_FILE_EXTENT_INLINE) { 4124 len = btrfs_file_extent_ram_bytes(src, extent); 4125 extent_end = ALIGN(key.offset + len, 4126 fs_info->sectorsize); 4127 } else { 4128 len = btrfs_file_extent_num_bytes(src, extent); 4129 extent_end = key.offset + len; 4130 } 4131 i++; 4132 4133 if (*last_extent == key.offset) { 4134 *last_extent = extent_end; 4135 continue; 4136 } 4137 offset = *last_extent; 4138 len = key.offset - *last_extent; 4139 ret = btrfs_insert_file_extent(trans, log, btrfs_ino(inode), 4140 offset, 0, 0, len, 0, len, 0, 0, 0); 4141 if (ret) 4142 break; 4143 *last_extent = extent_end; 4144 } 4145 4146 /* 4147 * Check if there is a hole between the last extent found in our leaf 4148 * and the first extent in the next leaf. If there is one, we need to 4149 * log an explicit hole so that at replay time we can punch the hole. 
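	 * We only need to do this when the last key processed by the loop
	 * above was a file extent item of our inode sitting in the last slot
	 * of its leaf.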
4150 */ 4151 if (ret == 0 && 4152 key.objectid == btrfs_ino(inode) && 4153 key.type == BTRFS_EXTENT_DATA_KEY && 4154 i == btrfs_header_nritems(src_path->nodes[0])) { 4155 ret = btrfs_next_leaf(inode->root, src_path); 4156 need_find_last_extent = true; 4157 if (ret > 0) { 4158 ret = 0; 4159 } else if (ret == 0) { 4160 btrfs_item_key_to_cpu(src_path->nodes[0], &key, 4161 src_path->slots[0]); 4162 if (key.objectid == btrfs_ino(inode) && 4163 key.type == BTRFS_EXTENT_DATA_KEY && 4164 *last_extent < key.offset) { 4165 const u64 len = key.offset - *last_extent; 4166 4167 ret = btrfs_insert_file_extent(trans, log, 4168 btrfs_ino(inode), 4169 *last_extent, 0, 4170 0, len, 0, len, 4171 0, 0, 0); 4172 } 4173 } 4174 } 4175 /* 4176 * Need to let the callers know we dropped the path so they should 4177 * re-search. 4178 */ 4179 if (!ret && need_find_last_extent) 4180 ret = 1; 4181 return ret; 4182 } 4183 4184 static int extent_cmp(void *priv, struct list_head *a, struct list_head *b) 4185 { 4186 struct extent_map *em1, *em2; 4187 4188 em1 = list_entry(a, struct extent_map, list); 4189 em2 = list_entry(b, struct extent_map, list); 4190 4191 if (em1->start < em2->start) 4192 return -1; 4193 else if (em1->start > em2->start) 4194 return 1; 4195 return 0; 4196 } 4197 4198 static int log_extent_csums(struct btrfs_trans_handle *trans, 4199 struct btrfs_inode *inode, 4200 struct btrfs_root *log_root, 4201 const struct extent_map *em) 4202 { 4203 u64 csum_offset; 4204 u64 csum_len; 4205 LIST_HEAD(ordered_sums); 4206 int ret = 0; 4207 4208 if (inode->flags & BTRFS_INODE_NODATASUM || 4209 test_bit(EXTENT_FLAG_PREALLOC, &em->flags) || 4210 em->block_start == EXTENT_MAP_HOLE) 4211 return 0; 4212 4213 /* If we're compressed we have to save the entire range of csums. */ 4214 if (em->compress_type) { 4215 csum_offset = 0; 4216 csum_len = max(em->block_len, em->orig_block_len); 4217 } else { 4218 csum_offset = em->mod_start - em->start; 4219 csum_len = em->mod_len; 4220 } 4221 4222 /* block start is already adjusted for the file extent offset. 
*/ 4223 ret = btrfs_lookup_csums_range(trans->fs_info->csum_root, 4224 em->block_start + csum_offset, 4225 em->block_start + csum_offset + 4226 csum_len - 1, &ordered_sums, 0); 4227 if (ret) 4228 return ret; 4229 4230 while (!list_empty(&ordered_sums)) { 4231 struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next, 4232 struct btrfs_ordered_sum, 4233 list); 4234 if (!ret) 4235 ret = btrfs_csum_file_blocks(trans, log_root, sums); 4236 list_del(&sums->list); 4237 kfree(sums); 4238 } 4239 4240 return ret; 4241 } 4242 4243 static int log_one_extent(struct btrfs_trans_handle *trans, 4244 struct btrfs_inode *inode, struct btrfs_root *root, 4245 const struct extent_map *em, 4246 struct btrfs_path *path, 4247 struct btrfs_log_ctx *ctx) 4248 { 4249 struct btrfs_root *log = root->log_root; 4250 struct btrfs_file_extent_item *fi; 4251 struct extent_buffer *leaf; 4252 struct btrfs_map_token token; 4253 struct btrfs_key key; 4254 u64 extent_offset = em->start - em->orig_start; 4255 u64 block_len; 4256 int ret; 4257 int extent_inserted = 0; 4258 4259 ret = log_extent_csums(trans, inode, log, em); 4260 if (ret) 4261 return ret; 4262 4263 btrfs_init_map_token(&token); 4264 4265 ret = __btrfs_drop_extents(trans, log, &inode->vfs_inode, path, em->start, 4266 em->start + em->len, NULL, 0, 1, 4267 sizeof(*fi), &extent_inserted); 4268 if (ret) 4269 return ret; 4270 4271 if (!extent_inserted) { 4272 key.objectid = btrfs_ino(inode); 4273 key.type = BTRFS_EXTENT_DATA_KEY; 4274 key.offset = em->start; 4275 4276 ret = btrfs_insert_empty_item(trans, log, path, &key, 4277 sizeof(*fi)); 4278 if (ret) 4279 return ret; 4280 } 4281 leaf = path->nodes[0]; 4282 fi = btrfs_item_ptr(leaf, path->slots[0], 4283 struct btrfs_file_extent_item); 4284 4285 btrfs_set_token_file_extent_generation(leaf, fi, trans->transid, 4286 &token); 4287 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) 4288 btrfs_set_token_file_extent_type(leaf, fi, 4289 BTRFS_FILE_EXTENT_PREALLOC, 4290 &token); 4291 else 4292 btrfs_set_token_file_extent_type(leaf, fi, 4293 BTRFS_FILE_EXTENT_REG, 4294 &token); 4295 4296 block_len = max(em->block_len, em->orig_block_len); 4297 if (em->compress_type != BTRFS_COMPRESS_NONE) { 4298 btrfs_set_token_file_extent_disk_bytenr(leaf, fi, 4299 em->block_start, 4300 &token); 4301 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len, 4302 &token); 4303 } else if (em->block_start < EXTENT_MAP_LAST_BYTE) { 4304 btrfs_set_token_file_extent_disk_bytenr(leaf, fi, 4305 em->block_start - 4306 extent_offset, &token); 4307 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, block_len, 4308 &token); 4309 } else { 4310 btrfs_set_token_file_extent_disk_bytenr(leaf, fi, 0, &token); 4311 btrfs_set_token_file_extent_disk_num_bytes(leaf, fi, 0, 4312 &token); 4313 } 4314 4315 btrfs_set_token_file_extent_offset(leaf, fi, extent_offset, &token); 4316 btrfs_set_token_file_extent_num_bytes(leaf, fi, em->len, &token); 4317 btrfs_set_token_file_extent_ram_bytes(leaf, fi, em->ram_bytes, &token); 4318 btrfs_set_token_file_extent_compression(leaf, fi, em->compress_type, 4319 &token); 4320 btrfs_set_token_file_extent_encryption(leaf, fi, 0, &token); 4321 btrfs_set_token_file_extent_other_encoding(leaf, fi, 0, &token); 4322 btrfs_mark_buffer_dirty(leaf); 4323 4324 btrfs_release_path(path); 4325 4326 return ret; 4327 } 4328 4329 /* 4330 * Log all prealloc extents beyond the inode's i_size to make sure we do not 4331 * lose them after doing a fast fsync and replaying the log. 
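 * This is called from btrfs_log_changed_extents(), after the modified
 * extent maps have been processed.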
We scan the 4332 * subvolume's root instead of iterating the inode's extent map tree because 4333 * otherwise we can log incorrect extent items based on extent map conversion. 4334 * That can happen due to the fact that extent maps are merged when they 4335 * are not in the extent map tree's list of modified extents. 4336 */ 4337 static int btrfs_log_prealloc_extents(struct btrfs_trans_handle *trans, 4338 struct btrfs_inode *inode, 4339 struct btrfs_path *path) 4340 { 4341 struct btrfs_root *root = inode->root; 4342 struct btrfs_key key; 4343 const u64 i_size = i_size_read(&inode->vfs_inode); 4344 const u64 ino = btrfs_ino(inode); 4345 struct btrfs_path *dst_path = NULL; 4346 u64 last_extent = (u64)-1; 4347 int ins_nr = 0; 4348 int start_slot; 4349 int ret; 4350 4351 if (!(inode->flags & BTRFS_INODE_PREALLOC)) 4352 return 0; 4353 4354 key.objectid = ino; 4355 key.type = BTRFS_EXTENT_DATA_KEY; 4356 key.offset = i_size; 4357 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 4358 if (ret < 0) 4359 goto out; 4360 4361 while (true) { 4362 struct extent_buffer *leaf = path->nodes[0]; 4363 int slot = path->slots[0]; 4364 4365 if (slot >= btrfs_header_nritems(leaf)) { 4366 if (ins_nr > 0) { 4367 ret = copy_items(trans, inode, dst_path, path, 4368 &last_extent, start_slot, 4369 ins_nr, 1, 0); 4370 if (ret < 0) 4371 goto out; 4372 ins_nr = 0; 4373 } 4374 ret = btrfs_next_leaf(root, path); 4375 if (ret < 0) 4376 goto out; 4377 if (ret > 0) { 4378 ret = 0; 4379 break; 4380 } 4381 continue; 4382 } 4383 4384 btrfs_item_key_to_cpu(leaf, &key, slot); 4385 if (key.objectid > ino) 4386 break; 4387 if (WARN_ON_ONCE(key.objectid < ino) || 4388 key.type < BTRFS_EXTENT_DATA_KEY || 4389 key.offset < i_size) { 4390 path->slots[0]++; 4391 continue; 4392 } 4393 if (last_extent == (u64)-1) { 4394 last_extent = key.offset; 4395 /* 4396 * Avoid logging extent items logged in past fsync calls 4397 * and leading to duplicate keys in the log tree. 4398 */ 4399 do { 4400 ret = btrfs_truncate_inode_items(trans, 4401 root->log_root, 4402 &inode->vfs_inode, 4403 i_size, 4404 BTRFS_EXTENT_DATA_KEY); 4405 } while (ret == -EAGAIN); 4406 if (ret) 4407 goto out; 4408 } 4409 if (ins_nr == 0) 4410 start_slot = slot; 4411 ins_nr++; 4412 path->slots[0]++; 4413 if (!dst_path) { 4414 dst_path = btrfs_alloc_path(); 4415 if (!dst_path) { 4416 ret = -ENOMEM; 4417 goto out; 4418 } 4419 } 4420 } 4421 if (ins_nr > 0) { 4422 ret = copy_items(trans, inode, dst_path, path, &last_extent, 4423 start_slot, ins_nr, 1, 0); 4424 if (ret > 0) 4425 ret = 0; 4426 } 4427 out: 4428 btrfs_release_path(path); 4429 btrfs_free_path(dst_path); 4430 return ret; 4431 } 4432 4433 static int btrfs_log_changed_extents(struct btrfs_trans_handle *trans, 4434 struct btrfs_root *root, 4435 struct btrfs_inode *inode, 4436 struct btrfs_path *path, 4437 struct btrfs_log_ctx *ctx, 4438 const u64 start, 4439 const u64 end) 4440 { 4441 struct extent_map *em, *n; 4442 struct list_head extents; 4443 struct extent_map_tree *tree = &inode->extent_tree; 4444 u64 test_gen; 4445 int ret = 0; 4446 int num = 0; 4447 4448 INIT_LIST_HEAD(&extents); 4449 4450 write_lock(&tree->lock); 4451 test_gen = root->fs_info->last_trans_committed; 4452 4453 list_for_each_entry_safe(em, n, &tree->modified_extents, list) { 4454 /* 4455 * Skip extents outside our logging range. 
It's important to do 4456 * it for correctness because if we don't ignore them, we may 4457 * log them before their ordered extent completes, and therefore 4458 * we could log them without logging their respective checksums 4459 * (the checksum items are added to the csum tree at the very 4460 * end of btrfs_finish_ordered_io()). Also leave such extents 4461 * outside of our range in the list, since we may have another 4462 * ranged fsync in the near future that needs them. If an extent 4463 * outside our range corresponds to a hole, log it to avoid 4464 * leaving gaps between extents (fsck will complain when we are 4465 * not using the NO_HOLES feature). 4466 */ 4467 if ((em->start > end || em->start + em->len <= start) && 4468 em->block_start != EXTENT_MAP_HOLE) 4469 continue; 4470 4471 list_del_init(&em->list); 4472 /* 4473 * Just an arbitrary number, this can be really CPU intensive 4474 * once we start getting a lot of extents, and really once we 4475 * have a bunch of extents we just want to commit since it will 4476 * be faster. 4477 */ 4478 if (++num > 32768) { 4479 list_del_init(&tree->modified_extents); 4480 ret = -EFBIG; 4481 goto process; 4482 } 4483 4484 if (em->generation <= test_gen) 4485 continue; 4486 4487 /* We log prealloc extents beyond eof later. */ 4488 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) && 4489 em->start >= i_size_read(&inode->vfs_inode)) 4490 continue; 4491 4492 /* Need a ref to keep it from getting evicted from cache */ 4493 refcount_inc(&em->refs); 4494 set_bit(EXTENT_FLAG_LOGGING, &em->flags); 4495 list_add_tail(&em->list, &extents); 4496 num++; 4497 } 4498 4499 list_sort(NULL, &extents, extent_cmp); 4500 process: 4501 while (!list_empty(&extents)) { 4502 em = list_entry(extents.next, struct extent_map, list); 4503 4504 list_del_init(&em->list); 4505 4506 /* 4507 * If we had an error we just need to delete everybody from our 4508 * private list. 4509 */ 4510 if (ret) { 4511 clear_em_logging(tree, em); 4512 free_extent_map(em); 4513 continue; 4514 } 4515 4516 write_unlock(&tree->lock); 4517 4518 ret = log_one_extent(trans, inode, root, em, path, ctx); 4519 write_lock(&tree->lock); 4520 clear_em_logging(tree, em); 4521 free_extent_map(em); 4522 } 4523 WARN_ON(!list_empty(&extents)); 4524 write_unlock(&tree->lock); 4525 4526 btrfs_release_path(path); 4527 if (!ret) 4528 ret = btrfs_log_prealloc_extents(trans, inode, path); 4529 4530 return ret; 4531 } 4532 4533 static int logged_inode_size(struct btrfs_root *log, struct btrfs_inode *inode, 4534 struct btrfs_path *path, u64 *size_ret) 4535 { 4536 struct btrfs_key key; 4537 int ret; 4538 4539 key.objectid = btrfs_ino(inode); 4540 key.type = BTRFS_INODE_ITEM_KEY; 4541 key.offset = 0; 4542 4543 ret = btrfs_search_slot(NULL, log, &key, path, 0, 0); 4544 if (ret < 0) { 4545 return ret; 4546 } else if (ret > 0) { 4547 *size_ret = 0; 4548 } else { 4549 struct btrfs_inode_item *item; 4550 4551 item = btrfs_item_ptr(path->nodes[0], path->slots[0], 4552 struct btrfs_inode_item); 4553 *size_ret = btrfs_inode_size(path->nodes[0], item); 4554 /* 4555 * If the in-memory inode's i_size is smaller then the inode 4556 * size stored in the btree, return the inode's i_size, so 4557 * that we get a correct inode size after replaying the log 4558 * when before a power failure we had a shrinking truncate 4559 * followed by addition of a new name (rename / new hard link). 
4560 * Otherwise return the inode size from the btree, to avoid 4561 * data loss when replaying a log due to previously doing a 4562 * write that expands the inode's size and logging a new name 4563 * immediately after. 4564 */ 4565 if (*size_ret > inode->vfs_inode.i_size) 4566 *size_ret = inode->vfs_inode.i_size; 4567 } 4568 4569 btrfs_release_path(path); 4570 return 0; 4571 } 4572 4573 /* 4574 * At the moment we always log all xattrs. This is to figure out at log replay 4575 * time which xattrs must have their deletion replayed. If a xattr is missing 4576 * in the log tree and exists in the fs/subvol tree, we delete it. This is 4577 * because if a xattr is deleted, the inode is fsynced and a power failure 4578 * happens, causing the log to be replayed the next time the fs is mounted, 4579 * we want the xattr to not exist anymore (same behaviour as other filesystems 4580 * with a journal, ext3/4, xfs, f2fs, etc). 4581 */ 4582 static int btrfs_log_all_xattrs(struct btrfs_trans_handle *trans, 4583 struct btrfs_root *root, 4584 struct btrfs_inode *inode, 4585 struct btrfs_path *path, 4586 struct btrfs_path *dst_path) 4587 { 4588 int ret; 4589 struct btrfs_key key; 4590 const u64 ino = btrfs_ino(inode); 4591 int ins_nr = 0; 4592 int start_slot = 0; 4593 4594 key.objectid = ino; 4595 key.type = BTRFS_XATTR_ITEM_KEY; 4596 key.offset = 0; 4597 4598 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 4599 if (ret < 0) 4600 return ret; 4601 4602 while (true) { 4603 int slot = path->slots[0]; 4604 struct extent_buffer *leaf = path->nodes[0]; 4605 int nritems = btrfs_header_nritems(leaf); 4606 4607 if (slot >= nritems) { 4608 if (ins_nr > 0) { 4609 u64 last_extent = 0; 4610 4611 ret = copy_items(trans, inode, dst_path, path, 4612 &last_extent, start_slot, 4613 ins_nr, 1, 0); 4614 /* can't be 1, extent items aren't processed */ 4615 ASSERT(ret <= 0); 4616 if (ret < 0) 4617 return ret; 4618 ins_nr = 0; 4619 } 4620 ret = btrfs_next_leaf(root, path); 4621 if (ret < 0) 4622 return ret; 4623 else if (ret > 0) 4624 break; 4625 continue; 4626 } 4627 4628 btrfs_item_key_to_cpu(leaf, &key, slot); 4629 if (key.objectid != ino || key.type != BTRFS_XATTR_ITEM_KEY) 4630 break; 4631 4632 if (ins_nr == 0) 4633 start_slot = slot; 4634 ins_nr++; 4635 path->slots[0]++; 4636 cond_resched(); 4637 } 4638 if (ins_nr > 0) { 4639 u64 last_extent = 0; 4640 4641 ret = copy_items(trans, inode, dst_path, path, 4642 &last_extent, start_slot, 4643 ins_nr, 1, 0); 4644 /* can't be 1, extent items aren't processed */ 4645 ASSERT(ret <= 0); 4646 if (ret < 0) 4647 return ret; 4648 } 4649 4650 return 0; 4651 } 4652 4653 /* 4654 * If the no holes feature is enabled we need to make sure any hole between the 4655 * last extent and the i_size of our inode is explicitly marked in the log. This 4656 * is to make sure that doing something like: 4657 * 4658 * 1) create file with 128Kb of data 4659 * 2) truncate file to 64Kb 4660 * 3) truncate file to 256Kb 4661 * 4) fsync file 4662 * 5) <crash/power failure> 4663 * 6) mount fs and trigger log replay 4664 * 4665 * Will give us a file with a size of 256Kb, the first 64Kb of data match what 4666 * the file had in its first 64Kb of data at step 1 and the last 192Kb of the 4667 * file correspond to a hole. The presence of explicit holes in a log tree is 4668 * what guarantees that log replay will remove/adjust file extent items in the 4669 * fs/subvol tree. 4670 * 4671 * Here we do not need to care about holes between extents, that is already done 4672 * by copy_items(). 
We also only need to do this in the full sync path, where we 4673 * lookup for extents from the fs/subvol tree only. In the fast path case, we 4674 * lookup the list of modified extent maps and if any represents a hole, we 4675 * insert a corresponding extent representing a hole in the log tree. 4676 */ 4677 static int btrfs_log_trailing_hole(struct btrfs_trans_handle *trans, 4678 struct btrfs_root *root, 4679 struct btrfs_inode *inode, 4680 struct btrfs_path *path) 4681 { 4682 struct btrfs_fs_info *fs_info = root->fs_info; 4683 int ret; 4684 struct btrfs_key key; 4685 u64 hole_start; 4686 u64 hole_size; 4687 struct extent_buffer *leaf; 4688 struct btrfs_root *log = root->log_root; 4689 const u64 ino = btrfs_ino(inode); 4690 const u64 i_size = i_size_read(&inode->vfs_inode); 4691 4692 if (!btrfs_fs_incompat(fs_info, NO_HOLES)) 4693 return 0; 4694 4695 key.objectid = ino; 4696 key.type = BTRFS_EXTENT_DATA_KEY; 4697 key.offset = (u64)-1; 4698 4699 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 4700 ASSERT(ret != 0); 4701 if (ret < 0) 4702 return ret; 4703 4704 ASSERT(path->slots[0] > 0); 4705 path->slots[0]--; 4706 leaf = path->nodes[0]; 4707 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); 4708 4709 if (key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY) { 4710 /* inode does not have any extents */ 4711 hole_start = 0; 4712 hole_size = i_size; 4713 } else { 4714 struct btrfs_file_extent_item *extent; 4715 u64 len; 4716 4717 /* 4718 * If there's an extent beyond i_size, an explicit hole was 4719 * already inserted by copy_items(). 4720 */ 4721 if (key.offset >= i_size) 4722 return 0; 4723 4724 extent = btrfs_item_ptr(leaf, path->slots[0], 4725 struct btrfs_file_extent_item); 4726 4727 if (btrfs_file_extent_type(leaf, extent) == 4728 BTRFS_FILE_EXTENT_INLINE) 4729 return 0; 4730 4731 len = btrfs_file_extent_num_bytes(leaf, extent); 4732 /* Last extent goes beyond i_size, no need to log a hole. */ 4733 if (key.offset + len > i_size) 4734 return 0; 4735 hole_start = key.offset + len; 4736 hole_size = i_size - hole_start; 4737 } 4738 btrfs_release_path(path); 4739 4740 /* Last extent ends at i_size. */ 4741 if (hole_size == 0) 4742 return 0; 4743 4744 hole_size = ALIGN(hole_size, fs_info->sectorsize); 4745 ret = btrfs_insert_file_extent(trans, log, ino, hole_start, 0, 0, 4746 hole_size, 0, hole_size, 0, 0, 0); 4747 return ret; 4748 } 4749 4750 /* 4751 * When we are logging a new inode X, check if it doesn't have a reference that 4752 * matches the reference from some other inode Y created in a past transaction 4753 * and that was renamed in the current transaction. If we don't do this, then at 4754 * log replay time we can lose inode Y (and all its files if it's a directory): 4755 * 4756 * mkdir /mnt/x 4757 * echo "hello world" > /mnt/x/foobar 4758 * sync 4759 * mv /mnt/x /mnt/y 4760 * mkdir /mnt/x # or touch /mnt/x 4761 * xfs_io -c fsync /mnt/x 4762 * <power fail> 4763 * mount fs, trigger log replay 4764 * 4765 * After the log replay procedure, we would lose the first directory and all its 4766 * files (file foobar). 
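 * Detecting the conflict and logging the conflicting inode (Y) as well,
 * which is what log_conflicting_inodes() does, lets log replay keep Y
 * alive under its new name.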
4767 * For the case where inode Y is not a directory we simply end up losing it: 4768 * 4769 * echo "123" > /mnt/foo 4770 * sync 4771 * mv /mnt/foo /mnt/bar 4772 * echo "abc" > /mnt/foo 4773 * xfs_io -c fsync /mnt/foo 4774 * <power fail> 4775 * 4776 * We also need this for cases where a snapshot entry is replaced by some other 4777 * entry (file or directory) otherwise we end up with an unreplayable log due to 4778 * attempts to delete the snapshot entry (entry of type BTRFS_ROOT_ITEM_KEY) as 4779 * if it were a regular entry: 4780 * 4781 * mkdir /mnt/x 4782 * btrfs subvolume snapshot /mnt /mnt/x/snap 4783 * btrfs subvolume delete /mnt/x/snap 4784 * rmdir /mnt/x 4785 * mkdir /mnt/x 4786 * fsync /mnt/x or fsync some new file inside it 4787 * <power fail> 4788 * 4789 * The snapshot delete, rmdir of x, mkdir of a new x and the fsync all happen in 4790 * the same transaction. 4791 */ 4792 static int btrfs_check_ref_name_override(struct extent_buffer *eb, 4793 const int slot, 4794 const struct btrfs_key *key, 4795 struct btrfs_inode *inode, 4796 u64 *other_ino, u64 *other_parent) 4797 { 4798 int ret; 4799 struct btrfs_path *search_path; 4800 char *name = NULL; 4801 u32 name_len = 0; 4802 u32 item_size = btrfs_item_size_nr(eb, slot); 4803 u32 cur_offset = 0; 4804 unsigned long ptr = btrfs_item_ptr_offset(eb, slot); 4805 4806 search_path = btrfs_alloc_path(); 4807 if (!search_path) 4808 return -ENOMEM; 4809 search_path->search_commit_root = 1; 4810 search_path->skip_locking = 1; 4811 4812 while (cur_offset < item_size) { 4813 u64 parent; 4814 u32 this_name_len; 4815 u32 this_len; 4816 unsigned long name_ptr; 4817 struct btrfs_dir_item *di; 4818 4819 if (key->type == BTRFS_INODE_REF_KEY) { 4820 struct btrfs_inode_ref *iref; 4821 4822 iref = (struct btrfs_inode_ref *)(ptr + cur_offset); 4823 parent = key->offset; 4824 this_name_len = btrfs_inode_ref_name_len(eb, iref); 4825 name_ptr = (unsigned long)(iref + 1); 4826 this_len = sizeof(*iref) + this_name_len; 4827 } else { 4828 struct btrfs_inode_extref *extref; 4829 4830 extref = (struct btrfs_inode_extref *)(ptr + 4831 cur_offset); 4832 parent = btrfs_inode_extref_parent(eb, extref); 4833 this_name_len = btrfs_inode_extref_name_len(eb, extref); 4834 name_ptr = (unsigned long)&extref->name; 4835 this_len = sizeof(*extref) + this_name_len; 4836 } 4837 4838 if (this_name_len > name_len) { 4839 char *new_name; 4840 4841 new_name = krealloc(name, this_name_len, GFP_NOFS); 4842 if (!new_name) { 4843 ret = -ENOMEM; 4844 goto out; 4845 } 4846 name_len = this_name_len; 4847 name = new_name; 4848 } 4849 4850 read_extent_buffer(eb, name, name_ptr, this_name_len); 4851 di = btrfs_lookup_dir_item(NULL, inode->root, search_path, 4852 parent, name, this_name_len, 0); 4853 if (di && !IS_ERR(di)) { 4854 struct btrfs_key di_key; 4855 4856 btrfs_dir_item_key_to_cpu(search_path->nodes[0], 4857 di, &di_key); 4858 if (di_key.type == BTRFS_INODE_ITEM_KEY) { 4859 if (di_key.objectid != key->objectid) { 4860 ret = 1; 4861 *other_ino = di_key.objectid; 4862 *other_parent = parent; 4863 } else { 4864 ret = 0; 4865 } 4866 } else { 4867 ret = -EAGAIN; 4868 } 4869 goto out; 4870 } else if (IS_ERR(di)) { 4871 ret = PTR_ERR(di); 4872 goto out; 4873 } 4874 btrfs_release_path(search_path); 4875 4876 cur_offset += this_len; 4877 } 4878 ret = 0; 4879 out: 4880 btrfs_free_path(search_path); 4881 kfree(name); 4882 return ret; 4883 } 4884 4885 struct btrfs_ino_list { 4886 u64 ino; 4887 u64 parent; 4888 struct list_head list; 4889 }; 4890 4891 static int log_conflicting_inodes(struct 
btrfs_trans_handle *trans, 4892 struct btrfs_root *root, 4893 struct btrfs_path *path, 4894 struct btrfs_log_ctx *ctx, 4895 u64 ino, u64 parent) 4896 { 4897 struct btrfs_ino_list *ino_elem; 4898 LIST_HEAD(inode_list); 4899 int ret = 0; 4900 4901 ino_elem = kmalloc(sizeof(*ino_elem), GFP_NOFS); 4902 if (!ino_elem) 4903 return -ENOMEM; 4904 ino_elem->ino = ino; 4905 ino_elem->parent = parent; 4906 list_add_tail(&ino_elem->list, &inode_list); 4907 4908 while (!list_empty(&inode_list)) { 4909 struct btrfs_fs_info *fs_info = root->fs_info; 4910 struct btrfs_key key; 4911 struct inode *inode; 4912 4913 ino_elem = list_first_entry(&inode_list, struct btrfs_ino_list, 4914 list); 4915 ino = ino_elem->ino; 4916 parent = ino_elem->parent; 4917 list_del(&ino_elem->list); 4918 kfree(ino_elem); 4919 if (ret) 4920 continue; 4921 4922 btrfs_release_path(path); 4923 4924 key.objectid = ino; 4925 key.type = BTRFS_INODE_ITEM_KEY; 4926 key.offset = 0; 4927 inode = btrfs_iget(fs_info->sb, &key, root, NULL); 4928 /* 4929 * If the other inode that had a conflicting dir entry was 4930 * deleted in the current transaction, we need to log its parent 4931 * directory. 4932 */ 4933 if (IS_ERR(inode)) { 4934 ret = PTR_ERR(inode); 4935 if (ret == -ENOENT) { 4936 key.objectid = parent; 4937 inode = btrfs_iget(fs_info->sb, &key, root, 4938 NULL); 4939 if (IS_ERR(inode)) { 4940 ret = PTR_ERR(inode); 4941 } else { 4942 ret = btrfs_log_inode(trans, root, 4943 BTRFS_I(inode), 4944 LOG_OTHER_INODE_ALL, 4945 0, LLONG_MAX, ctx); 4946 iput(inode); 4947 } 4948 } 4949 continue; 4950 } 4951 /* 4952 * We are safe logging the other inode without acquiring its 4953 * lock as long as we log with the LOG_INODE_EXISTS mode. We 4954 * are safe against concurrent renames of the other inode as 4955 * well because during a rename we pin the log and update the 4956 * log with the new name before we unpin it. 4957 */ 4958 ret = btrfs_log_inode(trans, root, BTRFS_I(inode), 4959 LOG_OTHER_INODE, 0, LLONG_MAX, ctx); 4960 if (ret) { 4961 iput(inode); 4962 continue; 4963 } 4964 4965 key.objectid = ino; 4966 key.type = BTRFS_INODE_REF_KEY; 4967 key.offset = 0; 4968 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 4969 if (ret < 0) { 4970 iput(inode); 4971 continue; 4972 } 4973 4974 while (true) { 4975 struct extent_buffer *leaf = path->nodes[0]; 4976 int slot = path->slots[0]; 4977 u64 other_ino = 0; 4978 u64 other_parent = 0; 4979 4980 if (slot >= btrfs_header_nritems(leaf)) { 4981 ret = btrfs_next_leaf(root, path); 4982 if (ret < 0) { 4983 break; 4984 } else if (ret > 0) { 4985 ret = 0; 4986 break; 4987 } 4988 continue; 4989 } 4990 4991 btrfs_item_key_to_cpu(leaf, &key, slot); 4992 if (key.objectid != ino || 4993 (key.type != BTRFS_INODE_REF_KEY && 4994 key.type != BTRFS_INODE_EXTREF_KEY)) { 4995 ret = 0; 4996 break; 4997 } 4998 4999 ret = btrfs_check_ref_name_override(leaf, slot, &key, 5000 BTRFS_I(inode), &other_ino, 5001 &other_parent); 5002 if (ret < 0) 5003 break; 5004 if (ret > 0) { 5005 ino_elem = kmalloc(sizeof(*ino_elem), GFP_NOFS); 5006 if (!ino_elem) { 5007 ret = -ENOMEM; 5008 break; 5009 } 5010 ino_elem->ino = other_ino; 5011 ino_elem->parent = other_parent; 5012 list_add_tail(&ino_elem->list, &inode_list); 5013 ret = 0; 5014 } 5015 path->slots[0]++; 5016 } 5017 iput(inode); 5018 } 5019 5020 return ret; 5021 } 5022 5023 /* log a single inode in the tree log. 5024 * At least one parent directory for this inode must exist in the tree 5025 * or be logged already. 
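 * (btrfs_log_inode_parent() takes care of logging the parent directories
 * when that is needed).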
5026 * 5027 * Any items from this inode changed by the current transaction are copied 5028 * to the log tree. An extra reference is taken on any extents in this 5029 * file, allowing us to avoid a whole pile of corner cases around logging 5030 * blocks that have been removed from the tree. 5031 * 5032 * See LOG_INODE_ALL and related defines for a description of what inode_only 5033 * does. 5034 * 5035 * This handles both files and directories. 5036 */ 5037 static int btrfs_log_inode(struct btrfs_trans_handle *trans, 5038 struct btrfs_root *root, struct btrfs_inode *inode, 5039 int inode_only, 5040 const loff_t start, 5041 const loff_t end, 5042 struct btrfs_log_ctx *ctx) 5043 { 5044 struct btrfs_fs_info *fs_info = root->fs_info; 5045 struct btrfs_path *path; 5046 struct btrfs_path *dst_path; 5047 struct btrfs_key min_key; 5048 struct btrfs_key max_key; 5049 struct btrfs_root *log = root->log_root; 5050 u64 last_extent = 0; 5051 int err = 0; 5052 int ret; 5053 int nritems; 5054 int ins_start_slot = 0; 5055 int ins_nr; 5056 bool fast_search = false; 5057 u64 ino = btrfs_ino(inode); 5058 struct extent_map_tree *em_tree = &inode->extent_tree; 5059 u64 logged_isize = 0; 5060 bool need_log_inode_item = true; 5061 bool xattrs_logged = false; 5062 bool recursive_logging = false; 5063 5064 path = btrfs_alloc_path(); 5065 if (!path) 5066 return -ENOMEM; 5067 dst_path = btrfs_alloc_path(); 5068 if (!dst_path) { 5069 btrfs_free_path(path); 5070 return -ENOMEM; 5071 } 5072 5073 min_key.objectid = ino; 5074 min_key.type = BTRFS_INODE_ITEM_KEY; 5075 min_key.offset = 0; 5076 5077 max_key.objectid = ino; 5078 5079 5080 /* today the code can only do partial logging of directories */ 5081 if (S_ISDIR(inode->vfs_inode.i_mode) || 5082 (!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, 5083 &inode->runtime_flags) && 5084 inode_only >= LOG_INODE_EXISTS)) 5085 max_key.type = BTRFS_XATTR_ITEM_KEY; 5086 else 5087 max_key.type = (u8)-1; 5088 max_key.offset = (u64)-1; 5089 5090 /* 5091 * Only run delayed items if we are a dir or a new file. 5092 * Otherwise commit the delayed inode only, which is needed in 5093 * order for the log replay code to mark inodes for link count 5094 * fixup (create temporary BTRFS_TREE_LOG_FIXUP_OBJECTID items). 5095 */ 5096 if (S_ISDIR(inode->vfs_inode.i_mode) || 5097 inode->generation > fs_info->last_trans_committed) 5098 ret = btrfs_commit_inode_delayed_items(trans, inode); 5099 else 5100 ret = btrfs_commit_inode_delayed_inode(inode); 5101 5102 if (ret) { 5103 btrfs_free_path(path); 5104 btrfs_free_path(dst_path); 5105 return ret; 5106 } 5107 5108 if (inode_only == LOG_OTHER_INODE || inode_only == LOG_OTHER_INODE_ALL) { 5109 recursive_logging = true; 5110 if (inode_only == LOG_OTHER_INODE) 5111 inode_only = LOG_INODE_EXISTS; 5112 else 5113 inode_only = LOG_INODE_ALL; 5114 mutex_lock_nested(&inode->log_mutex, SINGLE_DEPTH_NESTING); 5115 } else { 5116 mutex_lock(&inode->log_mutex); 5117 } 5118 5119 /* 5120 * a brute force approach to making sure we get the most uptodate 5121 * copies of everything. 5122 */ 5123 if (S_ISDIR(inode->vfs_inode.i_mode)) { 5124 int max_key_type = BTRFS_DIR_LOG_INDEX_KEY; 5125 5126 if (inode_only == LOG_INODE_EXISTS) 5127 max_key_type = BTRFS_XATTR_ITEM_KEY; 5128 ret = drop_objectid_items(trans, log, path, ino, max_key_type); 5129 } else { 5130 if (inode_only == LOG_INODE_EXISTS) { 5131 /* 5132 * Make sure the new inode item we write to the log has 5133 * the same isize as the current one (if it exists). 
5134 * This is necessary to prevent data loss after log 5135 * replay, and also to prevent doing a wrong expanding 5136 * truncate - for e.g. create file, write 4K into offset 5137 * 0, fsync, write 4K into offset 4096, add hard link, 5138 * fsync some other file (to sync log), power fail - if 5139 * we use the inode's current i_size, after log replay 5140 * we get a 8Kb file, with the last 4Kb extent as a hole 5141 * (zeroes), as if an expanding truncate happened, 5142 * instead of getting a file of 4Kb only. 5143 */ 5144 err = logged_inode_size(log, inode, path, &logged_isize); 5145 if (err) 5146 goto out_unlock; 5147 } 5148 if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, 5149 &inode->runtime_flags)) { 5150 if (inode_only == LOG_INODE_EXISTS) { 5151 max_key.type = BTRFS_XATTR_ITEM_KEY; 5152 ret = drop_objectid_items(trans, log, path, ino, 5153 max_key.type); 5154 } else { 5155 clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC, 5156 &inode->runtime_flags); 5157 clear_bit(BTRFS_INODE_COPY_EVERYTHING, 5158 &inode->runtime_flags); 5159 while(1) { 5160 ret = btrfs_truncate_inode_items(trans, 5161 log, &inode->vfs_inode, 0, 0); 5162 if (ret != -EAGAIN) 5163 break; 5164 } 5165 } 5166 } else if (test_and_clear_bit(BTRFS_INODE_COPY_EVERYTHING, 5167 &inode->runtime_flags) || 5168 inode_only == LOG_INODE_EXISTS) { 5169 if (inode_only == LOG_INODE_ALL) 5170 fast_search = true; 5171 max_key.type = BTRFS_XATTR_ITEM_KEY; 5172 ret = drop_objectid_items(trans, log, path, ino, 5173 max_key.type); 5174 } else { 5175 if (inode_only == LOG_INODE_ALL) 5176 fast_search = true; 5177 goto log_extents; 5178 } 5179 5180 } 5181 if (ret) { 5182 err = ret; 5183 goto out_unlock; 5184 } 5185 5186 while (1) { 5187 ins_nr = 0; 5188 ret = btrfs_search_forward(root, &min_key, 5189 path, trans->transid); 5190 if (ret < 0) { 5191 err = ret; 5192 goto out_unlock; 5193 } 5194 if (ret != 0) 5195 break; 5196 again: 5197 /* note, ins_nr might be > 0 here, cleanup outside the loop */ 5198 if (min_key.objectid != ino) 5199 break; 5200 if (min_key.type > max_key.type) 5201 break; 5202 5203 if (min_key.type == BTRFS_INODE_ITEM_KEY) 5204 need_log_inode_item = false; 5205 5206 if ((min_key.type == BTRFS_INODE_REF_KEY || 5207 min_key.type == BTRFS_INODE_EXTREF_KEY) && 5208 inode->generation == trans->transid && 5209 !recursive_logging) { 5210 u64 other_ino = 0; 5211 u64 other_parent = 0; 5212 5213 ret = btrfs_check_ref_name_override(path->nodes[0], 5214 path->slots[0], &min_key, inode, 5215 &other_ino, &other_parent); 5216 if (ret < 0) { 5217 err = ret; 5218 goto out_unlock; 5219 } else if (ret > 0 && ctx && 5220 other_ino != btrfs_ino(BTRFS_I(ctx->inode))) { 5221 if (ins_nr > 0) { 5222 ins_nr++; 5223 } else { 5224 ins_nr = 1; 5225 ins_start_slot = path->slots[0]; 5226 } 5227 ret = copy_items(trans, inode, dst_path, path, 5228 &last_extent, ins_start_slot, 5229 ins_nr, inode_only, 5230 logged_isize); 5231 if (ret < 0) { 5232 err = ret; 5233 goto out_unlock; 5234 } 5235 ins_nr = 0; 5236 5237 err = log_conflicting_inodes(trans, root, path, 5238 ctx, other_ino, other_parent); 5239 if (err) 5240 goto out_unlock; 5241 btrfs_release_path(path); 5242 goto next_key; 5243 } 5244 } 5245 5246 /* Skip xattrs, we log them later with btrfs_log_all_xattrs() */ 5247 if (min_key.type == BTRFS_XATTR_ITEM_KEY) { 5248 if (ins_nr == 0) 5249 goto next_slot; 5250 ret = copy_items(trans, inode, dst_path, path, 5251 &last_extent, ins_start_slot, 5252 ins_nr, inode_only, logged_isize); 5253 if (ret < 0) { 5254 err = ret; 5255 goto out_unlock; 5256 } 5257 ins_nr = 0; 5258 if (ret) 
{ 5259 btrfs_release_path(path); 5260 continue; 5261 } 5262 goto next_slot; 5263 } 5264 5265 if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) { 5266 ins_nr++; 5267 goto next_slot; 5268 } else if (!ins_nr) { 5269 ins_start_slot = path->slots[0]; 5270 ins_nr = 1; 5271 goto next_slot; 5272 } 5273 5274 ret = copy_items(trans, inode, dst_path, path, &last_extent, 5275 ins_start_slot, ins_nr, inode_only, 5276 logged_isize); 5277 if (ret < 0) { 5278 err = ret; 5279 goto out_unlock; 5280 } 5281 if (ret) { 5282 ins_nr = 0; 5283 btrfs_release_path(path); 5284 continue; 5285 } 5286 ins_nr = 1; 5287 ins_start_slot = path->slots[0]; 5288 next_slot: 5289 5290 nritems = btrfs_header_nritems(path->nodes[0]); 5291 path->slots[0]++; 5292 if (path->slots[0] < nritems) { 5293 btrfs_item_key_to_cpu(path->nodes[0], &min_key, 5294 path->slots[0]); 5295 goto again; 5296 } 5297 if (ins_nr) { 5298 ret = copy_items(trans, inode, dst_path, path, 5299 &last_extent, ins_start_slot, 5300 ins_nr, inode_only, logged_isize); 5301 if (ret < 0) { 5302 err = ret; 5303 goto out_unlock; 5304 } 5305 ret = 0; 5306 ins_nr = 0; 5307 } 5308 btrfs_release_path(path); 5309 next_key: 5310 if (min_key.offset < (u64)-1) { 5311 min_key.offset++; 5312 } else if (min_key.type < max_key.type) { 5313 min_key.type++; 5314 min_key.offset = 0; 5315 } else { 5316 break; 5317 } 5318 } 5319 if (ins_nr) { 5320 ret = copy_items(trans, inode, dst_path, path, &last_extent, 5321 ins_start_slot, ins_nr, inode_only, 5322 logged_isize); 5323 if (ret < 0) { 5324 err = ret; 5325 goto out_unlock; 5326 } 5327 ret = 0; 5328 ins_nr = 0; 5329 } 5330 5331 btrfs_release_path(path); 5332 btrfs_release_path(dst_path); 5333 err = btrfs_log_all_xattrs(trans, root, inode, path, dst_path); 5334 if (err) 5335 goto out_unlock; 5336 xattrs_logged = true; 5337 if (max_key.type >= BTRFS_EXTENT_DATA_KEY && !fast_search) { 5338 btrfs_release_path(path); 5339 btrfs_release_path(dst_path); 5340 err = btrfs_log_trailing_hole(trans, root, inode, path); 5341 if (err) 5342 goto out_unlock; 5343 } 5344 log_extents: 5345 btrfs_release_path(path); 5346 btrfs_release_path(dst_path); 5347 if (need_log_inode_item) { 5348 err = log_inode_item(trans, log, dst_path, inode); 5349 if (!err && !xattrs_logged) { 5350 err = btrfs_log_all_xattrs(trans, root, inode, path, 5351 dst_path); 5352 btrfs_release_path(path); 5353 } 5354 if (err) 5355 goto out_unlock; 5356 } 5357 if (fast_search) { 5358 ret = btrfs_log_changed_extents(trans, root, inode, dst_path, 5359 ctx, start, end); 5360 if (ret) { 5361 err = ret; 5362 goto out_unlock; 5363 } 5364 } else if (inode_only == LOG_INODE_ALL) { 5365 struct extent_map *em, *n; 5366 5367 write_lock(&em_tree->lock); 5368 /* 5369 * We can't just remove every em if we're called for a ranged 5370 * fsync - that is, one that doesn't cover the whole possible 5371 * file range (0 to LLONG_MAX). This is because we can have 5372 * em's that fall outside the range we're logging and therefore 5373 * their ordered operations haven't completed yet 5374 * (btrfs_finish_ordered_io() not invoked yet). This means we 5375 * didn't get their respective file extent item in the fs/subvol 5376 * tree yet, and need to let the next fast fsync (one which 5377 * consults the list of modified extent maps) find the em so 5378 * that it logs a matching file extent item and waits for the 5379 * respective ordered operation to complete (if it's still 5380 * running). 
5381 * 5382 * Removing every em outside the range we're logging would make 5383 * the next fast fsync not log their matching file extent items, 5384 * therefore making us lose data after a log replay. 5385 */ 5386 list_for_each_entry_safe(em, n, &em_tree->modified_extents, 5387 list) { 5388 const u64 mod_end = em->mod_start + em->mod_len - 1; 5389 5390 if (em->mod_start >= start && mod_end <= end) 5391 list_del_init(&em->list); 5392 } 5393 write_unlock(&em_tree->lock); 5394 } 5395 5396 if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->vfs_inode.i_mode)) { 5397 ret = log_directory_changes(trans, root, inode, path, dst_path, 5398 ctx); 5399 if (ret) { 5400 err = ret; 5401 goto out_unlock; 5402 } 5403 } 5404 5405 spin_lock(&inode->lock); 5406 inode->logged_trans = trans->transid; 5407 inode->last_log_commit = inode->last_sub_trans; 5408 spin_unlock(&inode->lock); 5409 out_unlock: 5410 mutex_unlock(&inode->log_mutex); 5411 5412 btrfs_free_path(path); 5413 btrfs_free_path(dst_path); 5414 return err; 5415 } 5416 5417 /* 5418 * Check if we must fallback to a transaction commit when logging an inode. 5419 * This must be called after logging the inode and is used only in the context 5420 * when fsyncing an inode requires the need to log some other inode - in which 5421 * case we can't lock the i_mutex of each other inode we need to log as that 5422 * can lead to deadlocks with concurrent fsync against other inodes (as we can 5423 * log inodes up or down in the hierarchy) or rename operations for example. So 5424 * we take the log_mutex of the inode after we have logged it and then check for 5425 * its last_unlink_trans value - this is safe because any task setting 5426 * last_unlink_trans must take the log_mutex and it must do this before it does 5427 * the actual unlink operation, so if we do this check before a concurrent task 5428 * sets last_unlink_trans it means we've logged a consistent version/state of 5429 * all the inode items, otherwise we are not sure and must do a transaction 5430 * commit (the concurrent task might have only updated last_unlink_trans before 5431 * we logged the inode or it might have also done the unlink). 5432 */ 5433 static bool btrfs_must_commit_transaction(struct btrfs_trans_handle *trans, 5434 struct btrfs_inode *inode) 5435 { 5436 struct btrfs_fs_info *fs_info = inode->root->fs_info; 5437 bool ret = false; 5438 5439 mutex_lock(&inode->log_mutex); 5440 if (inode->last_unlink_trans > fs_info->last_trans_committed) { 5441 /* 5442 * Make sure any commits to the log are forced to be full 5443 * commits. 5444 */ 5445 btrfs_set_log_full_commit(fs_info, trans); 5446 ret = true; 5447 } 5448 mutex_unlock(&inode->log_mutex); 5449 5450 return ret; 5451 } 5452 5453 /* 5454 * follow the dentry parent pointers up the chain and see if any 5455 * of the directories in it require a full commit before they can 5456 * be logged. Returns zero if nothing special needs to be done or 1 if 5457 * a full commit is required. 5458 */ 5459 static noinline int check_parent_dirs_for_sync(struct btrfs_trans_handle *trans, 5460 struct btrfs_inode *inode, 5461 struct dentry *parent, 5462 struct super_block *sb, 5463 u64 last_committed) 5464 { 5465 int ret = 0; 5466 struct dentry *old_parent = NULL; 5467 struct btrfs_inode *orig_inode = inode; 5468 5469 /* 5470 * for regular files, if its inode is already on disk, we don't 5471 * have to worry about the parents at all. This is because 5472 * we can use the last_unlink_trans field to record renames 5473 * and other fun in this file. 
5474 */ 5475 if (S_ISREG(inode->vfs_inode.i_mode) && 5476 inode->generation <= last_committed && 5477 inode->last_unlink_trans <= last_committed) 5478 goto out; 5479 5480 if (!S_ISDIR(inode->vfs_inode.i_mode)) { 5481 if (!parent || d_really_is_negative(parent) || sb != parent->d_sb) 5482 goto out; 5483 inode = BTRFS_I(d_inode(parent)); 5484 } 5485 5486 while (1) { 5487 /* 5488 * If we are logging a directory then we start with our inode, 5489 * not our parent's inode, so we need to skip setting the 5490 * logged_trans so that further down in the log code we don't 5491 * think this inode has already been logged. 5492 */ 5493 if (inode != orig_inode) 5494 inode->logged_trans = trans->transid; 5495 smp_mb(); 5496 5497 if (btrfs_must_commit_transaction(trans, inode)) { 5498 ret = 1; 5499 break; 5500 } 5501 5502 if (!parent || d_really_is_negative(parent) || sb != parent->d_sb) 5503 break; 5504 5505 if (IS_ROOT(parent)) { 5506 inode = BTRFS_I(d_inode(parent)); 5507 if (btrfs_must_commit_transaction(trans, inode)) 5508 ret = 1; 5509 break; 5510 } 5511 5512 parent = dget_parent(parent); 5513 dput(old_parent); 5514 old_parent = parent; 5515 inode = BTRFS_I(d_inode(parent)); 5516 5517 } 5518 dput(old_parent); 5519 out: 5520 return ret; 5521 } 5522 5523 struct btrfs_dir_list { 5524 u64 ino; 5525 struct list_head list; 5526 }; 5527 5528 /* 5529 * Log the inodes of the new dentries of a directory. See log_dir_items() for 5530 * details about the why it is needed. 5531 * This is a recursive operation - if an existing dentry corresponds to a 5532 * directory, that directory's new entries are logged too (same behaviour as 5533 * ext3/4, xfs, f2fs, reiserfs, nilfs2). Note that when logging the inodes 5534 * the dentries point to we do not lock their i_mutex, otherwise lockdep 5535 * complains about the following circular lock dependency / possible deadlock: 5536 * 5537 * CPU0 CPU1 5538 * ---- ---- 5539 * lock(&type->i_mutex_dir_key#3/2); 5540 * lock(sb_internal#2); 5541 * lock(&type->i_mutex_dir_key#3/2); 5542 * lock(&sb->s_type->i_mutex_key#14); 5543 * 5544 * Where sb_internal is the lock (a counter that works as a lock) acquired by 5545 * sb_start_intwrite() in btrfs_start_transaction(). 5546 * Not locking i_mutex of the inodes is still safe because: 5547 * 5548 * 1) For regular files we log with a mode of LOG_INODE_EXISTS. It's possible 5549 * that while logging the inode new references (names) are added or removed 5550 * from the inode, leaving the logged inode item with a link count that does 5551 * not match the number of logged inode reference items. This is fine because 5552 * at log replay time we compute the real number of links and correct the 5553 * link count in the inode item (see replay_one_buffer() and 5554 * link_to_fixup_dir()); 5555 * 5556 * 2) For directories we log with a mode of LOG_INODE_ALL. It's possible that 5557 * while logging the inode's items new items with keys BTRFS_DIR_ITEM_KEY and 5558 * BTRFS_DIR_INDEX_KEY are added to fs/subvol tree and the logged inode item 5559 * has a size that doesn't match the sum of the lengths of all the logged 5560 * names. This does not result in a problem because if a dir_item key is 5561 * logged but its matching dir_index key is not logged, at log replay time we 5562 * don't use it to replay the respective name (see replay_one_name()). 
On the 5563 * other hand if only the dir_index key ends up being logged, the respective 5564 * name is added to the fs/subvol tree with both the dir_item and dir_index 5565 * keys created (see replay_one_name()). 5566 * The directory's inode item with a wrong i_size is not a problem as well, 5567 * since we don't use it at log replay time to set the i_size in the inode 5568 * item of the fs/subvol tree (see overwrite_item()). 5569 */ 5570 static int log_new_dir_dentries(struct btrfs_trans_handle *trans, 5571 struct btrfs_root *root, 5572 struct btrfs_inode *start_inode, 5573 struct btrfs_log_ctx *ctx) 5574 { 5575 struct btrfs_fs_info *fs_info = root->fs_info; 5576 struct btrfs_root *log = root->log_root; 5577 struct btrfs_path *path; 5578 LIST_HEAD(dir_list); 5579 struct btrfs_dir_list *dir_elem; 5580 int ret = 0; 5581 5582 path = btrfs_alloc_path(); 5583 if (!path) 5584 return -ENOMEM; 5585 5586 dir_elem = kmalloc(sizeof(*dir_elem), GFP_NOFS); 5587 if (!dir_elem) { 5588 btrfs_free_path(path); 5589 return -ENOMEM; 5590 } 5591 dir_elem->ino = btrfs_ino(start_inode); 5592 list_add_tail(&dir_elem->list, &dir_list); 5593 5594 while (!list_empty(&dir_list)) { 5595 struct extent_buffer *leaf; 5596 struct btrfs_key min_key; 5597 int nritems; 5598 int i; 5599 5600 dir_elem = list_first_entry(&dir_list, struct btrfs_dir_list, 5601 list); 5602 if (ret) 5603 goto next_dir_inode; 5604 5605 min_key.objectid = dir_elem->ino; 5606 min_key.type = BTRFS_DIR_ITEM_KEY; 5607 min_key.offset = 0; 5608 again: 5609 btrfs_release_path(path); 5610 ret = btrfs_search_forward(log, &min_key, path, trans->transid); 5611 if (ret < 0) { 5612 goto next_dir_inode; 5613 } else if (ret > 0) { 5614 ret = 0; 5615 goto next_dir_inode; 5616 } 5617 5618 process_leaf: 5619 leaf = path->nodes[0]; 5620 nritems = btrfs_header_nritems(leaf); 5621 for (i = path->slots[0]; i < nritems; i++) { 5622 struct btrfs_dir_item *di; 5623 struct btrfs_key di_key; 5624 struct inode *di_inode; 5625 struct btrfs_dir_list *new_dir_elem; 5626 int log_mode = LOG_INODE_EXISTS; 5627 int type; 5628 5629 btrfs_item_key_to_cpu(leaf, &min_key, i); 5630 if (min_key.objectid != dir_elem->ino || 5631 min_key.type != BTRFS_DIR_ITEM_KEY) 5632 goto next_dir_inode; 5633 5634 di = btrfs_item_ptr(leaf, i, struct btrfs_dir_item); 5635 type = btrfs_dir_type(leaf, di); 5636 if (btrfs_dir_transid(leaf, di) < trans->transid && 5637 type != BTRFS_FT_DIR) 5638 continue; 5639 btrfs_dir_item_key_to_cpu(leaf, di, &di_key); 5640 if (di_key.type == BTRFS_ROOT_ITEM_KEY) 5641 continue; 5642 5643 btrfs_release_path(path); 5644 di_inode = btrfs_iget(fs_info->sb, &di_key, root, NULL); 5645 if (IS_ERR(di_inode)) { 5646 ret = PTR_ERR(di_inode); 5647 goto next_dir_inode; 5648 } 5649 5650 if (btrfs_inode_in_log(BTRFS_I(di_inode), trans->transid)) { 5651 iput(di_inode); 5652 break; 5653 } 5654 5655 ctx->log_new_dentries = false; 5656 if (type == BTRFS_FT_DIR || type == BTRFS_FT_SYMLINK) 5657 log_mode = LOG_INODE_ALL; 5658 ret = btrfs_log_inode(trans, root, BTRFS_I(di_inode), 5659 log_mode, 0, LLONG_MAX, ctx); 5660 if (!ret && 5661 btrfs_must_commit_transaction(trans, BTRFS_I(di_inode))) 5662 ret = 1; 5663 iput(di_inode); 5664 if (ret) 5665 goto next_dir_inode; 5666 if (ctx->log_new_dentries) { 5667 new_dir_elem = kmalloc(sizeof(*new_dir_elem), 5668 GFP_NOFS); 5669 if (!new_dir_elem) { 5670 ret = -ENOMEM; 5671 goto next_dir_inode; 5672 } 5673 new_dir_elem->ino = di_key.objectid; 5674 list_add_tail(&new_dir_elem->list, &dir_list); 5675 } 5676 break; 5677 } 5678 if (i == nritems) { 5679 ret 
= btrfs_next_leaf(log, path); 5680 if (ret < 0) { 5681 goto next_dir_inode; 5682 } else if (ret > 0) { 5683 ret = 0; 5684 goto next_dir_inode; 5685 } 5686 goto process_leaf; 5687 } 5688 if (min_key.offset < (u64)-1) { 5689 min_key.offset++; 5690 goto again; 5691 } 5692 next_dir_inode: 5693 list_del(&dir_elem->list); 5694 kfree(dir_elem); 5695 } 5696 5697 btrfs_free_path(path); 5698 return ret; 5699 } 5700 5701 static int btrfs_log_all_parents(struct btrfs_trans_handle *trans, 5702 struct btrfs_inode *inode, 5703 struct btrfs_log_ctx *ctx) 5704 { 5705 struct btrfs_fs_info *fs_info = trans->fs_info; 5706 int ret; 5707 struct btrfs_path *path; 5708 struct btrfs_key key; 5709 struct btrfs_root *root = inode->root; 5710 const u64 ino = btrfs_ino(inode); 5711 5712 path = btrfs_alloc_path(); 5713 if (!path) 5714 return -ENOMEM; 5715 path->skip_locking = 1; 5716 path->search_commit_root = 1; 5717 5718 key.objectid = ino; 5719 key.type = BTRFS_INODE_REF_KEY; 5720 key.offset = 0; 5721 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 5722 if (ret < 0) 5723 goto out; 5724 5725 while (true) { 5726 struct extent_buffer *leaf = path->nodes[0]; 5727 int slot = path->slots[0]; 5728 u32 cur_offset = 0; 5729 u32 item_size; 5730 unsigned long ptr; 5731 5732 if (slot >= btrfs_header_nritems(leaf)) { 5733 ret = btrfs_next_leaf(root, path); 5734 if (ret < 0) 5735 goto out; 5736 else if (ret > 0) 5737 break; 5738 continue; 5739 } 5740 5741 btrfs_item_key_to_cpu(leaf, &key, slot); 5742 /* BTRFS_INODE_EXTREF_KEY is BTRFS_INODE_REF_KEY + 1 */ 5743 if (key.objectid != ino || key.type > BTRFS_INODE_EXTREF_KEY) 5744 break; 5745 5746 item_size = btrfs_item_size_nr(leaf, slot); 5747 ptr = btrfs_item_ptr_offset(leaf, slot); 5748 while (cur_offset < item_size) { 5749 struct btrfs_key inode_key; 5750 struct inode *dir_inode; 5751 5752 inode_key.type = BTRFS_INODE_ITEM_KEY; 5753 inode_key.offset = 0; 5754 5755 if (key.type == BTRFS_INODE_EXTREF_KEY) { 5756 struct btrfs_inode_extref *extref; 5757 5758 extref = (struct btrfs_inode_extref *) 5759 (ptr + cur_offset); 5760 inode_key.objectid = btrfs_inode_extref_parent( 5761 leaf, extref); 5762 cur_offset += sizeof(*extref); 5763 cur_offset += btrfs_inode_extref_name_len(leaf, 5764 extref); 5765 } else { 5766 inode_key.objectid = key.offset; 5767 cur_offset = item_size; 5768 } 5769 5770 dir_inode = btrfs_iget(fs_info->sb, &inode_key, 5771 root, NULL); 5772 /* 5773 * If the parent inode was deleted, return an error to 5774 * fallback to a transaction commit. This is to prevent 5775 * getting an inode that was moved from one parent A to 5776 * a parent B, got its former parent A deleted and then 5777 * it got fsync'ed, from existing at both parents after 5778 * a log replay (and the old parent still existing). 5779 * Example: 5780 * 5781 * mkdir /mnt/A 5782 * mkdir /mnt/B 5783 * touch /mnt/B/bar 5784 * sync 5785 * mv /mnt/B/bar /mnt/A/bar 5786 * mv -T /mnt/A /mnt/B 5787 * fsync /mnt/B/bar 5788 * <power fail> 5789 * 5790 * If we ignore the old parent B which got deleted, 5791 * after a log replay we would have file bar linked 5792 * at both parents and the old parent B would still 5793 * exist. 
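			 * (any negative return from here makes
			 * btrfs_log_inode_parent() mark the log for a full
			 * transaction commit).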
5794 */ 5795 if (IS_ERR(dir_inode)) { 5796 ret = PTR_ERR(dir_inode); 5797 goto out; 5798 } 5799 5800 if (ctx) 5801 ctx->log_new_dentries = false; 5802 ret = btrfs_log_inode(trans, root, BTRFS_I(dir_inode), 5803 LOG_INODE_ALL, 0, LLONG_MAX, ctx); 5804 if (!ret && 5805 btrfs_must_commit_transaction(trans, BTRFS_I(dir_inode))) 5806 ret = 1; 5807 if (!ret && ctx && ctx->log_new_dentries) 5808 ret = log_new_dir_dentries(trans, root, 5809 BTRFS_I(dir_inode), ctx); 5810 iput(dir_inode); 5811 if (ret) 5812 goto out; 5813 } 5814 path->slots[0]++; 5815 } 5816 ret = 0; 5817 out: 5818 btrfs_free_path(path); 5819 return ret; 5820 } 5821 5822 /* 5823 * helper function around btrfs_log_inode to make sure newly created 5824 * parent directories also end up in the log. A minimal inode and backref 5825 * only logging is done of any parent directories that are older than 5826 * the last committed transaction 5827 */ 5828 static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans, 5829 struct btrfs_inode *inode, 5830 struct dentry *parent, 5831 const loff_t start, 5832 const loff_t end, 5833 int inode_only, 5834 struct btrfs_log_ctx *ctx) 5835 { 5836 struct btrfs_root *root = inode->root; 5837 struct btrfs_fs_info *fs_info = root->fs_info; 5838 struct super_block *sb; 5839 struct dentry *old_parent = NULL; 5840 int ret = 0; 5841 u64 last_committed = fs_info->last_trans_committed; 5842 bool log_dentries = false; 5843 struct btrfs_inode *orig_inode = inode; 5844 5845 sb = inode->vfs_inode.i_sb; 5846 5847 if (btrfs_test_opt(fs_info, NOTREELOG)) { 5848 ret = 1; 5849 goto end_no_trans; 5850 } 5851 5852 /* 5853 * The prev transaction commit doesn't complete, we need do 5854 * full commit by ourselves. 5855 */ 5856 if (fs_info->last_trans_log_full_commit > 5857 fs_info->last_trans_committed) { 5858 ret = 1; 5859 goto end_no_trans; 5860 } 5861 5862 if (btrfs_root_refs(&root->root_item) == 0) { 5863 ret = 1; 5864 goto end_no_trans; 5865 } 5866 5867 ret = check_parent_dirs_for_sync(trans, inode, parent, sb, 5868 last_committed); 5869 if (ret) 5870 goto end_no_trans; 5871 5872 /* 5873 * Skip already logged inodes or inodes corresponding to tmpfiles 5874 * (since logging them is pointless, a link count of 0 means they 5875 * will never be accessible). 5876 */ 5877 if (btrfs_inode_in_log(inode, trans->transid) || 5878 inode->vfs_inode.i_nlink == 0) { 5879 ret = BTRFS_NO_LOG_SYNC; 5880 goto end_no_trans; 5881 } 5882 5883 ret = start_log_trans(trans, root, ctx); 5884 if (ret) 5885 goto end_no_trans; 5886 5887 ret = btrfs_log_inode(trans, root, inode, inode_only, start, end, ctx); 5888 if (ret) 5889 goto end_trans; 5890 5891 /* 5892 * for regular files, if its inode is already on disk, we don't 5893 * have to worry about the parents at all. This is because 5894 * we can use the last_unlink_trans field to record renames 5895 * and other fun in this file. 5896 */ 5897 if (S_ISREG(inode->vfs_inode.i_mode) && 5898 inode->generation <= last_committed && 5899 inode->last_unlink_trans <= last_committed) { 5900 ret = 0; 5901 goto end_trans; 5902 } 5903 5904 if (S_ISDIR(inode->vfs_inode.i_mode) && ctx && ctx->log_new_dentries) 5905 log_dentries = true; 5906 5907 /* 5908 * On unlink we must make sure all our current and old parent directory 5909 * inodes are fully logged. This is to prevent leaving dangling 5910 * directory index entries in directories that were our parents but are 5911 * not anymore. 
	 * Not doing this results in the old parent directory being impossible
	 * to delete after log replay (rmdir will always fail with error
	 * -ENOTEMPTY).
	 *
	 * Example 1:
	 *
	 * mkdir testdir
	 * touch testdir/foo
	 * ln testdir/foo testdir/bar
	 * sync
	 * unlink testdir/bar
	 * xfs_io -c fsync testdir/foo
	 * <power failure>
	 * mount fs, triggers log replay
	 *
	 * If we don't log the parent directory (testdir), after log replay the
	 * directory still has an entry pointing to the file inode using the bar
	 * name, but a matching BTRFS_INODE_[REF|EXTREF]_KEY does not exist and
	 * the file inode has a link count of 1.
	 *
	 * Example 2:
	 *
	 * mkdir testdir
	 * touch foo
	 * ln foo testdir/foo2
	 * ln foo testdir/foo3
	 * sync
	 * unlink testdir/foo3
	 * xfs_io -c fsync foo
	 * <power failure>
	 * mount fs, triggers log replay
	 *
	 * Similar to the first example, after log replay the parent directory
	 * testdir still has an entry pointing to the file inode with the name
	 * foo3, but the file inode does not have a matching BTRFS_INODE_REF_KEY
	 * item and has a link count of 2.
	 */
	if (inode->last_unlink_trans > last_committed) {
		ret = btrfs_log_all_parents(trans, orig_inode, ctx);
		if (ret)
			goto end_trans;
	}

	/*
	 * If a new hard link was added to the inode in the current transaction
	 * and its link count is now greater than 1, we need to fall back to a
	 * transaction commit, otherwise we can end up not logging all its new
	 * parents for all the hard links. Here, just from the dentry used to
	 * fsync, we cannot visit the ancestor inodes for all the other hard
	 * links to figure out if any is new, so we fall back to a transaction
	 * commit (instead of adding a lot of complexity for scanning a btree,
	 * since this scenario is not a common use case).
	 */
	if (inode->vfs_inode.i_nlink > 1 &&
	    inode->last_link_trans > last_committed) {
		ret = -EMLINK;
		goto end_trans;
	}

	while (1) {
		if (!parent || d_really_is_negative(parent) || sb != parent->d_sb)
			break;

		inode = BTRFS_I(d_inode(parent));
		if (root != inode->root)
			break;

		if (inode->generation > last_committed) {
			ret = btrfs_log_inode(trans, root, inode,
					      LOG_INODE_EXISTS,
					      0, LLONG_MAX, ctx);
			if (ret)
				goto end_trans;
		}
		if (IS_ROOT(parent))
			break;

		parent = dget_parent(parent);
		dput(old_parent);
		old_parent = parent;
	}
	if (log_dentries)
		ret = log_new_dir_dentries(trans, root, orig_inode, ctx);
	else
		ret = 0;
end_trans:
	dput(old_parent);
	if (ret < 0) {
		btrfs_set_log_full_commit(fs_info, trans);
		ret = 1;
	}

	if (ret)
		btrfs_remove_log_ctx(root, ctx);
	btrfs_end_log_trans(root);
end_no_trans:
	return ret;
}

/*
 * It is not safe to log a dentry if the chunk root has added new chunks.
 * This returns 0 if the dentry was logged, and 1 otherwise.
 * If this returns 1, you must commit the transaction to safely get your
 * data on disk.
 */
int btrfs_log_dentry_safe(struct btrfs_trans_handle *trans,
			  struct dentry *dentry,
			  const loff_t start,
			  const loff_t end,
			  struct btrfs_log_ctx *ctx)
{
	struct dentry *parent = dget_parent(dentry);
	int ret;

	ret = btrfs_log_inode_parent(trans, BTRFS_I(d_inode(dentry)), parent,
				     start, end, LOG_INODE_ALL, ctx);
	dput(parent);

	return ret;
}

/*
 * Should be called during mount to recover and replay any log trees
 * from the FS.
 */
int btrfs_recover_log_trees(struct btrfs_root *log_root_tree)
{
	int ret;
	struct btrfs_path *path;
	struct btrfs_trans_handle *trans;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_key tmp_key;
	struct btrfs_root *log;
	struct btrfs_fs_info *fs_info = log_root_tree->fs_info;
	struct walk_control wc = {
		.process_func = process_one_buffer,
		.stage = 0,
	};

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	set_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);

	trans = btrfs_start_transaction(fs_info->tree_root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto error;
	}

	wc.trans = trans;
	wc.pin = 1;

	ret = walk_log_tree(trans, log_root_tree, &wc);
	if (ret) {
		btrfs_handle_fs_error(fs_info, ret,
			"Failed to pin buffers while recovering log root tree.");
		goto error;
	}

again:
	key.objectid = BTRFS_TREE_LOG_OBJECTID;
	key.offset = (u64)-1;
	key.type = BTRFS_ROOT_ITEM_KEY;

	while (1) {
		ret = btrfs_search_slot(NULL, log_root_tree, &key, path, 0, 0);

		if (ret < 0) {
			btrfs_handle_fs_error(fs_info, ret,
					"Couldn't find tree log root.");
			goto error;
		}
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}
		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
				      path->slots[0]);
		btrfs_release_path(path);
		if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID)
			break;

		log = btrfs_read_fs_root(log_root_tree, &found_key);
		if (IS_ERR(log)) {
			ret = PTR_ERR(log);
			btrfs_handle_fs_error(fs_info, ret,
					"Couldn't read tree log root.");
			goto error;
		}

		tmp_key.objectid = found_key.offset;
		tmp_key.type = BTRFS_ROOT_ITEM_KEY;
		tmp_key.offset = (u64)-1;

		wc.replay_dest = btrfs_read_fs_root_no_name(fs_info, &tmp_key);
		if (IS_ERR(wc.replay_dest)) {
			ret = PTR_ERR(wc.replay_dest);
			free_extent_buffer(log->node);
			free_extent_buffer(log->commit_root);
			kfree(log);
			btrfs_handle_fs_error(fs_info, ret,
				"Couldn't read target root for tree log recovery.");
			goto error;
		}

		wc.replay_dest->log_root = log;
		btrfs_record_root_in_trans(trans, wc.replay_dest);
		ret = walk_log_tree(trans, log, &wc);

		if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
			ret = fixup_inode_link_counts(trans, wc.replay_dest,
						      path);
		}

		if (!ret && wc.stage == LOG_WALK_REPLAY_ALL) {
			struct btrfs_root *root = wc.replay_dest;

			btrfs_release_path(path);

			/*
			 * We have just replayed everything, and the highest
			 * objectid of the fs roots probably has changed in
			 * case some inode_items got replayed.
			 *
			 * root->objectid_mutex is not acquired as log replay
			 * can only happen during mount.
			 */
			ret = btrfs_find_highest_objectid(root,
						&root->highest_objectid);
		}

		key.offset = found_key.offset - 1;
		wc.replay_dest->log_root = NULL;
		free_extent_buffer(log->node);
		free_extent_buffer(log->commit_root);
		kfree(log);

		if (ret)
			goto error;

		if (found_key.offset == 0)
			break;
	}
	btrfs_release_path(path);

	/* step one is to pin it all, step two is to replay just inodes */
	if (wc.pin) {
		wc.pin = 0;
		wc.process_func = replay_one_buffer;
		wc.stage = LOG_WALK_REPLAY_INODES;
		goto again;
	}
	/* step three is to replay everything */
	if (wc.stage < LOG_WALK_REPLAY_ALL) {
		wc.stage++;
		goto again;
	}

	btrfs_free_path(path);

	/* step 4: commit the transaction, which also unpins the blocks */
	ret = btrfs_commit_transaction(trans);
	if (ret)
		return ret;

	free_extent_buffer(log_root_tree->node);
	log_root_tree->log_root = NULL;
	clear_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags);
	kfree(log_root_tree);

	return 0;
error:
	if (wc.trans)
		btrfs_end_transaction(wc.trans);
	btrfs_free_path(path);
	return ret;
}

/*
 * There are some corner cases where we want to force a full
 * commit instead of allowing a directory to be logged.
 *
 * They revolve around files that were unlinked from the directory, and
 * this function updates the parent directory so that a full commit is
 * properly done if it is fsync'd later, after the unlinks are done.
 *
 * Must be called before the unlink operations (updates to the subvolume tree,
 * inodes, etc) are done.
 */
void btrfs_record_unlink_dir(struct btrfs_trans_handle *trans,
			     struct btrfs_inode *dir, struct btrfs_inode *inode,
			     int for_rename)
{
	/*
	 * When we're logging a file, if it hasn't been renamed
	 * or unlinked, and its inode is fully committed on disk,
	 * we don't have to worry about walking up the directory chain
	 * to log its parents.
	 *
	 * So, we use the last_unlink_trans field to put this transid
	 * into the file. When the file is logged we check it and
	 * don't log the parents if the file is fully on disk.
	 */
	mutex_lock(&inode->log_mutex);
	inode->last_unlink_trans = trans->transid;
	mutex_unlock(&inode->log_mutex);

	/*
	 * If this directory was already logged, any new
	 * names for this file/dir will get recorded.
	 */
	smp_mb();
	if (dir->logged_trans == trans->transid)
		return;

	/*
	 * If the inode we're about to unlink was logged,
	 * the log will be properly updated for any new names.
	 */
	if (inode->logged_trans == trans->transid)
		return;

	/*
	 * When renaming files across directories, if the directory
	 * we're unlinking from gets fsync'd later on, there's
	 * no way to find the destination directory later and fsync it
	 * properly. So, we have to be conservative and force commits
	 * so the new name gets discovered.
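	 *
	 * A hypothetical sequence illustrating this (the names dir_a, dir_b
	 * and foo are made up for this sketch, they do not come from the
	 * code):
	 *
	 * mkdir dir_a dir_b
	 * touch dir_a/foo
	 * sync
	 * mv dir_a/foo dir_b/foo
	 * xfs_io -c fsync dir_a
	 * <power failure>
	 *
	 * Recording the rename in dir_a's last_unlink_trans makes the later
	 * fsync of dir_a fall back to a full transaction commit, which also
	 * persists the new name under dir_b. Without it, only dir_a would be
	 * logged and the new name could be lost after a log replay.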
	 */
	if (for_rename)
		goto record;

	/* we can safely do the unlink without any special recording */
	return;

record:
	mutex_lock(&dir->log_mutex);
	dir->last_unlink_trans = trans->transid;
	mutex_unlock(&dir->log_mutex);
}

/*
 * Make sure that if someone attempts to fsync the parent directory of a
 * deleted snapshot, it ends up triggering a transaction commit. This is to
 * guarantee that after replaying the log tree of the parent directory's root
 * we will not see the snapshot anymore, and at log replay time we will not
 * see any log tree corresponding to the deleted snapshot's root, which could
 * lead to replaying it after replaying the log tree of the parent directory
 * (which would replay the snapshot delete operation).
 *
 * Must be called before the actual snapshot destroy operation (updates to the
 * parent root and the tree of tree roots, etc) are done.
 */
void btrfs_record_snapshot_destroy(struct btrfs_trans_handle *trans,
				   struct btrfs_inode *dir)
{
	mutex_lock(&dir->log_mutex);
	dir->last_unlink_trans = trans->transid;
	mutex_unlock(&dir->log_mutex);
}

/*
 * Call this after adding a new name for a file and it will properly
 * update the log to reflect the new name.
 *
 * @ctx cannot be NULL when @sync_log is false, and should be NULL when it's
 * true (because it's not used).
 *
 * The return value depends on whether @sync_log is true or false.
 * When true:  returns BTRFS_NEED_TRANS_COMMIT if the transaction needs to be
 *             committed by the caller, and BTRFS_DONT_NEED_TRANS_COMMIT
 *             otherwise.
 * When false: returns BTRFS_DONT_NEED_LOG_SYNC if the caller does not need to
 *             sync the log, BTRFS_NEED_LOG_SYNC if it needs to sync the log,
 *             or BTRFS_NEED_TRANS_COMMIT if the transaction needs to be
 *             committed (without attempting to sync the log).
 */
int btrfs_log_new_name(struct btrfs_trans_handle *trans,
		       struct btrfs_inode *inode, struct btrfs_inode *old_dir,
		       struct dentry *parent,
		       bool sync_log, struct btrfs_log_ctx *ctx)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	int ret;

	/*
	 * This will force the logging code to walk the dentry chain
	 * up for the file.
	 */
	if (!S_ISDIR(inode->vfs_inode.i_mode))
		inode->last_unlink_trans = trans->transid;

	/*
	 * If this inode hasn't been logged and the directory we're renaming it
	 * from hasn't been logged, we don't need to log it.
	 */
	if (inode->logged_trans <= fs_info->last_trans_committed &&
	    (!old_dir || old_dir->logged_trans <= fs_info->last_trans_committed))
		return sync_log ?
		       BTRFS_DONT_NEED_TRANS_COMMIT :
		       BTRFS_DONT_NEED_LOG_SYNC;

	if (sync_log) {
		struct btrfs_log_ctx ctx2;

		btrfs_init_log_ctx(&ctx2, &inode->vfs_inode);
		ret = btrfs_log_inode_parent(trans, inode, parent, 0, LLONG_MAX,
					     LOG_INODE_EXISTS, &ctx2);
		if (ret == BTRFS_NO_LOG_SYNC)
			return BTRFS_DONT_NEED_TRANS_COMMIT;
		else if (ret)
			return BTRFS_NEED_TRANS_COMMIT;

		ret = btrfs_sync_log(trans, inode->root, &ctx2);
		if (ret)
			return BTRFS_NEED_TRANS_COMMIT;
		return BTRFS_DONT_NEED_TRANS_COMMIT;
	}

	ASSERT(ctx);
	ret = btrfs_log_inode_parent(trans, inode, parent, 0, LLONG_MAX,
				     LOG_INODE_EXISTS, ctx);
	if (ret == BTRFS_NO_LOG_SYNC)
		return BTRFS_DONT_NEED_LOG_SYNC;
	else if (ret)
		return BTRFS_NEED_TRANS_COMMIT;

	return BTRFS_NEED_LOG_SYNC;
}
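
/*
 * Illustrative sketch only, not part of the original file: a simplified,
 * hypothetical caller of btrfs_log_dentry_safe() showing how its return
 * values and BTRFS_NO_LOG_SYNC are meant to be consumed on an fsync-style
 * path. The real fsync code does considerably more work (waiting for
 * ordered extents, joining the transaction, etc); the function name
 * example_fsync_dentry() is made up for this sketch, which is why it is
 * kept out of the build.
 */
#if 0
static int example_fsync_dentry(struct btrfs_trans_handle *trans,
				struct dentry *dentry,
				loff_t start, loff_t end)
{
	struct btrfs_inode *inode = BTRFS_I(d_inode(dentry));
	struct btrfs_root *root = inode->root;
	struct btrfs_log_ctx ctx;
	int ret;

	btrfs_init_log_ctx(&ctx, &inode->vfs_inode);

	ret = btrfs_log_dentry_safe(trans, dentry, start, end, &ctx);
	if (ret == 0) {
		/* The dentry was logged, syncing the log tree is enough. */
		ret = btrfs_sync_log(trans, root, &ctx);
		if (ret == 0)
			return btrfs_end_transaction(trans);
	} else if (ret == BTRFS_NO_LOG_SYNC) {
		/* Nothing needed logging (already in the log or a tmpfile). */
		return btrfs_end_transaction(trans);
	}

	/*
	 * Logging was not possible (ret == 1) or the log sync failed:
	 * fall back to a full transaction commit.
	 */
	return btrfs_commit_transaction(trans);
}
#endif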